1998-03-07 19:24:35 +00:00
|
|
|
/*-
|
2000-01-09 21:13:48 +00:00
|
|
|
* Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra.
|
2003-02-13 17:47:44 +00:00
|
|
|
* Copyright 2003 Alexander Kabaev <kan@FreeBSD.ORG>.
|
2011-01-22 19:25:49 +00:00
|
|
|
* Copyright 2009, 2010, 2011 Konstantin Belousov <kib@FreeBSD.ORG>.
|
1998-03-07 19:24:35 +00:00
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
|
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
|
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
|
|
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
|
|
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*
|
1999-08-28 00:22:10 +00:00
|
|
|
* $FreeBSD$
|
1998-03-07 19:24:35 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Dynamic linker for ELF.
|
|
|
|
*
|
|
|
|
* John Polstra <jdp@polstra.com>.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __GNUC__
|
|
|
|
#error "GCC is needed to compile this file"
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#include <sys/param.h>
|
2005-03-24 10:12:29 +00:00
|
|
|
#include <sys/mount.h>
|
1998-03-07 19:24:35 +00:00
|
|
|
#include <sys/mman.h>
|
1999-08-30 01:50:41 +00:00
|
|
|
#include <sys/stat.h>
|
2010-08-17 09:05:39 +00:00
|
|
|
#include <sys/sysctl.h>
|
2007-01-09 17:50:05 +00:00
|
|
|
#include <sys/uio.h>
|
2009-03-18 13:40:37 +00:00
|
|
|
#include <sys/utsname.h>
|
2007-01-09 17:50:05 +00:00
|
|
|
#include <sys/ktrace.h>
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
#include <dlfcn.h>
|
|
|
|
#include <err.h>
|
|
|
|
#include <errno.h>
|
|
|
|
#include <fcntl.h>
|
|
|
|
#include <stdarg.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
|
|
|
|
#include "debug.h"
|
|
|
|
#include "rtld.h"
|
2003-04-07 16:21:26 +00:00
|
|
|
#include "libmap.h"
|
2004-08-03 08:51:00 +00:00
|
|
|
#include "rtld_tls.h"
|
2011-08-24 20:05:13 +00:00
|
|
|
#include "rtld_printf.h"
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2004-03-21 01:21:26 +00:00
|
|
|
#ifndef COMPAT_32BIT
|
2003-08-17 08:06:00 +00:00
|
|
|
#define PATH_RTLD "/libexec/ld-elf.so.1"
|
2004-03-21 01:21:26 +00:00
|
|
|
#else
|
|
|
|
#define PATH_RTLD "/libexec/ld-elf32.so.1"
|
|
|
|
#endif
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
/* Types. */
|
|
|
|
typedef void (*func_ptr_type)();
|
2003-02-13 17:47:44 +00:00
|
|
|
typedef void * (*path_enum_proc) (const char *path, size_t len, void *arg);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Function declarations.
|
|
|
|
*/
|
1999-04-21 04:06:57 +00:00
|
|
|
static const char *basename(const char *);
|
2005-12-18 19:43:33 +00:00
|
|
|
static void die(void) __dead2;
|
2010-08-17 09:05:39 +00:00
|
|
|
static void digest_dynamic1(Obj_Entry *, int, const Elf_Dyn **,
|
|
|
|
const Elf_Dyn **);
|
|
|
|
static void digest_dynamic2(Obj_Entry *, const Elf_Dyn *, const Elf_Dyn *);
|
2002-04-02 02:19:02 +00:00
|
|
|
static void digest_dynamic(Obj_Entry *, int);
|
1999-07-18 00:02:19 +00:00
|
|
|
static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *);
|
1998-03-07 19:24:35 +00:00
|
|
|
static Obj_Entry *dlcheck(void *);
|
2012-01-07 10:33:01 +00:00
|
|
|
static Obj_Entry *dlopen_object(const char *name, int fd, Obj_Entry *refobj,
|
2010-12-25 08:51:20 +00:00
|
|
|
int lo_flags, int mode);
|
2009-11-26 13:57:20 +00:00
|
|
|
static Obj_Entry *do_load_object(int, const char *, char *, struct stat *, int);
|
2003-02-13 17:47:44 +00:00
|
|
|
static int do_search_info(const Obj_Entry *obj, int, struct dl_serinfo *);
|
2000-09-19 04:27:16 +00:00
|
|
|
static bool donelist_check(DoneList *, const Obj_Entry *);
|
2001-01-05 04:36:17 +00:00
|
|
|
static void errmsg_restore(char *);
|
|
|
|
static char *errmsg_save(void);
|
2003-02-13 17:47:44 +00:00
|
|
|
static void *fill_search_info(const char *, size_t, void *);
|
1998-03-07 19:24:35 +00:00
|
|
|
static char *find_library(const char *, const Obj_Entry *);
|
1998-09-05 03:31:00 +00:00
|
|
|
static const char *gethints(void);
|
1999-08-30 01:48:19 +00:00
|
|
|
static void init_dag(Obj_Entry *);
|
2010-08-17 09:05:39 +00:00
|
|
|
static void init_rtld(caddr_t, Elf_Auxinfo **);
|
2006-09-19 16:48:08 +00:00
|
|
|
static void initlist_add_neededs(Needed_Entry *, Objlist *);
|
|
|
|
static void initlist_add_objects(Obj_Entry *, Obj_Entry **, Objlist *);
|
1998-09-02 02:00:20 +00:00
|
|
|
static void linkmap_add(Obj_Entry *);
|
|
|
|
static void linkmap_delete(Obj_Entry *);
|
2010-12-25 08:51:20 +00:00
|
|
|
static void load_filtees(Obj_Entry *, int flags, RtldLockState *);
|
|
|
|
static void unload_filtees(Obj_Entry *);
|
2009-11-26 13:57:20 +00:00
|
|
|
static int load_needed_objects(Obj_Entry *, int);
|
1998-09-22 02:09:56 +00:00
|
|
|
static int load_preload_objects(void);
|
2012-01-07 10:33:01 +00:00
|
|
|
static Obj_Entry *load_object(const char *, int fd, const Obj_Entry *, int);
|
2011-01-10 16:09:35 +00:00
|
|
|
static void map_stacks_exec(RtldLockState *);
|
1998-03-07 19:24:35 +00:00
|
|
|
static Obj_Entry *obj_from_addr(const void *);
|
2010-12-25 08:51:20 +00:00
|
|
|
static void objlist_call_fini(Objlist *, Obj_Entry *, RtldLockState *);
|
|
|
|
static void objlist_call_init(Objlist *, RtldLockState *);
|
2000-07-26 04:24:40 +00:00
|
|
|
static void objlist_clear(Objlist *);
|
1999-08-30 01:48:19 +00:00
|
|
|
static Objlist_Entry *objlist_find(Objlist *, const Obj_Entry *);
|
2000-07-26 04:24:40 +00:00
|
|
|
static void objlist_init(Objlist *);
|
|
|
|
static void objlist_push_head(Objlist *, Obj_Entry *);
|
|
|
|
static void objlist_push_tail(Objlist *, Obj_Entry *);
|
1999-08-30 01:48:19 +00:00
|
|
|
static void objlist_remove(Objlist *, Obj_Entry *);
|
2003-02-13 17:47:44 +00:00
|
|
|
static void *path_enumerate(const char *, path_enum_proc, void *);
|
2010-12-25 08:51:20 +00:00
|
|
|
static int relocate_objects(Obj_Entry *, bool, Obj_Entry *, RtldLockState *);
|
2011-12-14 16:47:53 +00:00
|
|
|
static int resolve_objects_ifunc(Obj_Entry *first, bool bind_now,
|
|
|
|
RtldLockState *lockstate);
|
2003-02-13 17:47:44 +00:00
|
|
|
static int rtld_dirname(const char *, char *);
|
2009-03-18 13:40:37 +00:00
|
|
|
static int rtld_dirname_abs(const char *, char *);
|
2012-01-07 10:33:01 +00:00
|
|
|
static void *rtld_dlopen(const char *name, int fd, int mode);
|
1998-03-07 19:24:35 +00:00
|
|
|
static void rtld_exit(void);
|
|
|
|
static char *search_library_path(const char *, const char *);
|
2011-01-10 16:09:35 +00:00
|
|
|
static const void **get_program_var_addr(const char *, RtldLockState *);
|
1999-04-21 04:06:57 +00:00
|
|
|
static void set_program_var(const char *, const void *);
|
2010-12-25 08:51:20 +00:00
|
|
|
static int symlook_default(SymLook *, const Obj_Entry *refobj);
|
2011-01-10 16:09:35 +00:00
|
|
|
static int symlook_global(SymLook *, DoneList *);
|
2010-12-25 08:51:20 +00:00
|
|
|
static void symlook_init_from_req(SymLook *, const SymLook *);
|
|
|
|
static int symlook_list(SymLook *, const Objlist *, DoneList *);
|
|
|
|
static int symlook_needed(SymLook *, const Needed_Entry *, DoneList *);
|
|
|
|
static int symlook_obj1(SymLook *, const Obj_Entry *);
|
2006-09-19 16:48:08 +00:00
|
|
|
static void trace_loaded_objects(Obj_Entry *);
|
2003-02-17 20:58:27 +00:00
|
|
|
static void unlink_object(Obj_Entry *);
|
2000-01-09 21:13:48 +00:00
|
|
|
static void unload_object(Obj_Entry *);
|
1999-08-30 01:48:19 +00:00
|
|
|
static void unref_dag(Obj_Entry *);
|
2003-05-08 01:31:36 +00:00
|
|
|
static void ref_dag(Obj_Entry *);
|
2009-06-20 14:16:41 +00:00
|
|
|
static int origin_subst_one(char **, const char *, const char *,
|
|
|
|
const char *, char *);
|
|
|
|
static char *origin_subst(const char *, const char *);
|
2005-12-18 19:43:33 +00:00
|
|
|
static int rtld_verify_versions(const Objlist *);
|
|
|
|
static int rtld_verify_object_versions(Obj_Entry *);
|
|
|
|
static void object_add_name(Obj_Entry *, const char *);
|
|
|
|
static int object_match_name(const Obj_Entry *, const char *);
|
2007-01-09 17:50:05 +00:00
|
|
|
static void ld_utrace_log(int, void *, void *, size_t, int, const char *);
|
2010-08-23 15:27:03 +00:00
|
|
|
static void rtld_fill_dl_phdr_info(const Obj_Entry *obj,
|
|
|
|
struct dl_phdr_info *phdr_info);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2011-09-03 11:41:00 +00:00
|
|
|
void r_debug_state(struct r_debug *, struct link_map *) __noinline;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Data declarations.
|
|
|
|
*/
|
|
|
|
static char *error_message; /* Message for dlerror(), or NULL */
|
2003-02-13 17:47:44 +00:00
|
|
|
struct r_debug r_debug; /* for GDB; */
|
2003-05-31 14:45:11 +00:00
|
|
|
static bool libmap_disable; /* Disable libmap */
|
2010-12-25 08:51:20 +00:00
|
|
|
static bool ld_loadfltr; /* Immediate filters processing */
|
2005-02-04 02:46:41 +00:00
|
|
|
static char *libmap_override; /* Maps to use in addition to libmap.conf */
|
1998-03-07 19:24:35 +00:00
|
|
|
static bool trust; /* False for setuid and setgid programs */
|
2005-03-24 10:12:29 +00:00
|
|
|
static bool dangerous_ld_env; /* True if environment variables have been
|
|
|
|
used to affect the libraries loaded */
|
1998-03-07 19:24:35 +00:00
|
|
|
static char *ld_bind_now; /* Environment variable for immediate binding */
|
|
|
|
static char *ld_debug; /* Environment variable for debugging */
|
|
|
|
static char *ld_library_path; /* Environment variable for search path */
|
1998-09-22 02:09:56 +00:00
|
|
|
static char *ld_preload; /* Environment variable for libraries to
|
|
|
|
load first */
|
2009-03-23 16:49:00 +00:00
|
|
|
static char *ld_elf_hints_path; /* Environment variable for alternative hints path */
|
1998-05-01 08:39:27 +00:00
|
|
|
static char *ld_tracing; /* Called from ldd to print libs */
|
2007-01-09 17:50:05 +00:00
|
|
|
static char *ld_utrace; /* Use utrace() to log events. */
|
1998-03-07 19:24:35 +00:00
|
|
|
static Obj_Entry *obj_list; /* Head of linked list of shared objects */
|
|
|
|
static Obj_Entry **obj_tail; /* Link field of last object in list */
|
|
|
|
static Obj_Entry *obj_main; /* The main program shared object */
|
|
|
|
static Obj_Entry obj_rtld; /* The dynamic linker shared object */
|
Solve the dynamic linker's problems with multithreaded programs once
and for all (I hope). Packages such as wine, JDK, and linuxthreads
should no longer have any problems with re-entering the dynamic
linker.
This commit replaces the locking used in the dynamic linker with a
new spinlock-based reader/writer lock implementation. Brian
Fundakowski Feldman <green> argued for this from the very beginning,
but it took me a long time to come around to his point of view.
Spinlocks are the only kinds of locks that work with all thread
packages. But on uniprocessor systems they can be inefficient,
because while a contender for the lock is spinning the holder of the
lock cannot make any progress toward releasing it. To alleviate
this disadvantage I have borrowed a trick from Sleepycat's Berkeley
DB implementation. When spinning for a lock, the requester does a
nanosleep() call for 1 usec. each time around the loop. This will
generally yield the CPU to other threads, allowing the lock holder
to finish its business and release the lock. I chose 1 usec. as the
minimum sleep which would with reasonable certainty not be rounded
down to 0.
The formerly machine-independent file "lockdflt.c" has been moved
into the architecture-specific subdirectories by repository copy.
It now contains the machine-dependent spinlocking code. For the
spinlocks I used the very nifty "simple, non-scalable reader-preference
lock" which I found at
<http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/rw.html>
on all CPUs except the 80386 (the specific CPU model, not the
architecture). The 80386 CPU doesn't support the necessary "cmpxchg"
instruction, so on that CPU a simple exclusive test-and-set lock
is used instead. 80386 CPUs are detected at initialization time by
trying to execute "cmpxchg" and catching the resulting SIGILL
signal.
To reduce contention for the locks, I have revamped a couple of
key data structures, permitting all common operations to be done
under non-exclusive (reader) locking. The only operations that
require exclusive locking now are the rare intrusive operations
such as dlopen() and dlclose().
The dllockinit() interface is now deprecated. It still exists,
but only as a do-nothing stub. I plan to remove it as soon as is
reasonably possible. (From the very beginning it was clearly
labeled as experimental and subject to change.) As far as I know,
only the linuxthreads port uses dllockinit(). This interface turned
out to have several problems. As one example, when the dynamic
linker called a client-supplied locking function, that function
sometimes needed lazy binding, causing re-entry into the dynamic
linker and a big looping mess. And in any case, it turned out to be
too burdensome to require threads packages to register themselves
with the dynamic linker.
2000-07-08 04:10:38 +00:00
|
|
|
static unsigned int obj_count; /* Number of objects in obj_list */
|
2007-04-03 18:31:20 +00:00
|
|
|
static unsigned int obj_loads; /* Number of objects in obj_list */
|
1999-08-30 01:48:19 +00:00
|
|
|
|
|
|
|
static Objlist list_global = /* Objects dlopened with RTLD_GLOBAL */
|
|
|
|
STAILQ_HEAD_INITIALIZER(list_global);
|
|
|
|
static Objlist list_main = /* Objects loaded at program startup */
|
|
|
|
STAILQ_HEAD_INITIALIZER(list_main);
|
2000-07-26 04:24:40 +00:00
|
|
|
static Objlist list_fini = /* Objects needing fini() calls */
|
|
|
|
STAILQ_HEAD_INITIALIZER(list_fini);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2010-09-12 17:04:51 +00:00
|
|
|
Elf_Sym sym_zero; /* For resolving undefined weak refs. */
|
1999-04-05 02:36:40 +00:00
|
|
|
|
2000-08-26 05:13:29 +00:00
|
|
|
#define GDB_STATE(s,m) r_debug.r_state = s; r_debug_state(&r_debug,m);
|
1998-04-30 07:48:02 +00:00
|
|
|
|
1998-09-04 19:03:57 +00:00
|
|
|
extern Elf_Dyn _DYNAMIC;
|
1999-04-09 00:28:43 +00:00
|
|
|
#pragma weak _DYNAMIC
|
Fix the problem that surfaced with the new binutils import on sparc64
(and that is for now being worked around by a binutils patch).
The rtld code tested &_DYNAMIC against 0 to see whether rtld itself
was built as PIC or not. While the sparc64 MD code did not rely
on the preset value of the GOT slot for _DYNAMIC any more due
to previous binutils changes, it still used to not be 0, so
that this check did work. The new binutils do however initialize
this slot with 0. As a consequence, rtld would not properly initialize
itself and crash.
Fix that by introducing a new macro, RTLD_IS_DYNAMIC, to take the role
of this test. For sparc64, it is implemented using the rtld_dynamic()
code that was already there. If an architecture does not provide its
own implementation, we default to the old check.
While being there, mark _DYNAMIC as a weak symbol in the sparc64
rtld_start.S. This is needed in the LDSCRIPT case, which is however
not currently supported for want of an actual ldscript.
Sanity checked with md5 on alpha, amd64, i386 and ia64.
2004-06-18 02:01:37 +00:00
|
|
|
#ifndef RTLD_IS_DYNAMIC
|
|
|
|
#define RTLD_IS_DYNAMIC() (&_DYNAMIC != NULL)
|
|
|
|
#endif
|
1999-04-05 02:36:40 +00:00
|
|
|
|
2010-08-17 09:05:39 +00:00
|
|
|
int osreldate, pagesize;
|
|
|
|
|
2011-01-25 21:12:31 +00:00
|
|
|
static int stack_prot = PROT_READ | PROT_WRITE | RTLD_DEFAULT_STACK_EXEC;
|
2011-01-08 17:11:49 +00:00
|
|
|
static int max_stack_flags;
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
|
|
|
* Global declarations normally provided by crt1. The dynamic linker is
|
2000-01-09 21:13:48 +00:00
|
|
|
* not built with crt1, so we have to provide them ourselves.
|
1998-03-07 19:24:35 +00:00
|
|
|
*/
|
|
|
|
char *__progname;
|
|
|
|
char **environ;
|
|
|
|
|
2004-08-03 08:51:00 +00:00
|
|
|
/*
|
|
|
|
* Globals to control TLS allocation.
|
|
|
|
*/
|
|
|
|
size_t tls_last_offset; /* Static TLS offset of last module */
|
|
|
|
size_t tls_last_size; /* Static TLS size of last module */
|
|
|
|
size_t tls_static_space; /* Static TLS space allocated */
|
|
|
|
int tls_dtv_generation = 1; /* Used to detect when dtv size changes */
|
|
|
|
int tls_max_index = 1; /* Largest module index allocated */
|
|
|
|
|
Solve the dynamic linker's problems with multithreaded programs once
and for all (I hope). Packages such as wine, JDK, and linuxthreads
should no longer have any problems with re-entering the dynamic
linker.
This commit replaces the locking used in the dynamic linker with a
new spinlock-based reader/writer lock implementation. Brian
Fundakowski Feldman <green> argued for this from the very beginning,
but it took me a long time to come around to his point of view.
Spinlocks are the only kinds of locks that work with all thread
packages. But on uniprocessor systems they can be inefficient,
because while a contender for the lock is spinning the holder of the
lock cannot make any progress toward releasing it. To alleviate
this disadvantage I have borrowed a trick from Sleepycat's Berkeley
DB implementation. When spinning for a lock, the requester does a
nanosleep() call for 1 usec. each time around the loop. This will
generally yield the CPU to other threads, allowing the lock holder
to finish its business and release the lock. I chose 1 usec. as the
minimum sleep which would with reasonable certainty not be rounded
down to 0.
The formerly machine-independent file "lockdflt.c" has been moved
into the architecture-specific subdirectories by repository copy.
It now contains the machine-dependent spinlocking code. For the
spinlocks I used the very nifty "simple, non-scalable reader-preference
lock" which I found at
<http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/rw.html>
on all CPUs except the 80386 (the specific CPU model, not the
architecture). The 80386 CPU doesn't support the necessary "cmpxchg"
instruction, so on that CPU a simple exclusive test-and-set lock
is used instead. 80386 CPUs are detected at initialization time by
trying to execute "cmpxchg" and catching the resulting SIGILL
signal.
To reduce contention for the locks, I have revamped a couple of
key data structures, permitting all common operations to be done
under non-exclusive (reader) locking. The only operations that
require exclusive locking now are the rare intrusive operations
such as dlopen() and dlclose().
The dllockinit() interface is now deprecated. It still exists,
but only as a do-nothing stub. I plan to remove it as soon as is
reasonably possible. (From the very beginning it was clearly
labeled as experimental and subject to change.) As far as I know,
only the linuxthreads port uses dllockinit(). This interface turned
out to have several problems. As one example, when the dynamic
linker called a client-supplied locking function, that function
sometimes needed lazy binding, causing re-entry into the dynamic
linker and a big looping mess. And in any case, it turned out to be
too burdensome to require threads packages to register themselves
with the dynamic linker.
2000-07-08 04:10:38 +00:00
|
|
|
/*
|
|
|
|
* Fill in a DoneList with an allocation large enough to hold all of
|
|
|
|
* the currently-loaded objects. Keep this as a macro since it calls
|
|
|
|
* alloca and we want that to occur within the scope of the caller.
|
|
|
|
*/
|
|
|
|
#define donelist_init(dlp) \
|
|
|
|
((dlp)->objs = alloca(obj_count * sizeof (dlp)->objs[0]), \
|
|
|
|
assert((dlp)->objs != NULL), \
|
|
|
|
(dlp)->num_alloc = obj_count, \
|
|
|
|
(dlp)->num_used = 0)
|
|
|
|
|
2007-01-09 17:50:05 +00:00
|
|
|
#define UTRACE_DLOPEN_START 1
|
|
|
|
#define UTRACE_DLOPEN_STOP 2
|
|
|
|
#define UTRACE_DLCLOSE_START 3
|
|
|
|
#define UTRACE_DLCLOSE_STOP 4
|
|
|
|
#define UTRACE_LOAD_OBJECT 5
|
|
|
|
#define UTRACE_UNLOAD_OBJECT 6
|
|
|
|
#define UTRACE_ADD_RUNDEP 7
|
|
|
|
#define UTRACE_PRELOAD_FINISHED 8
|
|
|
|
#define UTRACE_INIT_CALL 9
|
|
|
|
#define UTRACE_FINI_CALL 10
|
|
|
|
|
|
|
|
/*
 * Event record written via utrace(2) (see ld_utrace_log) when the
 * LD_UTRACE environment variable is set, so external tracers can
 * observe dynamic-linker activity.
 */
struct utrace_rtld {
	char sig[4];		/* 'RTLD' */
	int event;		/* One of the UTRACE_* event codes above */
	void *handle;		/* Opaque handle supplied by the caller */
	void *mapbase;		/* Used for 'parent' and 'init/fini' */
	size_t mapsize;
	int refcnt;		/* Used for 'mode' */
	char name[MAXPATHLEN];	/* Object path; zero-filled, NUL-terminated */
};
|
|
|
|
|
|
|
|
#define LD_UTRACE(e, h, mb, ms, r, n) do { \
|
|
|
|
if (ld_utrace != NULL) \
|
|
|
|
ld_utrace_log(e, h, mb, ms, r, n); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
static void
|
|
|
|
ld_utrace_log(int event, void *handle, void *mapbase, size_t mapsize,
|
|
|
|
int refcnt, const char *name)
|
|
|
|
{
|
|
|
|
struct utrace_rtld ut;
|
|
|
|
|
|
|
|
ut.sig[0] = 'R';
|
|
|
|
ut.sig[1] = 'T';
|
|
|
|
ut.sig[2] = 'L';
|
|
|
|
ut.sig[3] = 'D';
|
|
|
|
ut.event = event;
|
|
|
|
ut.handle = handle;
|
|
|
|
ut.mapbase = mapbase;
|
|
|
|
ut.mapsize = mapsize;
|
|
|
|
ut.refcnt = refcnt;
|
|
|
|
bzero(ut.name, sizeof(ut.name));
|
|
|
|
if (name)
|
|
|
|
strlcpy(ut.name, name, sizeof(ut.name));
|
|
|
|
utrace(&ut, sizeof(ut));
|
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
|
|
|
* Main entry point for dynamic linking. The first argument is the
|
|
|
|
* stack pointer. The stack is expected to be laid out as described
|
|
|
|
* in the SVR4 ABI specification, Intel 386 Processor Supplement.
|
|
|
|
* Specifically, the stack pointer points to a word containing
|
|
|
|
* ARGC. Following that in the stack is a null-terminated sequence
|
|
|
|
* of pointers to argument strings. Then comes a null-terminated
|
|
|
|
* sequence of pointers to environment strings. Finally, there is a
|
|
|
|
* sequence of "auxiliary vector" entries.
|
|
|
|
*
|
|
|
|
* The second argument points to a place to store the dynamic linker's
|
1998-09-04 19:03:57 +00:00
|
|
|
* exit procedure pointer and the third to a place to store the main
|
|
|
|
* program's object.
|
1998-03-07 19:24:35 +00:00
|
|
|
*
|
|
|
|
* The return value is the main program's entry point.
|
|
|
|
*/
|
|
|
|
func_ptr_type
|
1998-09-04 19:03:57 +00:00
|
|
|
_rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
1998-09-04 19:03:57 +00:00
|
|
|
Elf_Auxinfo *aux_info[AT_COUNT];
|
1998-03-07 19:24:35 +00:00
|
|
|
int i;
|
|
|
|
int argc;
|
|
|
|
char **argv;
|
|
|
|
char **env;
|
1998-09-04 19:03:57 +00:00
|
|
|
Elf_Auxinfo *aux;
|
|
|
|
Elf_Auxinfo *auxp;
|
1999-07-18 00:02:19 +00:00
|
|
|
const char *argv0;
|
2004-08-03 08:51:00 +00:00
|
|
|
Objlist_Entry *entry;
|
1999-08-30 01:48:19 +00:00
|
|
|
Obj_Entry *obj;
|
2000-07-26 04:24:40 +00:00
|
|
|
Obj_Entry **preload_tail;
|
|
|
|
Objlist initlist;
|
2010-12-25 08:51:20 +00:00
|
|
|
RtldLockState lockstate;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* On entry, the dynamic linker itself has not been relocated yet.
|
|
|
|
* Be very careful not to reference any global data until after
|
|
|
|
* init_rtld has returned. It is OK to reference file-scope statics
|
|
|
|
* and string constants, and to call static and global functions.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Find the auxiliary vector on the stack. */
|
|
|
|
argc = *sp++;
|
|
|
|
argv = (char **) sp;
|
|
|
|
sp += argc + 1; /* Skip over arguments and NULL terminator */
|
|
|
|
env = (char **) sp;
|
|
|
|
while (*sp++ != 0) /* Skip over environment, and NULL terminator */
|
|
|
|
;
|
1998-09-04 19:03:57 +00:00
|
|
|
aux = (Elf_Auxinfo *) sp;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
/* Digest the auxiliary vector. */
|
|
|
|
for (i = 0; i < AT_COUNT; i++)
|
|
|
|
aux_info[i] = NULL;
|
|
|
|
for (auxp = aux; auxp->a_type != AT_NULL; auxp++) {
|
|
|
|
if (auxp->a_type < AT_COUNT)
|
|
|
|
aux_info[auxp->a_type] = auxp;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Initialize and relocate ourselves. */
|
|
|
|
assert(aux_info[AT_BASE] != NULL);
|
2010-08-17 09:05:39 +00:00
|
|
|
init_rtld((caddr_t) aux_info[AT_BASE]->a_un.a_ptr, aux_info);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
__progname = obj_rtld.path;
|
1999-07-18 00:02:19 +00:00
|
|
|
argv0 = argv[0] != NULL ? argv[0] : "(null)";
|
1998-03-07 19:24:35 +00:00
|
|
|
environ = env;
|
|
|
|
|
2003-05-31 15:24:29 +00:00
|
|
|
trust = !issetugid();
|
2003-05-31 14:45:11 +00:00
|
|
|
|
2004-03-21 01:21:26 +00:00
|
|
|
ld_bind_now = getenv(LD_ "BIND_NOW");
|
2007-05-17 18:00:27 +00:00
|
|
|
/*
|
|
|
|
* If the process is tainted, then we un-set the dangerous environment
|
|
|
|
* variables. The process will be marked as tainted until setuid(2)
|
|
|
|
* is called. If any child process calls setuid(2) we do not want any
|
|
|
|
* future processes to honor the potentially un-safe variables.
|
|
|
|
*/
|
|
|
|
if (!trust) {
|
2009-12-01 02:57:06 +00:00
|
|
|
if (unsetenv(LD_ "PRELOAD") || unsetenv(LD_ "LIBMAP") ||
|
|
|
|
unsetenv(LD_ "LIBRARY_PATH") || unsetenv(LD_ "LIBMAP_DISABLE") ||
|
2010-12-25 08:51:20 +00:00
|
|
|
unsetenv(LD_ "DEBUG") || unsetenv(LD_ "ELF_HINTS_PATH") ||
|
|
|
|
unsetenv(LD_ "LOADFLTR")) {
|
2009-12-01 02:57:06 +00:00
|
|
|
_rtld_error("environment corrupt; aborting");
|
|
|
|
die();
|
|
|
|
}
|
2007-05-17 18:00:27 +00:00
|
|
|
}
|
|
|
|
ld_debug = getenv(LD_ "DEBUG");
|
|
|
|
libmap_disable = getenv(LD_ "LIBMAP_DISABLE") != NULL;
|
|
|
|
libmap_override = getenv(LD_ "LIBMAP");
|
|
|
|
ld_library_path = getenv(LD_ "LIBRARY_PATH");
|
|
|
|
ld_preload = getenv(LD_ "PRELOAD");
|
2009-03-23 16:49:00 +00:00
|
|
|
ld_elf_hints_path = getenv(LD_ "ELF_HINTS_PATH");
|
2010-12-25 08:51:20 +00:00
|
|
|
ld_loadfltr = getenv(LD_ "LOADFLTR") != NULL;
|
2007-05-17 18:00:27 +00:00
|
|
|
dangerous_ld_env = libmap_disable || (libmap_override != NULL) ||
|
2009-03-23 16:49:00 +00:00
|
|
|
(ld_library_path != NULL) || (ld_preload != NULL) ||
|
2010-12-25 08:51:20 +00:00
|
|
|
(ld_elf_hints_path != NULL) || ld_loadfltr;
|
2004-03-21 01:21:26 +00:00
|
|
|
ld_tracing = getenv(LD_ "TRACE_LOADED_OBJECTS");
|
2007-01-09 17:50:05 +00:00
|
|
|
ld_utrace = getenv(LD_ "UTRACE");
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2009-03-23 16:49:00 +00:00
|
|
|
if ((ld_elf_hints_path == NULL) || strlen(ld_elf_hints_path) == 0)
|
|
|
|
ld_elf_hints_path = _PATH_ELF_HINTS;
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
if (ld_debug != NULL && *ld_debug != '\0')
|
|
|
|
debug = 1;
|
|
|
|
dbg("%s is initialized, base address = %p", __progname,
|
|
|
|
(caddr_t) aux_info[AT_BASE]->a_un.a_ptr);
|
1999-04-09 00:28:43 +00:00
|
|
|
dbg("RTLD dynamic = %p", obj_rtld.dynamic);
|
|
|
|
dbg("RTLD pltgot = %p", obj_rtld.pltgot);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
dbg("initializing thread locks");
|
|
|
|
lockdflt_init();
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
|
|
|
* Load the main program, or process its program header if it is
|
|
|
|
* already loaded.
|
|
|
|
*/
|
|
|
|
if (aux_info[AT_EXECFD] != NULL) { /* Load the main program. */
|
|
|
|
int fd = aux_info[AT_EXECFD]->a_un.a_val;
|
|
|
|
dbg("loading main program");
|
1999-08-30 01:50:41 +00:00
|
|
|
obj_main = map_object(fd, argv0, NULL);
|
1998-03-07 19:24:35 +00:00
|
|
|
close(fd);
|
|
|
|
if (obj_main == NULL)
|
|
|
|
die();
|
2011-01-08 17:11:49 +00:00
|
|
|
max_stack_flags = obj->stack_flags;
|
1998-03-07 19:24:35 +00:00
|
|
|
} else { /* Main program already loaded. */
|
1998-09-04 19:03:57 +00:00
|
|
|
const Elf_Phdr *phdr;
|
1998-03-07 19:24:35 +00:00
|
|
|
int phnum;
|
|
|
|
caddr_t entry;
|
|
|
|
|
|
|
|
dbg("processing main program's program header");
|
|
|
|
assert(aux_info[AT_PHDR] != NULL);
|
1998-09-04 19:03:57 +00:00
|
|
|
phdr = (const Elf_Phdr *) aux_info[AT_PHDR]->a_un.a_ptr;
|
1998-03-07 19:24:35 +00:00
|
|
|
assert(aux_info[AT_PHNUM] != NULL);
|
|
|
|
phnum = aux_info[AT_PHNUM]->a_un.a_val;
|
|
|
|
assert(aux_info[AT_PHENT] != NULL);
|
1998-09-04 19:03:57 +00:00
|
|
|
assert(aux_info[AT_PHENT]->a_un.a_val == sizeof(Elf_Phdr));
|
1998-03-07 19:24:35 +00:00
|
|
|
assert(aux_info[AT_ENTRY] != NULL);
|
|
|
|
entry = (caddr_t) aux_info[AT_ENTRY]->a_un.a_ptr;
|
1999-07-18 00:02:19 +00:00
|
|
|
if ((obj_main = digest_phdr(phdr, phnum, entry, argv0)) == NULL)
|
|
|
|
die();
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
2009-03-18 13:40:37 +00:00
|
|
|
if (aux_info[AT_EXECPATH] != 0) {
|
|
|
|
char *kexecpath;
|
|
|
|
char buf[MAXPATHLEN];
|
|
|
|
|
|
|
|
kexecpath = aux_info[AT_EXECPATH]->a_un.a_ptr;
|
|
|
|
dbg("AT_EXECPATH %p %s", kexecpath, kexecpath);
|
|
|
|
if (kexecpath[0] == '/')
|
|
|
|
obj_main->path = kexecpath;
|
|
|
|
else if (getcwd(buf, sizeof(buf)) == NULL ||
|
|
|
|
strlcat(buf, "/", sizeof(buf)) >= sizeof(buf) ||
|
|
|
|
strlcat(buf, kexecpath, sizeof(buf)) >= sizeof(buf))
|
|
|
|
obj_main->path = xstrdup(argv0);
|
|
|
|
else
|
|
|
|
obj_main->path = xstrdup(buf);
|
|
|
|
} else {
|
|
|
|
dbg("No AT_EXECPATH");
|
|
|
|
obj_main->path = xstrdup(argv0);
|
|
|
|
}
|
|
|
|
dbg("obj_main path %s", obj_main->path);
|
1998-03-07 19:24:35 +00:00
|
|
|
obj_main->mainprog = true;
|
1999-08-30 01:54:13 +00:00
|
|
|
|
2011-01-08 17:11:49 +00:00
|
|
|
if (aux_info[AT_STACKPROT] != NULL &&
|
|
|
|
aux_info[AT_STACKPROT]->a_un.a_val != 0)
|
|
|
|
stack_prot = aux_info[AT_STACKPROT]->a_un.a_val;
|
|
|
|
|
1999-08-30 01:54:13 +00:00
|
|
|
/*
|
|
|
|
* Get the actual dynamic linker pathname from the executable if
|
|
|
|
* possible. (It should always be possible.) That ensures that
|
|
|
|
* gdb will find the right dynamic linker even if a non-standard
|
|
|
|
* one is being used.
|
|
|
|
*/
|
|
|
|
if (obj_main->interp != NULL &&
|
|
|
|
strcmp(obj_main->interp, obj_rtld.path) != 0) {
|
|
|
|
free(obj_rtld.path);
|
|
|
|
obj_rtld.path = xstrdup(obj_main->interp);
|
2004-03-29 18:37:37 +00:00
|
|
|
__progname = obj_rtld.path;
|
1999-08-30 01:54:13 +00:00
|
|
|
}
|
|
|
|
|
2002-04-02 02:19:02 +00:00
|
|
|
digest_dynamic(obj_main, 0);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
1998-04-30 07:48:02 +00:00
|
|
|
linkmap_add(obj_main);
|
|
|
|
linkmap_add(&obj_rtld);
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/* Link the main program into the list of objects. */
|
|
|
|
*obj_tail = obj_main;
|
|
|
|
obj_tail = &obj_main->next;
|
Solve the dynamic linker's problems with multithreaded programs once
and for all (I hope). Packages such as wine, JDK, and linuxthreads
should no longer have any problems with re-entering the dynamic
linker.
This commit replaces the locking used in the dynamic linker with a
new spinlock-based reader/writer lock implementation. Brian
Fundakowski Feldman <green> argued for this from the very beginning,
but it took me a long time to come around to his point of view.
Spinlocks are the only kinds of locks that work with all thread
packages. But on uniprocessor systems they can be inefficient,
because while a contender for the lock is spinning the holder of the
lock cannot make any progress toward releasing it. To alleviate
this disadvantage I have borrowed a trick from Sleepycat's Berkeley
DB implementation. When spinning for a lock, the requester does a
nanosleep() call for 1 usec. each time around the loop. This will
generally yield the CPU to other threads, allowing the lock holder
to finish its business and release the lock. I chose 1 usec. as the
minimum sleep which would with reasonable certainty not be rounded
down to 0.
The formerly machine-independent file "lockdflt.c" has been moved
into the architecture-specific subdirectories by repository copy.
It now contains the machine-dependent spinlocking code. For the
spinlocks I used the very nifty "simple, non-scalable reader-preference
lock" which I found at
<http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/rw.html>
on all CPUs except the 80386 (the specific CPU model, not the
architecture). The 80386 CPU doesn't support the necessary "cmpxchg"
instruction, so on that CPU a simple exclusive test-and-set lock
is used instead. 80386 CPUs are detected at initialization time by
trying to execute "cmpxchg" and catching the resulting SIGILL
signal.
To reduce contention for the locks, I have revamped a couple of
key data structures, permitting all common operations to be done
under non-exclusive (reader) locking. The only operations that
require exclusive locking now are the rare intrusive operations
such as dlopen() and dlclose().
The dllockinit() interface is now deprecated. It still exists,
but only as a do-nothing stub. I plan to remove it as soon as is
reasonably possible. (From the very beginning it was clearly
labeled as experimental and subject to change.) As far as I know,
only the linuxthreads port uses dllockinit(). This interface turned
out to have several problems. As one example, when the dynamic
linker called a client-supplied locking function, that function
sometimes needed lazy binding, causing re-entry into the dynamic
linker and a big looping mess. And in any case, it turned out to be
too burdensome to require threads packages to register themselves
with the dynamic linker.
2000-07-08 04:10:38 +00:00
|
|
|
obj_count++;
|
2007-04-03 18:31:20 +00:00
|
|
|
obj_loads++;
|
2000-07-26 04:24:40 +00:00
|
|
|
/* Make sure we don't call the main program's init and fini functions. */
|
2004-03-05 08:10:19 +00:00
|
|
|
obj_main->init = obj_main->fini = (Elf_Addr)NULL;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
1999-04-05 02:36:40 +00:00
|
|
|
/* Initialize a fake symbol for resolving undefined weak references. */
|
|
|
|
sym_zero.st_info = ELF_ST_INFO(STB_GLOBAL, STT_NOTYPE);
|
2002-04-27 05:32:51 +00:00
|
|
|
sym_zero.st_shndx = SHN_UNDEF;
|
2009-10-10 15:27:10 +00:00
|
|
|
sym_zero.st_value = -(uintptr_t)obj_main->relocbase;
|
1999-04-05 02:36:40 +00:00
|
|
|
|
2003-05-31 14:45:11 +00:00
|
|
|
if (!libmap_disable)
|
2005-02-04 02:46:41 +00:00
|
|
|
libmap_disable = (bool)lm_init(libmap_override);
|
2003-04-07 16:21:26 +00:00
|
|
|
|
1998-09-22 02:09:56 +00:00
|
|
|
dbg("loading LD_PRELOAD libraries");
|
|
|
|
if (load_preload_objects() == -1)
|
|
|
|
die();
|
2000-07-26 04:24:40 +00:00
|
|
|
preload_tail = obj_tail;
|
1998-09-22 02:09:56 +00:00
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
dbg("loading needed objects");
|
2009-11-26 13:57:20 +00:00
|
|
|
if (load_needed_objects(obj_main, 0) == -1)
|
1998-03-07 19:24:35 +00:00
|
|
|
die();
|
1999-08-30 01:48:19 +00:00
|
|
|
|
2000-07-26 04:24:40 +00:00
|
|
|
/* Make a list of all objects loaded at startup. */
|
2003-05-08 01:31:36 +00:00
|
|
|
for (obj = obj_list; obj != NULL; obj = obj->next) {
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_push_tail(&list_main, obj);
|
2003-05-08 01:31:36 +00:00
|
|
|
obj->refcount++;
|
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2005-12-18 19:43:33 +00:00
|
|
|
dbg("checking for required versions");
|
|
|
|
if (rtld_verify_versions(&list_main) == -1 && !ld_tracing)
|
|
|
|
die();
|
|
|
|
|
1998-05-01 08:39:27 +00:00
|
|
|
if (ld_tracing) { /* We're done */
|
|
|
|
trace_loaded_objects(obj_main);
|
|
|
|
exit(0);
|
|
|
|
}
|
|
|
|
|
2004-03-21 01:21:26 +00:00
|
|
|
if (getenv(LD_ "DUMP_REL_PRE") != NULL) {
|
2003-06-19 03:55:38 +00:00
|
|
|
dump_relocations(obj_main);
|
|
|
|
exit (0);
|
|
|
|
}
|
|
|
|
|
2011-10-08 12:39:47 +00:00
|
|
|
/*
|
|
|
|
* Processing tls relocations requires having the tls offsets
|
|
|
|
* initialized. Prepare offsets before starting initial
|
|
|
|
* relocation processing.
|
|
|
|
*/
|
|
|
|
dbg("initializing initial thread local storage offsets");
|
2004-11-02 09:42:21 +00:00
|
|
|
STAILQ_FOREACH(entry, &list_main, link) {
|
|
|
|
/*
|
|
|
|
* Allocate all the initial objects out of the static TLS
|
|
|
|
* block even if they didn't ask for it.
|
|
|
|
*/
|
|
|
|
allocate_tls_offset(entry->obj);
|
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
if (relocate_objects(obj_main,
|
2010-12-25 08:51:20 +00:00
|
|
|
ld_bind_now != NULL && *ld_bind_now != '\0', &obj_rtld, NULL) == -1)
|
1998-03-07 19:24:35 +00:00
|
|
|
die();
|
|
|
|
|
|
|
|
dbg("doing copy relocations");
|
|
|
|
if (do_copy_relocations(obj_main) == -1)
|
|
|
|
die();
|
|
|
|
|
2004-03-21 01:21:26 +00:00
|
|
|
if (getenv(LD_ "DUMP_REL_POST") != NULL) {
|
2003-06-19 03:55:38 +00:00
|
|
|
dump_relocations(obj_main);
|
|
|
|
exit (0);
|
|
|
|
}
|
|
|
|
|
2011-10-08 12:39:47 +00:00
|
|
|
/*
|
|
|
|
* Setup TLS for main thread. This must be done after the
|
|
|
|
* relocations are processed, since tls initialization section
|
|
|
|
* might be the subject for relocations.
|
|
|
|
*/
|
|
|
|
dbg("initializing initial thread local storage");
|
|
|
|
allocate_initial_tls(obj_list);
|
|
|
|
|
1999-04-21 04:06:57 +00:00
|
|
|
dbg("initializing key program variables");
|
|
|
|
set_program_var("__progname", argv[0] != NULL ? basename(argv[0]) : "");
|
|
|
|
set_program_var("environ", env);
|
2010-08-17 09:08:28 +00:00
|
|
|
set_program_var("__elf_aux_vector", aux);
|
1999-04-21 04:06:57 +00:00
|
|
|
|
2000-07-26 04:24:40 +00:00
|
|
|
/* Make a list of init functions to call. */
|
|
|
|
objlist_init(&initlist);
|
|
|
|
initlist_add_objects(obj_list, preload_tail, &initlist);
|
|
|
|
|
2000-08-26 05:13:29 +00:00
|
|
|
r_debug_state(NULL, &obj_main->linkmap); /* say hello to gdb! */
|
1999-07-03 23:54:02 +00:00
|
|
|
|
2011-01-10 16:09:35 +00:00
|
|
|
map_stacks_exec(NULL);
|
2011-01-08 17:11:49 +00:00
|
|
|
|
2012-01-04 17:17:11 +00:00
|
|
|
dbg("resolving ifuncs");
|
|
|
|
if (resolve_objects_ifunc(obj_main,
|
|
|
|
ld_bind_now != NULL && *ld_bind_now != '\0', NULL) == -1)
|
|
|
|
die();
|
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
wlock_acquire(rtld_bind_lock, &lockstate);
|
2008-09-03 01:05:32 +00:00
|
|
|
objlist_call_init(&initlist, &lockstate);
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_clear(&initlist);
|
2010-12-25 08:51:20 +00:00
|
|
|
dbg("loading filtees");
|
|
|
|
for (obj = obj_list->next; obj != NULL; obj = obj->next) {
|
|
|
|
if (ld_loadfltr || obj->z_loadfltr)
|
|
|
|
load_filtees(obj, 0, &lockstate);
|
|
|
|
}
|
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
dbg("transferring control to program entry point = %p", obj_main->entry);
|
|
|
|
|
|
|
|
/* Return the exit procedure and the program entry point. */
|
1998-09-04 19:03:57 +00:00
|
|
|
*exit_proc = rtld_exit;
|
|
|
|
*objp = obj_main;
|
1998-03-07 19:24:35 +00:00
|
|
|
return (func_ptr_type) obj_main->entry;
|
|
|
|
}
|
|
|
|
|
2011-12-12 11:03:14 +00:00
|
|
|
void *
|
|
|
|
rtld_resolve_ifunc(const Obj_Entry *obj, const Elf_Sym *def)
|
|
|
|
{
|
|
|
|
void *ptr;
|
|
|
|
Elf_Addr target;
|
|
|
|
|
|
|
|
ptr = (void *)make_function_pointer(def, obj);
|
|
|
|
target = ((Elf_Addr (*)(void))ptr)();
|
|
|
|
return ((void *)target);
|
|
|
|
}
|
|
|
|
|
1999-06-25 02:53:59 +00:00
|
|
|
/*
 * Lazy-binding entry point, called from the PLT trampoline with the
 * object whose PLT is being resolved and the offset of the relocation
 * within that object's PLT relocation table.  Resolves the symbol,
 * patches the jump slot, and returns the value the trampoline should
 * transfer control to.
 */
Elf_Addr
_rtld_bind(Obj_Entry *obj, Elf_Size reloff)
{
    const Elf_Dyn *rel;	/* NOTE(review): see declaration below */
    const Elf_Rel *rel;
    const Elf_Sym *def;
    const Obj_Entry *defobj;
    Elf_Addr *where;
    Elf_Addr target;
    RtldLockState lockstate;

    /*
     * Take the bind lock for reading.  If a signal arrives and forces a
     * longjmp back here (sigsetjmp returns non-zero), upgrade to the
     * writer lock before retrying the work below.
     */
    rlock_acquire(rtld_bind_lock, &lockstate);
    if (sigsetjmp(lockstate.env, 0) != 0)
	    lock_upgrade(rtld_bind_lock, &lockstate);
    /* reloff is a byte offset into whichever PLT reloc table exists. */
    if (obj->pltrel)
	rel = (const Elf_Rel *) ((caddr_t) obj->pltrel + reloff);
    else
	rel = (const Elf_Rel *) ((caddr_t) obj->pltrela + reloff);

    where = (Elf_Addr *) (obj->relocbase + rel->r_offset);
    def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, true, NULL,
	&lockstate);
    if (def == NULL)
	die();
    /* ifunc symbols require running their resolver to get the target. */
    if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
	target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
    else
	target = (Elf_Addr)(defobj->relocbase + def->st_value);

    dbg("\"%s\" in \"%s\" ==> %p in \"%s\"",
      defobj->strtab + def->st_name, basename(obj->path),
      (void *)target, basename(defobj->path));

    /*
     * Write the new contents for the jmpslot. Note that depending on
     * architecture, the value which we need to return back to the
     * lazy binding trampoline may or may not be the target
     * address. The value returned from reloc_jmpslot() is the value
     * that the trampoline needs.
     */
    target = reloc_jmpslot(where, target, defobj, obj, rel);
    lock_release(rtld_bind_lock, &lockstate);
    return target;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Error reporting function. Use it like printf. If formats the message
|
|
|
|
* into a buffer, and sets things up so that the next call to dlerror()
|
|
|
|
* will return the message.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
_rtld_error(const char *fmt, ...)
|
|
|
|
{
|
|
|
|
static char buf[512];
|
|
|
|
va_list ap;
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
2011-08-24 20:05:13 +00:00
|
|
|
rtld_vsnprintf(buf, sizeof buf, fmt, ap);
|
1998-03-07 19:24:35 +00:00
|
|
|
error_message = buf;
|
|
|
|
va_end(ap);
|
|
|
|
}
|
|
|
|
|
2001-01-05 04:36:17 +00:00
|
|
|
/*
|
|
|
|
* Return a dynamically-allocated copy of the current error message, if any.
|
|
|
|
*/
|
|
|
|
static char *
|
|
|
|
errmsg_save(void)
|
|
|
|
{
|
|
|
|
return error_message == NULL ? NULL : xstrdup(error_message);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Restore the current error message from a copy which was previously saved
|
|
|
|
* by errmsg_save(). The copy is freed.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
errmsg_restore(char *saved_msg)
|
|
|
|
{
|
|
|
|
if (saved_msg == NULL)
|
|
|
|
error_message = NULL;
|
|
|
|
else {
|
|
|
|
_rtld_error("%s", saved_msg);
|
|
|
|
free(saved_msg);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
 * Return a pointer to the final path component of "name", i.e. the text
 * following the last '/'.  If the path contains no '/', the whole string
 * is returned.  No allocation: the result points into the argument.
 */
static const char *
basename(const char *name)
{
    const char *slash;

    slash = strrchr(name, '/');
    if (slash == NULL)
	return name;
    return slash + 1;
}
|
|
|
|
|
2009-03-18 13:40:37 +00:00
|
|
|
static struct utsname uts;
|
|
|
|
|
|
|
|
static int
|
|
|
|
origin_subst_one(char **res, const char *real, const char *kw, const char *subst,
|
|
|
|
char *may_free)
|
|
|
|
{
|
|
|
|
const char *p, *p1;
|
|
|
|
char *res1;
|
|
|
|
int subst_len;
|
|
|
|
int kw_len;
|
|
|
|
|
|
|
|
res1 = *res = NULL;
|
|
|
|
p = real;
|
|
|
|
subst_len = kw_len = 0;
|
|
|
|
for (;;) {
|
|
|
|
p1 = strstr(p, kw);
|
|
|
|
if (p1 != NULL) {
|
|
|
|
if (subst_len == 0) {
|
|
|
|
subst_len = strlen(subst);
|
|
|
|
kw_len = strlen(kw);
|
|
|
|
}
|
|
|
|
if (*res == NULL) {
|
|
|
|
*res = xmalloc(PATH_MAX);
|
|
|
|
res1 = *res;
|
|
|
|
}
|
|
|
|
if ((res1 - *res) + subst_len + (p1 - p) >= PATH_MAX) {
|
|
|
|
_rtld_error("Substitution of %s in %s cannot be performed",
|
|
|
|
kw, real);
|
|
|
|
if (may_free != NULL)
|
|
|
|
free(may_free);
|
|
|
|
free(res);
|
|
|
|
return (false);
|
|
|
|
}
|
|
|
|
memcpy(res1, p, p1 - p);
|
|
|
|
res1 += p1 - p;
|
|
|
|
memcpy(res1, subst, subst_len);
|
|
|
|
res1 += subst_len;
|
|
|
|
p = p1 + kw_len;
|
|
|
|
} else {
|
|
|
|
if (*res == NULL) {
|
|
|
|
if (may_free != NULL)
|
|
|
|
*res = may_free;
|
|
|
|
else
|
|
|
|
*res = xstrdup(real);
|
|
|
|
return (true);
|
|
|
|
}
|
|
|
|
*res1 = '\0';
|
|
|
|
if (may_free != NULL)
|
|
|
|
free(may_free);
|
|
|
|
if (strlcat(res1, p, PATH_MAX - (res1 - *res)) >= PATH_MAX) {
|
|
|
|
free(res);
|
|
|
|
return (false);
|
|
|
|
}
|
|
|
|
return (true);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static char *
|
|
|
|
origin_subst(const char *real, const char *origin_path)
|
|
|
|
{
|
|
|
|
char *res1, *res2, *res3, *res4;
|
|
|
|
|
|
|
|
if (uts.sysname[0] == '\0') {
|
|
|
|
if (uname(&uts) != 0) {
|
|
|
|
_rtld_error("utsname failed: %d", errno);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!origin_subst_one(&res1, real, "$ORIGIN", origin_path, NULL) ||
|
|
|
|
!origin_subst_one(&res2, res1, "$OSNAME", uts.sysname, res1) ||
|
|
|
|
!origin_subst_one(&res3, res2, "$OSREL", uts.release, res2) ||
|
|
|
|
!origin_subst_one(&res4, res3, "$PLATFORM", uts.machine, res3))
|
|
|
|
return (NULL);
|
|
|
|
return (res4);
|
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
 * Print the pending dlerror() message (or a generic fallback) to stderr
 * and terminate the process immediately.  Never returns.
 */
static void
die(void)
{
    const char *reason;

    reason = dlerror();
    if (reason == NULL)
	reason = "Fatal error";
    rtld_fdputstr(STDERR_FILENO, reason);
    rtld_fdputchar(STDERR_FILENO, '\n');
    _exit(1);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Process a shared object's DYNAMIC section, and save the important
|
|
|
|
* information in its Obj_Entry structure.
|
|
|
|
*/
|
|
|
|
/*
 * First pass over an object's DYNAMIC section: record the addresses and
 * sizes of its tables in the Obj_Entry, build the needed/filtee lists,
 * and latch the DT_RPATH/DT_RUNPATH and DT_SONAME entries for
 * digest_dynamic2() (they cannot be resolved until the string table
 * address is known).
 */
static void
digest_dynamic1(Obj_Entry *obj, int early, const Elf_Dyn **dyn_rpath,
    const Elf_Dyn **dyn_soname)
{
    const Elf_Dyn *dynp;
    Needed_Entry **needed_tail = &obj->needed;
    Needed_Entry **needed_filtees_tail = &obj->needed_filtees;
    Needed_Entry **needed_aux_filtees_tail = &obj->needed_aux_filtees;
    int plttype = DT_REL;	/* default; DT_PLTREL may override to DT_RELA */

    *dyn_rpath = NULL;
    *dyn_soname = NULL;

    obj->bind_now = false;
    for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) {
	switch (dynp->d_tag) {

	case DT_REL:
	    obj->rel = (const Elf_Rel *) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_RELSZ:
	    obj->relsize = dynp->d_un.d_val;
	    break;

	case DT_RELENT:
	    assert(dynp->d_un.d_val == sizeof(Elf_Rel));
	    break;

	case DT_JMPREL:
	    obj->pltrel = (const Elf_Rel *)
	      (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_PLTRELSZ:
	    obj->pltrelsize = dynp->d_un.d_val;
	    break;

	case DT_RELA:
	    obj->rela = (const Elf_Rela *) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_RELASZ:
	    obj->relasize = dynp->d_un.d_val;
	    break;

	case DT_RELAENT:
	    assert(dynp->d_un.d_val == sizeof(Elf_Rela));
	    break;

	case DT_PLTREL:
	    /* Records whether the PLT relocs are Rel or Rela entries. */
	    plttype = dynp->d_un.d_val;
	    assert(dynp->d_un.d_val == DT_REL || plttype == DT_RELA);
	    break;

	case DT_SYMTAB:
	    obj->symtab = (const Elf_Sym *)
	      (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_SYMENT:
	    assert(dynp->d_un.d_val == sizeof(Elf_Sym));
	    break;

	case DT_STRTAB:
	    obj->strtab = (const char *) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_STRSZ:
	    obj->strsize = dynp->d_un.d_val;
	    break;

	case DT_VERNEED:
	    obj->verneed = (const Elf_Verneed *) (obj->relocbase +
		dynp->d_un.d_val);
	    break;

	case DT_VERNEEDNUM:
	    obj->verneednum = dynp->d_un.d_val;
	    break;

	case DT_VERDEF:
	    obj->verdef = (const Elf_Verdef *) (obj->relocbase +
		dynp->d_un.d_val);
	    break;

	case DT_VERDEFNUM:
	    obj->verdefnum = dynp->d_un.d_val;
	    break;

	case DT_VERSYM:
	    obj->versyms = (const Elf_Versym *)(obj->relocbase +
		dynp->d_un.d_val);
	    break;

	case DT_HASH:
	    {
		/* SysV hash table: [nbuckets, nchains, buckets..., chains...] */
		const Elf_Hashelt *hashtab = (const Elf_Hashelt *)
		  (obj->relocbase + dynp->d_un.d_ptr);
		obj->nbuckets = hashtab[0];
		obj->nchains = hashtab[1];
		obj->buckets = hashtab + 2;
		obj->chains = obj->buckets + obj->nbuckets;
	    }
	    break;

	case DT_NEEDED:
	    /* rtld itself never records dependencies for loading. */
	    if (!obj->rtld) {
		Needed_Entry *nep = NEW(Needed_Entry);
		nep->name = dynp->d_un.d_val;
		nep->obj = NULL;
		nep->next = NULL;

		*needed_tail = nep;
		needed_tail = &nep->next;
	    }
	    break;

	case DT_FILTER:
	    if (!obj->rtld) {
		Needed_Entry *nep = NEW(Needed_Entry);
		nep->name = dynp->d_un.d_val;
		nep->obj = NULL;
		nep->next = NULL;

		*needed_filtees_tail = nep;
		needed_filtees_tail = &nep->next;
	    }
	    break;

	case DT_AUXILIARY:
	    if (!obj->rtld) {
		Needed_Entry *nep = NEW(Needed_Entry);
		nep->name = dynp->d_un.d_val;
		nep->obj = NULL;
		nep->next = NULL;

		*needed_aux_filtees_tail = nep;
		needed_aux_filtees_tail = &nep->next;
	    }
	    break;

	case DT_PLTGOT:
	    obj->pltgot = (Elf_Addr *) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_TEXTREL:
	    obj->textrel = true;
	    break;

	case DT_SYMBOLIC:
	    obj->symbolic = true;
	    break;

	case DT_RPATH:
	case DT_RUNPATH:	/* XXX: process separately */
	    /*
	     * We have to wait until later to process this, because we
	     * might not have gotten the address of the string table yet.
	     */
	    *dyn_rpath = dynp;
	    break;

	case DT_SONAME:
	    *dyn_soname = dynp;
	    break;

	case DT_INIT:
	    obj->init = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_FINI:
	    obj->fini = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	/*
	 * Don't process DT_DEBUG on MIPS as the dynamic section
	 * is mapped read-only. DT_MIPS_RLD_MAP is used instead.
	 */

#ifndef __mips__
	case DT_DEBUG:
	    /* XXX - not implemented yet */
	    if (!early)
		dbg("Filling in DT_DEBUG entry");
	    /* Publish &r_debug so the debugger can find the link map. */
	    ((Elf_Dyn*)dynp)->d_un.d_ptr = (Elf_Addr) &r_debug;
	    break;
#endif

	case DT_FLAGS:
	    /* DF_ORIGIN is only honored for trusted (non-setuid) processes. */
	    if ((dynp->d_un.d_val & DF_ORIGIN) && trust)
		obj->z_origin = true;
	    if (dynp->d_un.d_val & DF_SYMBOLIC)
		obj->symbolic = true;
	    if (dynp->d_un.d_val & DF_TEXTREL)
		obj->textrel = true;
	    if (dynp->d_un.d_val & DF_BIND_NOW)
		obj->bind_now = true;
	    /*if (dynp->d_un.d_val & DF_STATIC_TLS)
		;*/
	    break;
#ifdef __mips__
	case DT_MIPS_LOCAL_GOTNO:
	    obj->local_gotno = dynp->d_un.d_val;
	    break;

	case DT_MIPS_SYMTABNO:
	    obj->symtabno = dynp->d_un.d_val;
	    break;

	case DT_MIPS_GOTSYM:
	    obj->gotsym = dynp->d_un.d_val;
	    break;

	case DT_MIPS_RLD_MAP:
#ifdef notyet
	    if (!early)
		dbg("Filling in DT_DEBUG entry");
	    ((Elf_Dyn*)dynp)->d_un.d_ptr = (Elf_Addr) &r_debug;
#endif
	    break;
#endif

	case DT_FLAGS_1:
	    if (dynp->d_un.d_val & DF_1_NOOPEN)
		obj->z_noopen = true;
	    /* DF_1_ORIGIN is only honored for trusted processes. */
	    if ((dynp->d_un.d_val & DF_1_ORIGIN) && trust)
		obj->z_origin = true;
	    /*if (dynp->d_un.d_val & DF_1_GLOBAL)
		XXX ;*/
	    if (dynp->d_un.d_val & DF_1_BIND_NOW)
		obj->bind_now = true;
	    if (dynp->d_un.d_val & DF_1_NODELETE)
		obj->z_nodelete = true;
	    if (dynp->d_un.d_val & DF_1_LOADFLTR)
		obj->z_loadfltr = true;
	    break;

	default:
	    if (!early) {
		dbg("Ignoring d_tag %ld = %#lx", (long)dynp->d_tag,
		    (long)dynp->d_tag);
	    }
	    break;
	}
    }

    obj->traced = false;

    /*
     * If the PLT relocations turned out to be Rela entries, move them
     * from the Rel fields filled in above to the Rela fields.
     */
    if (plttype == DT_RELA) {
	obj->pltrela = (const Elf_Rela *) obj->pltrel;
	obj->pltrel = NULL;
	obj->pltrelasize = obj->pltrelsize;
	obj->pltrelsize = 0;
    }
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
digest_dynamic2(Obj_Entry *obj, const Elf_Dyn *dyn_rpath,
|
|
|
|
const Elf_Dyn *dyn_soname)
|
|
|
|
{
|
1998-09-04 19:03:57 +00:00
|
|
|
|
2009-03-18 13:40:37 +00:00
|
|
|
if (obj->z_origin && obj->origin_path == NULL) {
|
|
|
|
obj->origin_path = xmalloc(PATH_MAX);
|
|
|
|
if (rtld_dirname_abs(obj->path, obj->origin_path) == -1)
|
|
|
|
die();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (dyn_rpath != NULL) {
|
|
|
|
obj->rpath = (char *)obj->strtab + dyn_rpath->d_un.d_val;
|
|
|
|
if (obj->z_origin)
|
|
|
|
obj->rpath = origin_subst(obj->rpath, obj->origin_path);
|
|
|
|
}
|
2005-12-18 19:43:33 +00:00
|
|
|
|
|
|
|
if (dyn_soname != NULL)
|
|
|
|
object_add_name(obj, obj->strtab + dyn_soname->d_un.d_val);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
2010-08-17 09:05:39 +00:00
|
|
|
static void
|
|
|
|
digest_dynamic(Obj_Entry *obj, int early)
|
|
|
|
{
|
|
|
|
const Elf_Dyn *dyn_rpath;
|
|
|
|
const Elf_Dyn *dyn_soname;
|
|
|
|
|
|
|
|
digest_dynamic1(obj, early, &dyn_rpath, &dyn_soname);
|
|
|
|
digest_dynamic2(obj, dyn_rpath, dyn_soname);
|
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
|
|
|
* Process a shared object's program header. This is used only for the
|
|
|
|
* main program, when the kernel has already loaded the main program
|
|
|
|
* into memory before calling the dynamic linker. It creates and
|
|
|
|
* returns an Obj_Entry structure.
|
|
|
|
*/
|
|
|
|
/*
 * Build an Obj_Entry for the main program from its program header
 * table.  The kernel has already mapped the program, so the first pass
 * derives the relocation base from PT_PHDR; the second pass records
 * the segment layout, interpreter, dynamic section, TLS template, and
 * stack/relro protections.  Returns NULL (with an error set) if the
 * program has no PT_LOAD segments.
 */
static Obj_Entry *
digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path)
{
    Obj_Entry *obj;
    const Elf_Phdr *phlimit = phdr + phnum;
    const Elf_Phdr *ph;
    int nsegs = 0;

    obj = obj_new();
    /* First pass: locate PT_PHDR to compute the load displacement. */
    for (ph = phdr; ph < phlimit; ph++) {
	if (ph->p_type != PT_PHDR)
	    continue;

	obj->phdr = phdr;
	obj->phsize = ph->p_memsz;
	/* relocbase = actual phdr address minus its link-time vaddr. */
	obj->relocbase = (caddr_t)phdr - ph->p_vaddr;
	break;
    }

    /* Default stack permissions, overridden by PT_GNU_STACK below. */
    obj->stack_flags = PF_X | PF_R | PF_W;

    for (ph = phdr; ph < phlimit; ph++) {
	switch (ph->p_type) {

	case PT_INTERP:
	    obj->interp = (const char *)(ph->p_vaddr + obj->relocbase);
	    break;

	case PT_LOAD:
	    /* PT_LOAD segments are sorted by vaddr per the ELF spec. */
	    if (nsegs == 0) {	/* First load segment */
		obj->vaddrbase = trunc_page(ph->p_vaddr);
		obj->mapbase = obj->vaddrbase + obj->relocbase;
		obj->textsize = round_page(ph->p_vaddr + ph->p_memsz) -
		  obj->vaddrbase;
	    } else {		/* Last load segment */
		obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) -
		  obj->vaddrbase;
	    }
	    nsegs++;
	    break;

	case PT_DYNAMIC:
	    obj->dynamic = (const Elf_Dyn *)(ph->p_vaddr + obj->relocbase);
	    break;

	case PT_TLS:
	    /* The main program always gets TLS module index 1. */
	    obj->tlsindex = 1;
	    obj->tlssize = ph->p_memsz;
	    obj->tlsalign = ph->p_align;
	    obj->tlsinitsize = ph->p_filesz;
	    obj->tlsinit = (void*)(ph->p_vaddr + obj->relocbase);
	    break;

	case PT_GNU_STACK:
	    obj->stack_flags = ph->p_flags;
	    break;

	case PT_GNU_RELRO:
	    obj->relro_page = obj->relocbase + trunc_page(ph->p_vaddr);
	    obj->relro_size = round_page(ph->p_memsz);
	    break;
	}
    }
    if (nsegs < 1) {
	_rtld_error("%s: too few PT_LOAD segments", path);
	return NULL;
    }

    obj->entry = entry;
    return obj;
}
|
|
|
|
|
|
|
|
static Obj_Entry *
|
|
|
|
dlcheck(void *handle)
|
|
|
|
{
|
|
|
|
Obj_Entry *obj;
|
|
|
|
|
|
|
|
for (obj = obj_list; obj != NULL; obj = obj->next)
|
|
|
|
if (obj == (Obj_Entry *) handle)
|
|
|
|
break;
|
|
|
|
|
2001-01-05 04:36:17 +00:00
|
|
|
if (obj == NULL || obj->refcount == 0 || obj->dl_refcount == 0) {
|
1998-03-07 19:24:35 +00:00
|
|
|
_rtld_error("Invalid shared object handle %p", handle);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
Solve the dynamic linker's problems with multithreaded programs once
and for all (I hope). Packages such as wine, JDK, and linuxthreads
should no longer have any problems with re-entering the dynamic
linker.
This commit replaces the locking used in the dynamic linker with a
new spinlock-based reader/writer lock implementation. Brian
Fundakowski Feldman <green> argued for this from the very beginning,
but it took me a long time to come around to his point of view.
Spinlocks are the only kinds of locks that work with all thread
packages. But on uniprocessor systems they can be inefficient,
because while a contender for the lock is spinning the holder of the
lock cannot make any progress toward releasing it. To alleviate
this disadvantage I have borrowed a trick from Sleepycat's Berkeley
DB implementation. When spinning for a lock, the requester does a
nanosleep() call for 1 usec. each time around the loop. This will
generally yield the CPU to other threads, allowing the lock holder
to finish its business and release the lock. I chose 1 usec. as the
minimum sleep which would with reasonable certainty not be rounded
down to 0.
The formerly machine-independent file "lockdflt.c" has been moved
into the architecture-specific subdirectories by repository copy.
It now contains the machine-dependent spinlocking code. For the
spinlocks I used the very nifty "simple, non-scalable reader-preference
lock" which I found at
<http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/rw.html>
on all CPUs except the 80386 (the specific CPU model, not the
architecture). The 80386 CPU doesn't support the necessary "cmpxchg"
instruction, so on that CPU a simple exclusive test-and-set lock
is used instead. 80386 CPUs are detected at initialization time by
trying to execute "cmpxchg" and catching the resulting SIGILL
signal.
To reduce contention for the locks, I have revamped a couple of
key data structures, permitting all common operations to be done
under non-exclusive (reader) locking. The only operations that
require exclusive locking now are the rare intrusive operations
such as dlopen() and dlclose().
The dllockinit() interface is now deprecated. It still exists,
but only as a do-nothing stub. I plan to remove it as soon as is
reasonably possible. (From the very beginning it was clearly
labeled as experimental and subject to change.) As far as I know,
only the linuxthreads port uses dllockinit(). This interface turned
out to have several problems. As one example, when the dynamic
linker called a client-supplied locking function, that function
sometimes needed lazy binding, causing re-entry into the dynamic
linker and a big looping mess. And in any case, it turned out to be
too burdensome to require threads packages to register themselves
with the dynamic linker.
2000-07-08 04:10:38 +00:00
|
|
|
/*
|
|
|
|
* If the given object is already in the donelist, return true. Otherwise
|
|
|
|
* add the object to the list and return false.
|
|
|
|
*/
|
|
|
|
static bool
|
2000-09-19 04:27:16 +00:00
|
|
|
donelist_check(DoneList *dlp, const Obj_Entry *obj)
|
Solve the dynamic linker's problems with multithreaded programs once
and for all (I hope). Packages such as wine, JDK, and linuxthreads
should no longer have any problems with re-entering the dynamic
linker.
This commit replaces the locking used in the dynamic linker with a
new spinlock-based reader/writer lock implementation. Brian
Fundakowski Feldman <green> argued for this from the very beginning,
but it took me a long time to come around to his point of view.
Spinlocks are the only kinds of locks that work with all thread
packages. But on uniprocessor systems they can be inefficient,
because while a contender for the lock is spinning the holder of the
lock cannot make any progress toward releasing it. To alleviate
this disadvantage I have borrowed a trick from Sleepycat's Berkeley
DB implementation. When spinning for a lock, the requester does a
nanosleep() call for 1 usec. each time around the loop. This will
generally yield the CPU to other threads, allowing the lock holder
to finish its business and release the lock. I chose 1 usec. as the
minimum sleep which would with reasonable certainty not be rounded
down to 0.
The formerly machine-independent file "lockdflt.c" has been moved
into the architecture-specific subdirectories by repository copy.
It now contains the machine-dependent spinlocking code. For the
spinlocks I used the very nifty "simple, non-scalable reader-preference
lock" which I found at
<http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/rw.html>
on all CPUs except the 80386 (the specific CPU model, not the
architecture). The 80386 CPU doesn't support the necessary "cmpxchg"
instruction, so on that CPU a simple exclusive test-and-set lock
is used instead. 80386 CPUs are detected at initialization time by
trying to execute "cmpxchg" and catching the resulting SIGILL
signal.
To reduce contention for the locks, I have revamped a couple of
key data structures, permitting all common operations to be done
under non-exclusive (reader) locking. The only operations that
require exclusive locking now are the rare intrusive operations
such as dlopen() and dlclose().
The dllockinit() interface is now deprecated. It still exists,
but only as a do-nothing stub. I plan to remove it as soon as is
reasonably possible. (From the very beginning it was clearly
labeled as experimental and subject to change.) As far as I know,
only the linuxthreads port uses dllockinit(). This interface turned
out to have several problems. As one example, when the dynamic
linker called a client-supplied locking function, that function
sometimes needed lazy binding, causing re-entry into the dynamic
linker and a big looping mess. And in any case, it turned out to be
too burdensome to require threads packages to register themselves
with the dynamic linker.
2000-07-08 04:10:38 +00:00
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < dlp->num_used; i++)
|
|
|
|
if (dlp->objs[i] == obj)
|
|
|
|
return true;
|
|
|
|
/*
|
|
|
|
* Our donelist allocation should always be sufficient. But if
|
|
|
|
* our threads locking isn't working properly, more shared objects
|
|
|
|
* could have been loaded since we allocated the list. That should
|
|
|
|
* never happen, but we'll handle it properly just in case it does.
|
|
|
|
*/
|
|
|
|
if (dlp->num_used < dlp->num_alloc)
|
|
|
|
dlp->objs[dlp->num_used++] = obj;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
 * Hash function for symbol table lookup.  Don't even think about changing
 * this.  It is specified by the System V ABI.
 */
unsigned long
elf_hash(const char *name)
{
    const unsigned char *s;
    unsigned long hash, hi;

    hash = 0;
    for (s = (const unsigned char *)name; *s != '\0'; s++) {
	hash = (hash << 4) + *s;
	hi = hash & 0xf0000000;
	if (hi != 0)
	    hash ^= hi >> 24;
	/* Clearing the top nibble keeps the value within 32 bits. */
	hash &= ~hi;
    }
    return hash;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find the library with the given name, and return its full pathname.
|
|
|
|
* The returned string is dynamically allocated. Generates an error
|
|
|
|
* message and returns NULL if the library cannot be found.
|
|
|
|
*
|
|
|
|
* If the second argument is non-NULL, then it refers to an already-
|
|
|
|
* loaded shared object, whose library search path will be searched.
|
1998-09-05 03:31:00 +00:00
|
|
|
*
|
|
|
|
* The search order is:
|
|
|
|
* LD_LIBRARY_PATH
|
2003-11-14 12:56:56 +00:00
|
|
|
* rpath in the referencing file
|
1998-09-05 03:31:00 +00:00
|
|
|
* ldconfig hints
|
2003-08-17 07:55:17 +00:00
|
|
|
* /lib:/usr/lib
|
1998-03-07 19:24:35 +00:00
|
|
|
*/
|
|
|
|
static char *
|
2003-04-07 16:21:26 +00:00
|
|
|
find_library(const char *xname, const Obj_Entry *refobj)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
|
|
|
char *pathname;
|
2003-04-07 16:21:26 +00:00
|
|
|
char *name;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2003-04-07 16:21:26 +00:00
|
|
|
if (strchr(xname, '/') != NULL) { /* Hard coded pathname */
|
|
|
|
if (xname[0] != '/' && !trust) {
|
1998-03-07 19:24:35 +00:00
|
|
|
_rtld_error("Absolute pathname required for shared object \"%s\"",
|
2003-04-07 16:21:26 +00:00
|
|
|
xname);
|
1998-03-07 19:24:35 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
2009-03-28 15:54:08 +00:00
|
|
|
if (refobj != NULL && refobj->z_origin)
|
2009-03-18 13:40:37 +00:00
|
|
|
return origin_subst(xname, refobj->origin_path);
|
|
|
|
else
|
|
|
|
return xstrdup(xname);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
2003-06-18 16:17:13 +00:00
|
|
|
if (libmap_disable || (refobj == NULL) ||
|
|
|
|
(name = lm_find(refobj->path, xname)) == NULL)
|
2003-04-07 16:21:26 +00:00
|
|
|
name = (char *)xname;
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
dbg(" Searching for \"%s\"", name);
|
|
|
|
|
2002-01-25 16:35:43 +00:00
|
|
|
if ((pathname = search_library_path(name, ld_library_path)) != NULL ||
|
|
|
|
(refobj != NULL &&
|
1998-03-07 19:24:35 +00:00
|
|
|
(pathname = search_library_path(name, refobj->rpath)) != NULL) ||
|
1999-04-09 06:42:00 +00:00
|
|
|
(pathname = search_library_path(name, gethints())) != NULL ||
|
1998-03-07 19:24:35 +00:00
|
|
|
(pathname = search_library_path(name, STANDARD_LIBRARY_PATH)) != NULL)
|
|
|
|
return pathname;
|
|
|
|
|
2004-05-28 00:05:28 +00:00
|
|
|
if(refobj != NULL && refobj->path != NULL) {
|
|
|
|
_rtld_error("Shared object \"%s\" not found, required by \"%s\"",
|
|
|
|
name, basename(refobj->path));
|
|
|
|
} else {
|
|
|
|
_rtld_error("Shared object \"%s\" not found", name);
|
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Given a symbol number in a referencing object, find the corresponding
|
|
|
|
* definition of the symbol. Returns a pointer to the symbol, or NULL if
|
|
|
|
* no definition was found. Returns a pointer to the Obj_Entry of the
|
|
|
|
* defining object via the reference parameter DEFOBJ_OUT.
|
|
|
|
*/
|
1998-09-04 19:03:57 +00:00
|
|
|
const Elf_Sym *
|
2000-09-19 04:27:16 +00:00
|
|
|
find_symdef(unsigned long symnum, const Obj_Entry *refobj,
|
2010-12-25 08:51:20 +00:00
|
|
|
const Obj_Entry **defobj_out, int flags, SymCache *cache,
|
|
|
|
RtldLockState *lockstate)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
1998-09-04 19:03:57 +00:00
|
|
|
const Elf_Sym *ref;
|
1999-08-30 01:24:08 +00:00
|
|
|
const Elf_Sym *def;
|
|
|
|
const Obj_Entry *defobj;
|
2010-12-25 08:51:20 +00:00
|
|
|
SymLook req;
|
1998-03-07 19:24:35 +00:00
|
|
|
const char *name;
|
2010-12-25 08:51:20 +00:00
|
|
|
int res;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2001-05-05 23:21:05 +00:00
|
|
|
/*
|
|
|
|
* If we have already found this symbol, get the information from
|
|
|
|
* the cache.
|
|
|
|
*/
|
|
|
|
if (symnum >= refobj->nchains)
|
|
|
|
return NULL; /* Bad object */
|
|
|
|
if (cache != NULL && cache[symnum].sym != NULL) {
|
|
|
|
*defobj_out = cache[symnum].obj;
|
|
|
|
return cache[symnum].sym;
|
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
ref = refobj->symtab + symnum;
|
|
|
|
name = refobj->strtab + ref->st_name;
|
2010-12-25 08:51:20 +00:00
|
|
|
def = NULL;
|
1999-08-30 01:25:38 +00:00
|
|
|
defobj = NULL;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2002-04-27 02:48:29 +00:00
|
|
|
/*
|
|
|
|
* We don't have to do a full scale lookup if the symbol is local.
|
|
|
|
* We know it will bind to the instance in this load module; to
|
|
|
|
* which we already have a pointer (ie ref). By not doing a lookup,
|
|
|
|
* we not only improve performance, but it also avoids unresolvable
|
|
|
|
* symbols when local symbols are not in the hash table. This has
|
|
|
|
* been seen with the ia64 toolchain.
|
|
|
|
*/
|
|
|
|
if (ELF_ST_BIND(ref->st_info) != STB_LOCAL) {
|
|
|
|
if (ELF_ST_TYPE(ref->st_info) == STT_SECTION) {
|
2002-04-02 02:19:02 +00:00
|
|
|
_rtld_error("%s: Bogus symbol table entry %lu", refobj->path,
|
|
|
|
symnum);
|
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
symlook_init(&req, name);
|
|
|
|
req.flags = flags;
|
|
|
|
req.ventry = fetch_ventry(refobj, symnum);
|
|
|
|
req.lockstate = lockstate;
|
|
|
|
res = symlook_default(&req, refobj);
|
|
|
|
if (res == 0) {
|
|
|
|
def = req.sym_out;
|
|
|
|
defobj = req.defobj_out;
|
|
|
|
}
|
2002-04-27 02:48:29 +00:00
|
|
|
} else {
|
2002-04-02 02:19:02 +00:00
|
|
|
def = ref;
|
|
|
|
defobj = refobj;
|
2002-04-27 02:48:29 +00:00
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
|
1999-08-30 01:24:08 +00:00
|
|
|
/*
|
1999-08-30 01:48:19 +00:00
|
|
|
* If we found no definition and the reference is weak, treat the
|
1999-08-30 01:24:08 +00:00
|
|
|
* symbol as having the value zero.
|
|
|
|
*/
|
1999-08-30 01:48:19 +00:00
|
|
|
if (def == NULL && ELF_ST_BIND(ref->st_info) == STB_WEAK) {
|
|
|
|
def = &sym_zero;
|
|
|
|
defobj = obj_main;
|
1999-04-05 02:36:40 +00:00
|
|
|
}
|
|
|
|
|
2001-05-05 23:21:05 +00:00
|
|
|
if (def != NULL) {
|
1999-08-30 01:48:19 +00:00
|
|
|
*defobj_out = defobj;
|
2001-05-05 23:21:05 +00:00
|
|
|
/* Record the information in the cache to avoid subsequent lookups. */
|
|
|
|
if (cache != NULL) {
|
|
|
|
cache[symnum].sym = def;
|
|
|
|
cache[symnum].obj = defobj;
|
|
|
|
}
|
2001-10-15 18:48:42 +00:00
|
|
|
} else {
|
|
|
|
if (refobj != &obj_rtld)
|
|
|
|
_rtld_error("%s: Undefined symbol \"%s\"", refobj->path, name);
|
|
|
|
}
|
1999-08-30 01:48:19 +00:00
|
|
|
return def;
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
1998-09-05 03:31:00 +00:00
|
|
|
/*
|
|
|
|
* Return the search path from the ldconfig hints file, reading it if
|
|
|
|
* necessary. Returns NULL if there are problems with the hints file,
|
|
|
|
* or if the search path there is empty.
|
|
|
|
*/
|
|
|
|
static const char *
|
|
|
|
gethints(void)
|
|
|
|
{
|
|
|
|
static char *hints;
|
|
|
|
|
|
|
|
if (hints == NULL) {
|
|
|
|
int fd;
|
|
|
|
struct elfhints_hdr hdr;
|
|
|
|
char *p;
|
|
|
|
|
|
|
|
/* Keep from trying again in case the hints file is bad. */
|
|
|
|
hints = "";
|
|
|
|
|
2009-03-23 16:49:00 +00:00
|
|
|
if ((fd = open(ld_elf_hints_path, O_RDONLY)) == -1)
|
1998-09-05 03:31:00 +00:00
|
|
|
return NULL;
|
|
|
|
if (read(fd, &hdr, sizeof hdr) != sizeof hdr ||
|
|
|
|
hdr.magic != ELFHINTS_MAGIC ||
|
|
|
|
hdr.version != 1) {
|
|
|
|
close(fd);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
p = xmalloc(hdr.dirlistlen + 1);
|
|
|
|
if (lseek(fd, hdr.strtab + hdr.dirlist, SEEK_SET) == -1 ||
|
2003-05-04 00:56:00 +00:00
|
|
|
read(fd, p, hdr.dirlistlen + 1) != (ssize_t)hdr.dirlistlen + 1) {
|
1998-09-05 03:31:00 +00:00
|
|
|
free(p);
|
|
|
|
close(fd);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
hints = p;
|
|
|
|
close(fd);
|
|
|
|
}
|
|
|
|
return hints[0] != '\0' ? hints : NULL;
|
|
|
|
}
|
|
|
|
|
1999-08-30 01:48:19 +00:00
|
|
|
static void
|
|
|
|
init_dag(Obj_Entry *root)
|
|
|
|
{
|
2011-01-28 23:44:57 +00:00
|
|
|
const Needed_Entry *needed;
|
|
|
|
const Objlist_Entry *elm;
|
Solve the dynamic linker's problems with multithreaded programs once
and for all (I hope). Packages such as wine, JDK, and linuxthreads
should no longer have any problems with re-entering the dynamic
linker.
This commit replaces the locking used in the dynamic linker with a
new spinlock-based reader/writer lock implementation. Brian
Fundakowski Feldman <green> argued for this from the very beginning,
but it took me a long time to come around to his point of view.
Spinlocks are the only kinds of locks that work with all thread
packages. But on uniprocessor systems they can be inefficient,
because while a contender for the lock is spinning the holder of the
lock cannot make any progress toward releasing it. To alleviate
this disadvantage I have borrowed a trick from Sleepycat's Berkeley
DB implementation. When spinning for a lock, the requester does a
nanosleep() call for 1 usec. each time around the loop. This will
generally yield the CPU to other threads, allowing the lock holder
to finish its business and release the lock. I chose 1 usec. as the
minimum sleep which would with reasonable certainty not be rounded
down to 0.
The formerly machine-independent file "lockdflt.c" has been moved
into the architecture-specific subdirectories by repository copy.
It now contains the machine-dependent spinlocking code. For the
spinlocks I used the very nifty "simple, non-scalable reader-preference
lock" which I found at
<http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/rw.html>
on all CPUs except the 80386 (the specific CPU model, not the
architecture). The 80386 CPU doesn't support the necessary "cmpxchg"
instruction, so on that CPU a simple exclusive test-and-set lock
is used instead. 80386 CPUs are detected at initialization time by
trying to execute "cmpxchg" and catching the resulting SIGILL
signal.
To reduce contention for the locks, I have revamped a couple of
key data structures, permitting all common operations to be done
under non-exclusive (reader) locking. The only operations that
require exclusive locking now are the rare intrusive operations
such as dlopen() and dlclose().
The dllockinit() interface is now deprecated. It still exists,
but only as a do-nothing stub. I plan to remove it as soon as is
reasonably possible. (From the very beginning it was clearly
labeled as experimental and subject to change.) As far as I know,
only the linuxthreads port uses dllockinit(). This interface turned
out to have several problems. As one example, when the dynamic
linker called a client-supplied locking function, that function
sometimes needed lazy binding, causing re-entry into the dynamic
linker and a big looping mess. And in any case, it turned out to be
too burdensome to require threads packages to register themselves
with the dynamic linker.
2000-07-08 04:10:38 +00:00
|
|
|
DoneList donelist;
|
|
|
|
|
If dlopen() is called for the dso that has been already loaded as a
dependency, then the dso never has its DAG initialized. Empty DAG
makes ref_dag() call in dlopen() a nop, and the dso refcount is off
by one.
Initialize the DAG on the first dlopen() call, using a boolean flag
to prevent double initialization.
From the PR (edited):
Assume we have a library liba.so, containing a function a(), and a
library libb.so, containing function b(). liba.so needs functionality
from libb.so, so liba.so links in libb.so.
An application doesn't know about the relation between these libraries,
but needs to call a() and b(). It dlopen()s liba.so and obtains a
pointer to a(), then it dlopen()s libb.so and obtains a pointer to b().
As soon as the application doesn't need a() anymore, it dlclose()s liba.so.
Expected result: the pointer to b() is still valid and can be called
Actual result: the pointer to b() has become invalid, even though the
application did not dlclose() the handle to libb.so. On calling b(), the
application crashes with a segmentation fault.
PR: misc/151861
Based on patch by: jh
Reviewed by: kan
Tested by: Arjan van Leeuwen <freebsd-maintainer opera com>
MFC after: 1 week
2010-11-03 09:23:08 +00:00
|
|
|
if (root->dag_inited)
|
2010-11-04 09:19:14 +00:00
|
|
|
return;
|
Solve the dynamic linker's problems with multithreaded programs once
and for all (I hope). Packages such as wine, JDK, and linuxthreads
should no longer have any problems with re-entering the dynamic
linker.
This commit replaces the locking used in the dynamic linker with a
new spinlock-based reader/writer lock implementation. Brian
Fundakowski Feldman <green> argued for this from the very beginning,
but it took me a long time to come around to his point of view.
Spinlocks are the only kinds of locks that work with all thread
packages. But on uniprocessor systems they can be inefficient,
because while a contender for the lock is spinning the holder of the
lock cannot make any progress toward releasing it. To alleviate
this disadvantage I have borrowed a trick from Sleepycat's Berkeley
DB implementation. When spinning for a lock, the requester does a
nanosleep() call for 1 usec. each time around the loop. This will
generally yield the CPU to other threads, allowing the lock holder
to finish its business and release the lock. I chose 1 usec. as the
minimum sleep which would with reasonable certainty not be rounded
down to 0.
The formerly machine-independent file "lockdflt.c" has been moved
into the architecture-specific subdirectories by repository copy.
It now contains the machine-dependent spinlocking code. For the
spinlocks I used the very nifty "simple, non-scalable reader-preference
lock" which I found at
<http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/rw.html>
on all CPUs except the 80386 (the specific CPU model, not the
architecture). The 80386 CPU doesn't support the necessary "cmpxchg"
instruction, so on that CPU a simple exclusive test-and-set lock
is used instead. 80386 CPUs are detected at initialization time by
trying to execute "cmpxchg" and catching the resulting SIGILL
signal.
To reduce contention for the locks, I have revamped a couple of
key data structures, permitting all common operations to be done
under non-exclusive (reader) locking. The only operations that
require exclusive locking now are the rare intrusive operations
such as dlopen() and dlclose().
The dllockinit() interface is now deprecated. It still exists,
but only as a do-nothing stub. I plan to remove it as soon as is
reasonably possible. (From the very beginning it was clearly
labeled as experimental and subject to change.) As far as I know,
only the linuxthreads port uses dllockinit(). This interface turned
out to have several problems. As one example, when the dynamic
linker called a client-supplied locking function, that function
sometimes needed lazy binding, causing re-entry into the dynamic
linker and a big looping mess. And in any case, it turned out to be
too burdensome to require threads packages to register themselves
with the dynamic linker.
2000-07-08 04:10:38 +00:00
|
|
|
donelist_init(&donelist);
|
1999-08-30 01:48:19 +00:00
|
|
|
|
2011-01-28 23:44:57 +00:00
|
|
|
/* Root object belongs to own DAG. */
|
|
|
|
objlist_push_tail(&root->dldags, root);
|
|
|
|
objlist_push_tail(&root->dagmembers, root);
|
|
|
|
donelist_check(&donelist, root);
|
2003-05-08 01:31:36 +00:00
|
|
|
|
2011-01-28 23:44:57 +00:00
|
|
|
/*
|
|
|
|
* Add dependencies of root object to DAG in breadth order
|
|
|
|
* by exploiting the fact that each new object get added
|
|
|
|
* to the tail of the dagmembers list.
|
|
|
|
*/
|
|
|
|
STAILQ_FOREACH(elm, &root->dagmembers, link) {
|
|
|
|
for (needed = elm->obj->needed; needed != NULL; needed = needed->next) {
|
|
|
|
if (needed->obj == NULL || donelist_check(&donelist, needed->obj))
|
|
|
|
continue;
|
|
|
|
objlist_push_tail(&needed->obj->dldags, root);
|
|
|
|
objlist_push_tail(&root->dagmembers, needed->obj);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
root->dag_inited = true;
|
1999-08-30 01:48:19 +00:00
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
|
|
|
* Initialize the dynamic linker. The argument is the address at which
|
|
|
|
* the dynamic linker has been mapped into memory. The primary task of
|
|
|
|
* this function is to relocate the dynamic linker.
|
|
|
|
*/
|
|
|
|
static void
|
2010-08-17 09:05:39 +00:00
|
|
|
init_rtld(caddr_t mapbase, Elf_Auxinfo **aux_info)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
2002-04-02 02:19:02 +00:00
|
|
|
Obj_Entry objtmp; /* Temporary rtld object */
|
2010-08-17 09:05:39 +00:00
|
|
|
const Elf_Dyn *dyn_rpath;
|
|
|
|
const Elf_Dyn *dyn_soname;
|
2002-04-02 02:19:02 +00:00
|
|
|
|
1998-09-15 21:07:52 +00:00
|
|
|
/*
|
|
|
|
* Conjure up an Obj_Entry structure for the dynamic linker.
|
|
|
|
*
|
2009-06-23 09:50:50 +00:00
|
|
|
* The "path" member can't be initialized yet because string constants
|
2009-06-23 14:12:49 +00:00
|
|
|
* cannot yet be accessed. Below we will set it correctly.
|
1998-09-15 21:07:52 +00:00
|
|
|
*/
|
2003-12-31 15:10:41 +00:00
|
|
|
memset(&objtmp, 0, sizeof(objtmp));
|
2002-04-02 02:19:02 +00:00
|
|
|
objtmp.path = NULL;
|
|
|
|
objtmp.rtld = true;
|
|
|
|
objtmp.mapbase = mapbase;
|
1999-04-09 00:28:43 +00:00
|
|
|
#ifdef PIC
|
2002-04-02 02:19:02 +00:00
|
|
|
objtmp.relocbase = mapbase;
|
1998-09-04 19:03:57 +00:00
|
|
|
#endif
|
Fix the problem that surfaced with the new binutils import on sparc64
(and that is for now being worked around by a binutils patch).
The rtld code tested &_DYNAMIC against 0 to see whether rtld itself
was built as PIC or not. While the sparc64 MD code did not rely
on the preset value of the GOT slot for _DYNAMIC any more due
to previous binutils changes, it still used to not be 0, so
that this check did work. The new binutils do however initialize
this slot with 0. As a consequence, rtld would not properly initialize
itself and crash.
Fix that by introducing a new macro, RTLD_IS_DYNAMIC, to take the role
of this test. For sparc64, it is implemented using the rtld_dynamic()
code that was already there. If an architecture does not provide its
own implementation, we default to the old check.
While being there, mark _DYNAMIC as a weak symbol in the sparc64
rtld_start.S. This is needed in the LDSCRIPT case, which is however
not currently supported for want of an actual ldscript.
Sanity checked with md5 on alpha, amd64, i386 and ia64.
2004-06-18 02:01:37 +00:00
|
|
|
if (RTLD_IS_DYNAMIC()) {
|
2002-04-02 02:19:02 +00:00
|
|
|
objtmp.dynamic = rtld_dynamic(&objtmp);
|
2010-08-17 09:05:39 +00:00
|
|
|
digest_dynamic1(&objtmp, 1, &dyn_rpath, &dyn_soname);
|
2002-04-02 02:19:02 +00:00
|
|
|
assert(objtmp.needed == NULL);
|
2008-04-04 20:59:26 +00:00
|
|
|
#if !defined(__mips__)
|
2011-01-28 17:30:24 +00:00
|
|
|
/* MIPS has a bogus DT_TEXTREL. */
|
2002-04-02 02:19:02 +00:00
|
|
|
assert(!objtmp.textrel);
|
2008-04-04 20:59:26 +00:00
|
|
|
#endif
|
1998-03-07 19:24:35 +00:00
|
|
|
|
1999-04-09 00:28:43 +00:00
|
|
|
/*
|
|
|
|
* Temporarily put the dynamic linker entry into the object list, so
|
|
|
|
* that symbols can be found.
|
|
|
|
*/
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
relocate_objects(&objtmp, true, &objtmp, NULL);
|
1999-04-09 00:28:43 +00:00
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2002-04-02 02:19:02 +00:00
|
|
|
/* Initialize the object list. */
|
1998-03-07 19:24:35 +00:00
|
|
|
obj_tail = &obj_list;
|
2002-04-02 02:19:02 +00:00
|
|
|
|
|
|
|
/* Now that non-local variables can be accesses, copy out obj_rtld. */
|
|
|
|
memcpy(&obj_rtld, &objtmp, sizeof(obj_rtld));
|
1998-04-30 07:48:02 +00:00
|
|
|
|
2010-08-17 09:05:39 +00:00
|
|
|
if (aux_info[AT_PAGESZ] != NULL)
|
|
|
|
pagesize = aux_info[AT_PAGESZ]->a_un.a_val;
|
|
|
|
if (aux_info[AT_OSRELDATE] != NULL)
|
|
|
|
osreldate = aux_info[AT_OSRELDATE]->a_un.a_val;
|
|
|
|
|
|
|
|
digest_dynamic2(&obj_rtld, dyn_rpath, dyn_soname);
|
|
|
|
|
1998-09-15 21:07:52 +00:00
|
|
|
/* Replace the path with a dynamically allocated copy. */
|
2002-04-02 02:19:02 +00:00
|
|
|
obj_rtld.path = xstrdup(PATH_RTLD);
|
1998-09-15 21:07:52 +00:00
|
|
|
|
1998-04-30 07:48:02 +00:00
|
|
|
r_debug.r_brk = r_debug_state;
|
|
|
|
r_debug.r_state = RT_CONSISTENT;
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
2000-07-26 04:24:40 +00:00
|
|
|
/*
|
|
|
|
* Add the init functions from a needed object list (and its recursive
|
|
|
|
* needed objects) to "list". This is not used directly; it is a helper
|
|
|
|
* function for initlist_add_objects(). The write lock must be held
|
|
|
|
* when this function is called.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
initlist_add_neededs(Needed_Entry *needed, Objlist *list)
|
|
|
|
{
|
|
|
|
/* Recursively process the successor needed objects. */
|
|
|
|
if (needed->next != NULL)
|
|
|
|
initlist_add_neededs(needed->next, list);
|
|
|
|
|
|
|
|
/* Process the current needed object. */
|
|
|
|
if (needed->obj != NULL)
|
|
|
|
initlist_add_objects(needed->obj, &needed->obj->next, list);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Scan all of the DAGs rooted in the range of objects from "obj" to
|
|
|
|
* "tail" and add their init functions to "list". This recurses over
|
|
|
|
* the DAGs and ensure the proper init ordering such that each object's
|
|
|
|
* needed libraries are initialized before the object itself. At the
|
|
|
|
* same time, this function adds the objects to the global finalization
|
|
|
|
* list "list_fini" in the opposite order. The write lock must be
|
|
|
|
* held when this function is called.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
initlist_add_objects(Obj_Entry *obj, Obj_Entry **tail, Objlist *list)
|
|
|
|
{
|
2009-06-20 14:16:41 +00:00
|
|
|
if (obj->init_scanned || obj->init_done)
|
2000-07-26 04:24:40 +00:00
|
|
|
return;
|
2009-06-20 14:16:41 +00:00
|
|
|
obj->init_scanned = true;
|
2000-07-26 04:24:40 +00:00
|
|
|
|
|
|
|
/* Recursively process the successor objects. */
|
|
|
|
if (&obj->next != tail)
|
|
|
|
initlist_add_objects(obj->next, tail, list);
|
|
|
|
|
|
|
|
/* Recursively process the needed objects. */
|
|
|
|
if (obj->needed != NULL)
|
|
|
|
initlist_add_neededs(obj->needed, list);
|
|
|
|
|
|
|
|
/* Add the object to the init list. */
|
2004-03-05 08:10:19 +00:00
|
|
|
if (obj->init != (Elf_Addr)NULL)
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_push_tail(list, obj);
|
|
|
|
|
|
|
|
/* Add the object to the global fini list in the reverse order. */
|
2009-06-20 14:16:41 +00:00
|
|
|
if (obj->fini != (Elf_Addr)NULL && !obj->on_fini_list) {
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_push_head(&list_fini, obj);
|
2009-06-20 14:16:41 +00:00
|
|
|
obj->on_fini_list = true;
|
|
|
|
}
|
2000-07-26 04:24:40 +00:00
|
|
|
}
|
|
|
|
|
2001-10-15 18:48:42 +00:00
|
|
|
#ifndef FPTR_TARGET
/*
 * Extract the code address from a function pointer.  The default is the
 * identity cast; the #ifndef guard lets machine-dependent headers supply
 * a different definition (presumably for ABIs that use function
 * descriptors — confirm against the MD headers).
 */
#define FPTR_TARGET(f)	((Elf_Addr) (f))
#endif
|
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
static void
|
|
|
|
free_needed_filtees(Needed_Entry *n)
|
|
|
|
{
|
|
|
|
Needed_Entry *needed, *needed1;
|
|
|
|
|
|
|
|
for (needed = n; needed != NULL; needed = needed->next) {
|
|
|
|
if (needed->obj != NULL) {
|
|
|
|
dlclose(needed->obj);
|
|
|
|
needed->obj = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (needed = n; needed != NULL; needed = needed1) {
|
|
|
|
needed1 = needed->next;
|
|
|
|
free(needed);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
unload_filtees(Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
|
|
|
|
free_needed_filtees(obj->needed_filtees);
|
|
|
|
obj->needed_filtees = NULL;
|
|
|
|
free_needed_filtees(obj->needed_aux_filtees);
|
|
|
|
obj->needed_aux_filtees = NULL;
|
|
|
|
obj->filtees_loaded = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
load_filtee1(Obj_Entry *obj, Needed_Entry *needed, int flags)
|
|
|
|
{
|
|
|
|
|
|
|
|
for (; needed != NULL; needed = needed->next) {
|
2012-01-07 10:33:01 +00:00
|
|
|
needed->obj = dlopen_object(obj->strtab + needed->name, -1, obj,
|
2010-12-25 08:51:20 +00:00
|
|
|
flags, ((ld_loadfltr || obj->z_loadfltr) ? RTLD_NOW : RTLD_LAZY) |
|
|
|
|
RTLD_LOCAL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
load_filtees(Obj_Entry *obj, int flags, RtldLockState *lockstate)
|
|
|
|
{
|
|
|
|
|
|
|
|
lock_restart_for_upgrade(lockstate);
|
|
|
|
if (!obj->filtees_loaded) {
|
|
|
|
load_filtee1(obj, obj->needed_filtees, flags);
|
|
|
|
load_filtee1(obj, obj->needed_aux_filtees, flags);
|
|
|
|
obj->filtees_loaded = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
process_needed(Obj_Entry *obj, Needed_Entry *needed, int flags)
|
|
|
|
{
|
|
|
|
Obj_Entry *obj1;
|
|
|
|
|
|
|
|
for (; needed != NULL; needed = needed->next) {
|
2012-01-07 10:33:01 +00:00
|
|
|
obj1 = needed->obj = load_object(obj->strtab + needed->name, -1, obj,
|
2010-12-25 08:51:20 +00:00
|
|
|
flags & ~RTLD_LO_NOLOAD);
|
|
|
|
if (obj1 == NULL && !ld_tracing && (flags & RTLD_LO_FILTEES) == 0)
|
|
|
|
return (-1);
|
|
|
|
if (obj1 != NULL && obj1->z_nodelete && !obj1->ref_nodel) {
|
|
|
|
dbg("obj %s nodelete", obj1->path);
|
|
|
|
init_dag(obj1);
|
|
|
|
ref_dag(obj1);
|
|
|
|
obj1->ref_nodel = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
|
|
|
* Given a shared object, traverse its list of needed objects, and load
|
|
|
|
* each of them. Returns 0 on success. Generates an error message and
|
|
|
|
* returns -1 on failure.
|
|
|
|
*/
|
|
|
|
static int
|
2009-11-26 13:57:20 +00:00
|
|
|
load_needed_objects(Obj_Entry *first, int flags)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
2010-12-25 08:51:20 +00:00
|
|
|
Obj_Entry *obj;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
for (obj = first; obj != NULL; obj = obj->next) {
|
2010-12-25 08:51:20 +00:00
|
|
|
if (process_needed(obj, obj->needed, flags) == -1)
|
|
|
|
return (-1);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
return (0);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
1998-09-22 02:09:56 +00:00
|
|
|
static int
|
|
|
|
load_preload_objects(void)
|
|
|
|
{
|
|
|
|
char *p = ld_preload;
|
2000-01-22 22:20:05 +00:00
|
|
|
static const char delim[] = " \t:;";
|
1998-09-22 02:09:56 +00:00
|
|
|
|
|
|
|
if (p == NULL)
|
2004-03-05 08:10:19 +00:00
|
|
|
return 0;
|
1998-09-22 02:09:56 +00:00
|
|
|
|
2000-01-22 22:20:05 +00:00
|
|
|
p += strspn(p, delim);
|
1998-09-22 02:09:56 +00:00
|
|
|
while (*p != '\0') {
|
2000-01-22 22:20:05 +00:00
|
|
|
size_t len = strcspn(p, delim);
|
1998-09-22 02:09:56 +00:00
|
|
|
char savech;
|
|
|
|
|
|
|
|
savech = p[len];
|
|
|
|
p[len] = '\0';
|
2012-01-07 10:33:01 +00:00
|
|
|
if (load_object(p, -1, NULL, 0) == NULL)
|
1998-09-22 02:09:56 +00:00
|
|
|
return -1; /* XXX - cleanup */
|
|
|
|
p[len] = savech;
|
|
|
|
p += len;
|
2000-01-22 22:20:05 +00:00
|
|
|
p += strspn(p, delim);
|
1998-09-22 02:09:56 +00:00
|
|
|
}
|
2007-01-09 17:50:05 +00:00
|
|
|
LD_UTRACE(UTRACE_PRELOAD_FINISHED, NULL, NULL, 0, 0, NULL);
|
1998-09-22 02:09:56 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-01-07 10:33:01 +00:00
|
|
|
/* Map a NULL object path to a readable placeholder for error messages. */
static const char *
printable_path(const char *path)
{

    if (path == NULL)
	return ("<unknown>");
    return (path);
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
 * Load a shared object into memory, if it is not already loaded.  The
 * object may be specified by name or by user-supplied file descriptor
 * fd_u.  In the later case, the fd_u descriptor is not closed, but its
 * duplicate is.
 *
 * Returns a pointer to the Obj_Entry for the object.  Returns NULL
 * on failure.
 */
static Obj_Entry *
load_object(const char *name, int fd_u, const Obj_Entry *refobj, int flags)
{
    Obj_Entry *obj;
    int fd;
    struct stat sb;
    char *path;	/* malloc'd by find_library(); ownership tracked below */

    if (name != NULL) {
	/* Fast path: an object already known under this name. */
	for (obj = obj_list->next; obj != NULL; obj = obj->next) {
	    if (object_match_name(obj, name))
		return (obj);
	}

	path = find_library(name, refobj);
	if (path == NULL)
	    return (NULL);
    } else
	path = NULL;

    /*
     * If we didn't find a match by pathname, or the name is not
     * supplied, open the file and check again by device and inode.
     * This avoids false mismatches caused by multiple links or ".."
     * in pathnames.
     *
     * To avoid a race, we open the file and use fstat() rather than
     * using stat().
     */
    fd = -1;
    if (fd_u == -1) {
	if ((fd = open(path, O_RDONLY)) == -1) {
	    _rtld_error("Cannot open \"%s\"", path);
	    free(path);
	    return (NULL);
	}
    } else {
	/* Work on a duplicate so the caller's descriptor stays open. */
	fd = dup(fd_u);
	if (fd == -1) {
	    _rtld_error("Cannot dup fd");
	    free(path);
	    return (NULL);
	}
    }
    if (fstat(fd, &sb) == -1) {
	_rtld_error("Cannot fstat \"%s\"", printable_path(path));
	close(fd);
	free(path);
	return NULL;
    }
    /* Second duplicate check, this time by device/inode pair. */
    for (obj = obj_list->next; obj != NULL; obj = obj->next)
	if (obj->ino == sb.st_ino && obj->dev == sb.st_dev)
	    break;
    if (obj != NULL && name != NULL) {
	/* Already loaded under a different name; record the alias. */
	object_add_name(obj, name);
	free(path);
	close(fd);
	return obj;
    }
    if (flags & RTLD_LO_NOLOAD) {
	/* Caller only wanted an existing object; do not load a new one. */
	free(path);
	close(fd);
	return (NULL);
    }

    /* First use of this object, so we must map it in */
    obj = do_load_object(fd, name, path, &sb, flags);
    if (obj == NULL)
	/* On success do_load_object() took ownership of path (obj->path). */
	free(path);
    close(fd);

    return obj;
}
|
|
|
|
|
|
|
|
/*
 * Map the object open on fd into memory and link it onto the global
 * object list.  On success the object takes ownership of the malloc'd
 * "path" string (stored in obj->path); on failure the caller must free
 * it.  "sbp" is the result of fstat() on fd.  Returns the new Obj_Entry
 * or NULL on error.
 */
static Obj_Entry *
do_load_object(int fd, const char *name, char *path, struct stat *sbp,
  int flags)
{
    Obj_Entry *obj;
    struct statfs fs;

    /*
     * but first, make sure that environment variables haven't been
     * used to circumvent the noexec flag on a filesystem.
     */
    if (dangerous_ld_env) {
	if (fstatfs(fd, &fs) != 0) {
	    _rtld_error("Cannot fstatfs \"%s\"", printable_path(path));
	    return NULL;
	}
	if (fs.f_flags & MNT_NOEXEC) {
	    _rtld_error("Cannot execute objects on %s\n", fs.f_mntonname);
	    return NULL;
	}
    }
    dbg("loading \"%s\"", printable_path(path));
    obj = map_object(fd, printable_path(path), sbp);
    if (obj == NULL)
	return NULL;

    /*
     * If DT_SONAME is present in the object, digest_dynamic2 already
     * added it to the object names.
     */
    if (name != NULL)
	object_add_name(obj, name);
    obj->path = path;
    digest_dynamic(obj, 0);
    /*
     * Refuse a plain dlopen() of objects marked no-open (z_noopen),
     * unless we are merely tracing.
     */
    if (obj->z_noopen && (flags & (RTLD_LO_DLOPEN | RTLD_LO_TRACE)) ==
      RTLD_LO_DLOPEN) {
	dbg("refusing to load non-loadable \"%s\"", obj->path);
	_rtld_error("Cannot dlopen non-loadable %s", obj->path);
	munmap(obj->mapbase, obj->mapsize);
	obj_free(obj);
	return (NULL);
    }

    /* Append the new object to the global list and bump the counters. */
    *obj_tail = obj;
    obj_tail = &obj->next;
    obj_count++;
    obj_loads++;
    linkmap_add(obj);	/* for GDB & dlinfo() */
    max_stack_flags |= obj->stack_flags;

    dbg("  %p .. %p: %s", obj->mapbase,
      obj->mapbase + obj->mapsize - 1, obj->path);
    if (obj->textrel)
	dbg("  WARNING: %s has impure text", obj->path);
    LD_UTRACE(UTRACE_LOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0,
	obj->path);

    return obj;
}
|
|
|
|
|
|
|
|
static Obj_Entry *
|
|
|
|
obj_from_addr(const void *addr)
|
|
|
|
{
|
|
|
|
Obj_Entry *obj;
|
|
|
|
|
|
|
|
for (obj = obj_list; obj != NULL; obj = obj->next) {
|
|
|
|
if (addr < (void *) obj->mapbase)
|
|
|
|
continue;
|
2004-02-25 17:06:16 +00:00
|
|
|
if (addr < (void *) (obj->mapbase + obj->mapsize))
|
1998-03-07 19:24:35 +00:00
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2000-07-26 04:24:40 +00:00
|
|
|
/*
 * Call the finalization functions for each of the objects in "list"
 * belonging to the DAG of "root" and referenced once.  If NULL "root"
 * is specified, every finalization function will be called regardless
 * of the reference count and the list elements won't be freed.  All of
 * the objects are expected to have non-NULL fini functions.
 */
static void
objlist_call_fini(Objlist *list, Obj_Entry *root, RtldLockState *lockstate)
{
    Objlist_Entry *elm;
    char *saved_msg;

    assert(root == NULL || root->refcount == 1);

    /*
     * Preserve the current error message since a fini function might
     * call into the dynamic linker and overwrite it.
     */
    saved_msg = errmsg_save();
    do {
	STAILQ_FOREACH(elm, list, link) {
	    /* With a root, only objects in its DAG at refcount 1 qualify. */
	    if (root != NULL && (elm->obj->refcount != 1 ||
	      objlist_find(&root->dagmembers, elm->obj) == NULL))
		continue;
	    dbg("calling fini function for %s at %p", elm->obj->path,
	        (void *)elm->obj->fini);
	    LD_UTRACE(UTRACE_FINI_CALL, elm->obj, (void *)elm->obj->fini, 0, 0,
		elm->obj->path);
	    /* Remove object from fini list to prevent recursive invocation. */
	    STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
	    /*
	     * XXX: If a dlopen() call references an object while the
	     * fini function is in progress, we might end up trying to
	     * unload the referenced object in dlclose() or the object
	     * won't be unloaded although its fini function has been
	     * called.
	     */
	    /* Drop the bind lock while running user code in the fini. */
	    lock_release(rtld_bind_lock, lockstate);
	    call_initfini_pointer(elm->obj, elm->obj->fini);
	    wlock_acquire(rtld_bind_lock, lockstate);
	    /* No need to free anything if process is going down. */
	    if (root != NULL)
		free(elm);
	    /*
	     * We must restart the list traversal after every fini call
	     * because a dlclose() call from the fini function or from
	     * another thread might have modified the reference counts.
	     */
	    break;
	}
    } while (elm != NULL);	/* elm == NULL: full pass with no fini run */
    errmsg_restore(saved_msg);
}
|
|
|
|
|
|
|
|
/*
 * Call the initialization functions for each of the objects in
 * "list".  All of the objects are expected to have non-NULL init
 * functions.
 */
static void
objlist_call_init(Objlist *list, RtldLockState *lockstate)
{
    Objlist_Entry *elm;
    Obj_Entry *obj;
    char *saved_msg;

    /*
     * Clean init_scanned flag so that objects can be rechecked and
     * possibly initialized earlier if any of vectors called below
     * cause the change by using dlopen.
     */
    for (obj = obj_list;  obj != NULL;  obj = obj->next)
	obj->init_scanned = false;

    /*
     * Preserve the current error message since an init function might
     * call into the dynamic linker and overwrite it.
     */
    saved_msg = errmsg_save();
    STAILQ_FOREACH(elm, list, link) {
	if (elm->obj->init_done) /* Initialized early. */
	    continue;
	dbg("calling init function for %s at %p", elm->obj->path,
	    (void *)elm->obj->init);
	LD_UTRACE(UTRACE_INIT_CALL, elm->obj, (void *)elm->obj->init, 0, 0,
	    elm->obj->path);
	/*
	 * Race: other thread might try to use this object before current
	 * one completes the initilization.  Not much can be done here
	 * without better locking.
	 */
	/* Mark done before calling, so recursive dlopen sees it. */
	elm->obj->init_done = true;
	/* Drop the bind lock while running user code in the init. */
	lock_release(rtld_bind_lock, lockstate);
	call_initfini_pointer(elm->obj, elm->obj->init);
	wlock_acquire(rtld_bind_lock, lockstate);
    }
    errmsg_restore(saved_msg);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
objlist_clear(Objlist *list)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
while (!STAILQ_EMPTY(list)) {
|
|
|
|
elm = STAILQ_FIRST(list);
|
|
|
|
STAILQ_REMOVE_HEAD(list, link);
|
|
|
|
free(elm);
|
|
|
|
}
|
1999-08-30 01:48:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static Objlist_Entry *
|
|
|
|
objlist_find(Objlist *list, const Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
STAILQ_FOREACH(elm, list, link)
|
|
|
|
if (elm->obj == obj)
|
|
|
|
return elm;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2000-07-26 04:24:40 +00:00
|
|
|
static void
|
|
|
|
objlist_init(Objlist *list)
|
|
|
|
{
|
|
|
|
STAILQ_INIT(list);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
objlist_push_head(Objlist *list, Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
elm = NEW(Objlist_Entry);
|
|
|
|
elm->obj = obj;
|
|
|
|
STAILQ_INSERT_HEAD(list, elm, link);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
objlist_push_tail(Objlist *list, Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
elm = NEW(Objlist_Entry);
|
|
|
|
elm->obj = obj;
|
|
|
|
STAILQ_INSERT_TAIL(list, elm, link);
|
|
|
|
}
|
|
|
|
|
1999-08-30 01:48:19 +00:00
|
|
|
static void
|
|
|
|
objlist_remove(Objlist *list, Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
if ((elm = objlist_find(list, obj)) != NULL) {
|
2000-05-26 02:09:24 +00:00
|
|
|
STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
|
1999-08-30 01:48:19 +00:00
|
|
|
free(elm);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
 * Relocate newly-loaded shared objects.  The argument is a pointer to
 * the Obj_Entry for the first such object.  All objects from the first
 * to the end of the list of objects are relocated.  Returns 0 on success,
 * or -1 on failure.
 */
static int
relocate_objects(Obj_Entry *first, bool bind_now, Obj_Entry *rtldobj,
    RtldLockState *lockstate)
{
    Obj_Entry *obj;

    for (obj = first;  obj != NULL;  obj = obj->next) {
	if (obj != rtldobj)
	    dbg("relocating \"%s\"", obj->path);
	/* A run-time symbol table is mandatory for relocation. */
	if (obj->nbuckets == 0 || obj->nchains == 0 || obj->buckets == NULL ||
	    obj->symtab == NULL || obj->strtab == NULL) {
	    _rtld_error("%s: Shared object has no run-time symbol table",
	      obj->path);
	    return -1;
	}

	if (obj->textrel) {
	    /* There are relocations to the write-protected text segment. */
	    if (mprotect(obj->mapbase, obj->textsize,
	      PROT_READ|PROT_WRITE|PROT_EXEC) == -1) {
		_rtld_error("%s: Cannot write-enable text segment: %s",
		  obj->path, strerror(errno));
		return -1;
	    }
	}

	/* Process the non-PLT relocations. */
	if (reloc_non_plt(obj, rtldobj, lockstate))
		return -1;

	if (obj->textrel) {	/* Re-protected the text segment. */
	    if (mprotect(obj->mapbase, obj->textsize,
	      PROT_READ|PROT_EXEC) == -1) {
		_rtld_error("%s: Cannot write-protect text segment: %s",
		  obj->path, strerror(errno));
		return -1;
	    }
	}


	/* Set the special PLT or GOT entries. */
	init_pltgot(obj);

	/* Process the PLT relocations. */
	if (reloc_plt(obj) == -1)
	    return -1;
	/* Relocate the jump slots if we are doing immediate binding. */
	if (obj->bind_now || bind_now)
	    if (reloc_jmpslots(obj, lockstate) == -1)
		return -1;

	/* Make the relro region read-only now that relocation is done. */
	if (obj->relro_size > 0) {
	    if (mprotect(obj->relro_page, obj->relro_size, PROT_READ) == -1) {
		_rtld_error("%s: Cannot enforce relro protection: %s",
		  obj->path, strerror(errno));
		return -1;
	    }
	}

	/*
	 * Set up the magic number and version in the Obj_Entry.  These
	 * were checked in the crt1.o from the original ElfKit, so we
	 * set them for backward compatibility.
	 */
	obj->magic = RTLD_MAGIC;
	obj->version = RTLD_VERSION;
    }

    return (0);
}
|
|
|
|
|
|
|
|
/*
 * The handling of R_MACHINE_IRELATIVE relocations and jumpslots
 * referencing STT_GNU_IFUNC symbols is postponed till the other
 * relocations are done.  The indirect functions specified as
 * ifunc are allowed to call other symbols, so we need to have
 * objects relocated before asking for resolution from indirects.
 *
 * The R_MACHINE_IRELATIVE slots are resolved in greedy fashion,
 * instead of the usual lazy handling of PLT slots.  It is
 * consistent with how GNU does it.
 */
static int
resolve_object_ifunc(Obj_Entry *obj, bool bind_now, RtldLockState *lockstate)
{
    /* IRELATIVE slots are always resolved eagerly. */
    if (obj->irelative && reloc_iresolve(obj, lockstate) == -1)
	return (-1);
    /* ifunc jumpslots are resolved now only when binding immediately. */
    if ((obj->bind_now || bind_now) && obj->gnu_ifunc &&
      reloc_gnu_ifunc(obj, lockstate) == -1)
	return (-1);
    return (0);
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
resolve_objects_ifunc(Obj_Entry *first, bool bind_now, RtldLockState *lockstate)
|
|
|
|
{
|
|
|
|
Obj_Entry *obj;
|
|
|
|
|
|
|
|
for (obj = first; obj != NULL; obj = obj->next) {
|
|
|
|
if (resolve_object_ifunc(obj, bind_now, lockstate) == -1)
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
initlist_objects_ifunc(Objlist *list, bool bind_now, RtldLockState *lockstate)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
STAILQ_FOREACH(elm, list, link) {
|
|
|
|
if (resolve_object_ifunc(elm->obj, bind_now, lockstate) == -1)
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
return (0);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Cleanup procedure.  It will be called (by the atexit mechanism) just
 * before the process exits.
 */
static void
rtld_exit(void)
{
    RtldLockState lockstate;

    wlock_acquire(rtld_bind_lock, &lockstate);
    dbg("rtld_exit()");
    /* NULL root: run every remaining fini, ignoring refcounts. */
    objlist_call_fini(&list_fini, NULL, &lockstate);
    /* No need to remove the items from the list, since we are exiting. */
    if (!libmap_disable)
	lm_fini();
    lock_release(rtld_bind_lock, &lockstate);
}
|
|
|
|
|
2003-02-13 17:47:44 +00:00
|
|
|
/*
 * Invoke "callback" on each component of the colon- or semicolon-
 * separated search path "path", passing it the component, its length,
 * and "arg".  Stops and returns the first non-NULL callback result;
 * returns NULL when the path is exhausted (or was NULL).
 */
static void *
path_enumerate(const char *path, path_enum_proc callback, void *arg)
{
#ifdef COMPAT_32BIT
    const char *trans;
#endif
    if (path == NULL)
	return (NULL);

    path += strspn(path, ":;");		/* skip leading separators */
    while (*path != '\0') {
	size_t len;
	char *res;

	len = strcspn(path, ":;");
#ifdef COMPAT_32BIT
	/* Translate the directory through libmap before probing it. */
	trans = lm_findn(NULL, path, len);
	if (trans)
	    res = callback(trans, strlen(trans), arg);
	else
#endif
	res = callback(path, len, arg);

	if (res != NULL)
	    return (res);

	path += len;
	path += strspn(path, ":;");
    }

    return (NULL);
}
|
|
|
|
|
|
|
|
/*
 * Argument bundle passed through path_enumerate() to try_library_path().
 */
struct try_library_args {
    const char *name;		/* library file name to look for */
    size_t namelen;		/* strlen(name), cached */
    char *buffer;		/* scratch buffer for candidate pathnames */
    size_t buflen;		/* size of buffer in bytes */
};
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2003-02-13 17:47:44 +00:00
|
|
|
/*
 * path_enumerate() callback: test whether "dir/name" exists.  Relative
 * directories are only considered when the process is trusted (not
 * set-user-ID/set-group-ID).  Returns a freshly malloc'd pathname on
 * success, NULL otherwise.  param points at a struct try_library_args.
 */
static void *
try_library_path(const char *dir, size_t dirlen, void *param)
{
    struct try_library_args *arg;

    arg = param;
    if (*dir == '/' || trust) {
	char *pathname;

	/* dir + '/' + name + NUL must fit into the scratch buffer. */
	if (dirlen + 1 + arg->namelen + 1 > arg->buflen)
	    return (NULL);

	/* Build the candidate in the scratch buffer (dir is not
	   NUL-terminated at dirlen, hence the bounded copy). */
	pathname = arg->buffer;
	strncpy(pathname, dir, dirlen);
	pathname[dirlen] = '/';
	strcpy(pathname + dirlen + 1, arg->name);

	dbg("  Trying \"%s\"", pathname);
	if (access(pathname, F_OK) == 0) {		/* We found it */
	    /* Return a heap copy; the scratch buffer is reused. */
	    pathname = xmalloc(dirlen + 1 + arg->namelen + 1);
	    strcpy(pathname, arg->buffer);
	    return (pathname);
	}
    }
    return (NULL);
}
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2003-02-13 17:47:44 +00:00
|
|
|
static char *
|
|
|
|
search_library_path(const char *name, const char *path)
|
|
|
|
{
|
|
|
|
char *p;
|
|
|
|
struct try_library_args arg;
|
|
|
|
|
|
|
|
if (path == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
arg.name = name;
|
|
|
|
arg.namelen = strlen(name);
|
|
|
|
arg.buffer = xmalloc(PATH_MAX);
|
|
|
|
arg.buflen = PATH_MAX;
|
|
|
|
|
|
|
|
p = path_enumerate(path, try_library_path, &arg);
|
|
|
|
|
|
|
|
free(arg.buffer);
|
|
|
|
|
|
|
|
return (p);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * dlclose(3) entry point.  Drops one dlopen reference from the handle's
 * object; if the object becomes unreferenced, runs its fini functions
 * and unloads it (and its now-unreferenced dependencies).  Returns 0 on
 * success, -1 on a bad handle.
 */
int
dlclose(void *handle)
{
    Obj_Entry *root;
    RtldLockState lockstate;

    wlock_acquire(rtld_bind_lock, &lockstate);
    root = dlcheck(handle);
    if (root == NULL) {
	lock_release(rtld_bind_lock, &lockstate);
	return -1;
    }
    LD_UTRACE(UTRACE_DLCLOSE_START, handle, NULL, 0, root->dl_refcount,
	root->path);

    /* Unreference the object and its dependencies. */
    root->dl_refcount--;

    if (root->refcount == 1) {
	/*
	 * The object will be no longer referenced, so we must unload it.
	 * First, call the fini functions.
	 */
	objlist_call_fini(&list_fini, root, &lockstate);

	unref_dag(root);

	/* Finish cleaning up the newly-unreferenced objects. */
	GDB_STATE(RT_DELETE,&root->linkmap);
	unload_object(root);
	GDB_STATE(RT_CONSISTENT,NULL);
    } else
	unref_dag(root);

    LD_UTRACE(UTRACE_DLCLOSE_STOP, handle, NULL, 0, 0, NULL);
    lock_release(rtld_bind_lock, &lockstate);
    return 0;
}
|
|
|
|
|
2010-03-24 15:59:51 +00:00
|
|
|
char *
|
1998-03-07 19:24:35 +00:00
|
|
|
dlerror(void)
|
|
|
|
{
|
|
|
|
char *msg = error_message;
|
|
|
|
error_message = NULL;
|
|
|
|
return msg;
|
|
|
|
}
|
|
|
|
|
Solve the dynamic linker's problems with multithreaded programs once
and for all (I hope). Packages such as wine, JDK, and linuxthreads
should no longer have any problems with re-entering the dynamic
linker.
This commit replaces the locking used in the dynamic linker with a
new spinlock-based reader/writer lock implementation. Brian
Fundakowski Feldman <green> argued for this from the very beginning,
but it took me a long time to come around to his point of view.
Spinlocks are the only kinds of locks that work with all thread
packages. But on uniprocessor systems they can be inefficient,
because while a contender for the lock is spinning the holder of the
lock cannot make any progress toward releasing it. To alleviate
this disadvantage I have borrowed a trick from Sleepycat's Berkeley
DB implementation. When spinning for a lock, the requester does a
nanosleep() call for 1 usec. each time around the loop. This will
generally yield the CPU to other threads, allowing the lock holder
to finish its business and release the lock. I chose 1 usec. as the
minimum sleep which would with reasonable certainty not be rounded
down to 0.
The formerly machine-independent file "lockdflt.c" has been moved
into the architecture-specific subdirectories by repository copy.
It now contains the machine-dependent spinlocking code. For the
spinlocks I used the very nifty "simple, non-scalable reader-preference
lock" which I found at
<http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/rw.html>
on all CPUs except the 80386 (the specific CPU model, not the
architecture). The 80386 CPU doesn't support the necessary "cmpxchg"
instruction, so on that CPU a simple exclusive test-and-set lock
is used instead. 80386 CPUs are detected at initialization time by
trying to execute "cmpxchg" and catching the resulting SIGILL
signal.
To reduce contention for the locks, I have revamped a couple of
key data structures, permitting all common operations to be done
under non-exclusive (reader) locking. The only operations that
require exclusive locking now are the rare intrusive operations
such as dlopen() and dlclose().
The dllockinit() interface is now deprecated. It still exists,
but only as a do-nothing stub. I plan to remove it as soon as is
reasonably possible. (From the very beginning it was clearly
labeled as experimental and subject to change.) As far as I know,
only the linuxthreads port uses dllockinit(). This interface turned
out to have several problems. As one example, when the dynamic
linker called a client-supplied locking function, that function
sometimes needed lazy binding, causing re-entry into the dynamic
linker and a big looping mess. And in any case, it turned out to be
too burdensome to require threads packages to register themselves
with the dynamic linker.
2000-07-08 04:10:38 +00:00
|
|
|
/*
 * This function is deprecated and has no effect.
 */
void
dllockinit(void *context,
	   void *(*lock_create)(void *context),
	   void (*rlock_acquire)(void *lock),
	   void (*wlock_acquire)(void *lock),
	   void (*lock_release)(void *lock),
	   void (*lock_destroy)(void *lock),
	   void (*context_destroy)(void *context))
{
    /* Remember the previous caller's context so it can be torn down. */
    static void *cur_context;
    static void (*cur_context_destroy)(void *);

    /* Just destroy the context from the previous call, if necessary. */
    if (cur_context_destroy != NULL)
	cur_context_destroy(cur_context);
    cur_context = context;
    cur_context_destroy = context_destroy;
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
 * dlopen(3) entry point: open by name, with no user-supplied
 * file descriptor.
 */
void *
dlopen(const char *name, int mode)
{

    return rtld_dlopen(name, -1, mode);
}
|
|
|
|
|
|
|
|
/*
 * fdlopen(3) entry point: open by file descriptor instead of by name.
 */
void *
fdlopen(int fd, int mode)
{

    return rtld_dlopen(NULL, fd, mode);
}
|
|
|
|
|
|
|
|
/*
 * Common implementation behind dlopen() and fdlopen().  Exactly one of
 * "name" (pathname) or "fd" (descriptor, with name == NULL) identifies
 * the object.  Translates the public RTLD_* mode bits into internal
 * RTLD_LO_* load flags and dispatches to dlopen_object().
 */
static void *
rtld_dlopen(const char *name, int fd, int mode)
{
    RtldLockState lockstate;
    int lo_flags;

    LD_UTRACE(UTRACE_DLOPEN_START, NULL, NULL, 0, mode, name);
    ld_tracing = (mode & RTLD_TRACE) == 0 ? NULL : "1";
    if (ld_tracing != NULL) {
	/* Tracing needs the current environ; fetch it under the lock.
	   The sigsetjmp target upgrades the read lock if a signal
	   interrupts us while holding it. */
	rlock_acquire(rtld_bind_lock, &lockstate);
	if (sigsetjmp(lockstate.env, 0) != 0)
	    lock_upgrade(rtld_bind_lock, &lockstate);
	environ = (char **)*get_program_var_addr("environ", &lockstate);
	lock_release(rtld_bind_lock, &lockstate);
    }
    lo_flags = RTLD_LO_DLOPEN;
    if (mode & RTLD_NODELETE)
	    lo_flags |= RTLD_LO_NODELETE;
    if (mode & RTLD_NOLOAD)
	    lo_flags |= RTLD_LO_NOLOAD;
    if (ld_tracing != NULL)
	    lo_flags |= RTLD_LO_TRACE;

    return (dlopen_object(name, fd, obj_main, lo_flags,
      mode & (RTLD_MODEMASK | RTLD_GLOBAL)));
}
|
|
|
|
|
2011-12-14 16:47:53 +00:00
|
|
|
static void
|
|
|
|
dlopen_cleanup(Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
|
|
|
|
obj->dl_refcount--;
|
|
|
|
unref_dag(obj);
|
|
|
|
if (obj->refcount == 0)
|
|
|
|
unload_object(obj);
|
|
|
|
}
|
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
/*
 * Core of dlopen(): load the object (plus its needed objects), relocate,
 * run init functions, and return the handle.  Runs under the exclusive
 * rtld bind lock; notifies the debugger via GDB_STATE around the list
 * mutation.  "lo_flags" are internal RTLD_LO_* flags, "mode" the caller's
 * RTLD_MODEMASK | RTLD_GLOBAL bits.  Returns NULL on failure; in trace
 * mode prints the loaded objects and exits the process.
 */
static Obj_Entry *
dlopen_object(const char *name, int fd, Obj_Entry *refobj, int lo_flags,
    int mode)
{
    Obj_Entry **old_obj_tail;
    Obj_Entry *obj;
    Objlist initlist;
    RtldLockState lockstate;
    int result;

    objlist_init(&initlist);

    wlock_acquire(rtld_bind_lock, &lockstate);
    GDB_STATE(RT_ADD,NULL);		/* Tell the debugger we are adding. */

    /* Remember the list tail so we can tell whether anything new loaded. */
    old_obj_tail = obj_tail;
    obj = NULL;
    if (name == NULL && fd == -1) {
	/* dlopen(NULL) returns a handle on the main program itself. */
	obj = obj_main;
	obj->refcount++;
    } else {
	obj = load_object(name, fd, refobj, lo_flags);
    }

    if (obj) {
	obj->dl_refcount++;
	if (mode & RTLD_GLOBAL && objlist_find(&list_global, obj) == NULL)
	    objlist_push_tail(&list_global, obj);
	if (*old_obj_tail != NULL) {		/* We loaded something new. */
	    assert(*old_obj_tail == obj);
	    result = load_needed_objects(obj, lo_flags & RTLD_LO_DLOPEN);
	    init_dag(obj);
	    ref_dag(obj);
	    if (result != -1)
		result = rtld_verify_versions(&obj->dagmembers);
	    if (result != -1 && ld_tracing)
		goto trace;
	    if (result == -1 || (relocate_objects(obj, (mode & RTLD_MODEMASK)
	      == RTLD_NOW, &obj_rtld, &lockstate)) == -1) {
		/* Load or relocation failed: unwind all references. */
		dlopen_cleanup(obj);
		obj = NULL;
	    } else {
		/* Make list of init functions to call. */
		initlist_add_objects(obj, &obj->next, &initlist);
	    }
	} else {
	    /*
	     * Bump the reference counts for objects on this DAG.  If
	     * this is the first dlopen() call for the object that was
	     * already loaded as a dependency, initialize the dag
	     * starting at it.
	     */
	    init_dag(obj);
	    ref_dag(obj);

	    if ((lo_flags & RTLD_LO_TRACE) != 0)
		goto trace;
	}
	/*
	 * Objects marked nodelete (via flag or DF_1_NODELETE) get one
	 * extra DAG reference, taken only once (ref_nodel latch), so
	 * they can never be unloaded.
	 */
	if (obj != NULL && ((lo_flags & RTLD_LO_NODELETE) != 0 ||
	  obj->z_nodelete) && !obj->ref_nodel) {
	    dbg("obj %s nodelete", obj->path);
	    ref_dag(obj);
	    obj->z_nodelete = obj->ref_nodel = true;
	}
    }

    LD_UTRACE(UTRACE_DLOPEN_STOP, obj, NULL, 0, obj ? obj->dl_refcount : 0,
	name);
    GDB_STATE(RT_CONSISTENT,obj ? &obj->linkmap : NULL);

    /* Make stacks executable if any newly loaded object requires it. */
    map_stacks_exec(&lockstate);

    /* Resolve STT_GNU_IFUNC relocations before running init functions. */
    if (initlist_objects_ifunc(&initlist, (mode & RTLD_MODEMASK) == RTLD_NOW,
      &lockstate) == -1) {
	objlist_clear(&initlist);
	dlopen_cleanup(obj);
	lock_release(rtld_bind_lock, &lockstate);
	return (NULL);
    }

    /* Call the init functions. */
    objlist_call_init(&initlist, &lockstate);
    objlist_clear(&initlist);
    lock_release(rtld_bind_lock, &lockstate);
    return obj;
trace:
    /* ldd(1)-style tracing: report and terminate the process. */
    trace_loaded_objects(obj);
    lock_release(rtld_bind_lock, &lockstate);
    exit(0);
}
|
|
|
|
|
2005-12-18 19:43:33 +00:00
|
|
|
/*
 * Common implementation of dlsym(), dlfunc() and dlvsym().  "retaddr" is
 * the caller's return address, used to identify the calling object for
 * the pseudo-handles; "ve" is the optional version constraint; "flags"
 * are SYMLOOK_* flags.  Returns the resolved address (a function pointer
 * or descriptor for STT_FUNC/STT_GNU_IFUNC) or NULL with an error set.
 */
static void *
do_dlsym(void *handle, const char *name, void *retaddr, const Ver_Entry *ve,
    int flags)
{
    DoneList donelist;
    const Obj_Entry *obj, *defobj;
    const Elf_Sym *def;
    SymLook req;
    RtldLockState lockstate;
    int res;

    def = NULL;
    defobj = NULL;
    symlook_init(&req, name);
    req.ventry = ve;
    req.flags = flags | SYMLOOK_IN_PLT;
    req.lockstate = &lockstate;

    rlock_acquire(rtld_bind_lock, &lockstate);
    /* Allow the read lock to be upgraded if a nested bind requires it. */
    if (sigsetjmp(lockstate.env, 0) != 0)
	    lock_upgrade(rtld_bind_lock, &lockstate);
    if (handle == NULL || handle == RTLD_NEXT ||
	handle == RTLD_DEFAULT || handle == RTLD_SELF) {

	/* The pseudo-handles are all interpreted relative to the caller. */
	if ((obj = obj_from_addr(retaddr)) == NULL) {
	    _rtld_error("Cannot determine caller's shared object");
	    lock_release(rtld_bind_lock, &lockstate);
	    return NULL;
	}
	if (handle == NULL) {	/* Just the caller's shared object. */
	    res = symlook_obj(&req, obj);
	    if (res == 0) {
		def = req.sym_out;
		defobj = req.defobj_out;
	    }
	} else if (handle == RTLD_NEXT || /* Objects after caller's */
		   handle == RTLD_SELF) { /* ... caller included */
	    if (handle == RTLD_NEXT)
		obj = obj->next;
	    for (; obj != NULL; obj = obj->next) {
		res = symlook_obj(&req, obj);
		if (res == 0) {
		    /* A strong definition always beats a weak one. */
		    if (def == NULL ||
		      ELF_ST_BIND(req.sym_out->st_info) != STB_WEAK) {
			def = req.sym_out;
			defobj = req.defobj_out;
			if (ELF_ST_BIND(def->st_info) != STB_WEAK)
			    break;
		    }
		}
	    }
	    /*
	     * Search the dynamic linker itself, and possibly resolve the
	     * symbol from there.  This is how the application links to
	     * dynamic linker services such as dlopen.
	     */
	    if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
		res = symlook_obj(&req, &obj_rtld);
		if (res == 0) {
		    def = req.sym_out;
		    defobj = req.defobj_out;
		}
	    }
	} else {
	    assert(handle == RTLD_DEFAULT);
	    res = symlook_default(&req, obj);
	    if (res == 0) {
		defobj = req.defobj_out;
		def = req.sym_out;
	    }
	}
    } else {
	/* A real handle from a previous dlopen(). */
	if ((obj = dlcheck(handle)) == NULL) {
	    lock_release(rtld_bind_lock, &lockstate);
	    return NULL;
	}

	donelist_init(&donelist);
	if (obj->mainprog) {
	    /* Handle obtained by dlopen(NULL, ...) implies global scope. */
	    res = symlook_global(&req, &donelist);
	    if (res == 0) {
		def = req.sym_out;
		defobj = req.defobj_out;
	    }
	    /*
	     * Search the dynamic linker itself, and possibly resolve the
	     * symbol from there.  This is how the application links to
	     * dynamic linker services such as dlopen.
	     */
	    if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
		res = symlook_obj(&req, &obj_rtld);
		if (res == 0) {
		    def = req.sym_out;
		    defobj = req.defobj_out;
		}
	    }
	}
	else {
	    /* Search the whole DAG rooted at the given object. */
	    res = symlook_list(&req, &obj->dagmembers, &donelist);
	    if (res == 0) {
		def = req.sym_out;
		defobj = req.defobj_out;
	    }
	}
    }

    if (def != NULL) {
	lock_release(rtld_bind_lock, &lockstate);

	/*
	 * The value required by the caller is derived from the value
	 * of the symbol. For the ia64 architecture, we need to
	 * construct a function descriptor which the caller can use to
	 * call the function with the right 'gp' value.  For other
	 * architectures and for non-functions, the value is simply
	 * the relocated value of the symbol.
	 */
	if (ELF_ST_TYPE(def->st_info) == STT_FUNC)
	    return (make_function_pointer(def, defobj));
	else if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
	    return (rtld_resolve_ifunc(defobj, def));
	else
	    return (defobj->relocbase + def->st_value);
    }

    _rtld_error("Undefined symbol \"%s\"", name);
    lock_release(rtld_bind_lock, &lockstate);
    return NULL;
}
|
|
|
|
|
2005-12-18 19:43:33 +00:00
|
|
|
/*
 * dlsym(3): resolve "name" relative to "handle".  The caller's return
 * address identifies the calling object; this thin wrapper must not be
 * restructured in a way that moves the __builtin_return_address(0) call
 * out of dlsym()'s own frame.
 */
void *
dlsym(void *handle, const char *name)
{
    return do_dlsym(handle, name, __builtin_return_address(0), NULL,
	SYMLOOK_DLSYM);
}
|
|
|
|
|
2009-04-03 19:17:23 +00:00
|
|
|
dlfunc_t
|
|
|
|
dlfunc(void *handle, const char *name)
|
|
|
|
{
|
|
|
|
union {
|
|
|
|
void *d;
|
|
|
|
dlfunc_t f;
|
|
|
|
} rv;
|
|
|
|
|
|
|
|
rv.d = do_dlsym(handle, name, __builtin_return_address(0), NULL,
|
|
|
|
SYMLOOK_DLSYM);
|
|
|
|
return (rv.f);
|
|
|
|
}
|
|
|
|
|
2005-12-18 19:43:33 +00:00
|
|
|
void *
|
|
|
|
dlvsym(void *handle, const char *name, const char *version)
|
|
|
|
{
|
|
|
|
Ver_Entry ventry;
|
|
|
|
|
|
|
|
ventry.name = version;
|
|
|
|
ventry.file = NULL;
|
|
|
|
ventry.hash = elf_hash(version);
|
|
|
|
ventry.flags= 0;
|
|
|
|
return do_dlsym(handle, name, __builtin_return_address(0), &ventry,
|
|
|
|
SYMLOOK_DLSYM);
|
|
|
|
}
|
|
|
|
|
2010-08-23 15:27:03 +00:00
|
|
|
int
|
|
|
|
_rtld_addr_phdr(const void *addr, struct dl_phdr_info *phdr_info)
|
|
|
|
{
|
|
|
|
const Obj_Entry *obj;
|
2010-12-25 08:51:20 +00:00
|
|
|
RtldLockState lockstate;
|
2010-08-23 15:27:03 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
rlock_acquire(rtld_bind_lock, &lockstate);
|
2010-08-23 15:27:03 +00:00
|
|
|
obj = obj_from_addr(addr);
|
|
|
|
if (obj == NULL) {
|
|
|
|
_rtld_error("No shared object contains address");
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
2010-08-23 15:27:03 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
rtld_fill_dl_phdr_info(obj, phdr_info);
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
2010-08-23 15:27:03 +00:00
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
|
1999-03-24 23:37:35 +00:00
|
|
|
/*
 * dladdr(3): report which object contains "addr" and the nearest symbol
 * at or below it.  Returns 1 on success, 0 when no loaded object
 * contains the address.  dli_sname/dli_saddr remain NULL if no suitable
 * symbol precedes the address.
 */
int
dladdr(const void *addr, Dl_info *info)
{
    const Obj_Entry *obj;
    const Elf_Sym *def;
    void *symbol_addr;
    unsigned long symoffset;
    RtldLockState lockstate;

    rlock_acquire(rtld_bind_lock, &lockstate);
    obj = obj_from_addr(addr);
    if (obj == NULL) {
        _rtld_error("No shared object contains address");
	lock_release(rtld_bind_lock, &lockstate);
        return 0;
    }
    info->dli_fname = obj->path;
    info->dli_fbase = obj->mapbase;
    info->dli_saddr = (void *)0;
    info->dli_sname = NULL;

    /*
     * Walk the symbol list looking for the symbol whose address is
     * closest to the address sent in.
     */
    for (symoffset = 0; symoffset < obj->nchains; symoffset++) {
        def = obj->symtab + symoffset;

        /*
         * Skip the symbol if st_shndx is either SHN_UNDEF or
         * SHN_COMMON: such symbols have no definition in this object.
         */
        if (def->st_shndx == SHN_UNDEF || def->st_shndx == SHN_COMMON)
            continue;

        /*
         * If the symbol is greater than the specified address, or if it
         * is further away from addr than the current nearest symbol,
         * then reject it.
         */
        symbol_addr = obj->relocbase + def->st_value;
        if (symbol_addr > addr || symbol_addr < info->dli_saddr)
            continue;

        /* Update our idea of the nearest symbol. */
        info->dli_sname = obj->strtab + def->st_name;
        info->dli_saddr = symbol_addr;

        /* Exact match? */
        if (info->dli_saddr == addr)
            break;
    }
    lock_release(rtld_bind_lock, &lockstate);
    return 1;
}
|
|
|
|
|
2003-02-13 17:47:44 +00:00
|
|
|
int
|
|
|
|
dlinfo(void *handle, int request, void *p)
|
|
|
|
{
|
|
|
|
const Obj_Entry *obj;
|
2010-12-25 08:51:20 +00:00
|
|
|
RtldLockState lockstate;
|
|
|
|
int error;
|
2003-02-13 17:47:44 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
rlock_acquire(rtld_bind_lock, &lockstate);
|
2003-02-13 17:47:44 +00:00
|
|
|
|
|
|
|
if (handle == NULL || handle == RTLD_SELF) {
|
|
|
|
void *retaddr;
|
|
|
|
|
|
|
|
retaddr = __builtin_return_address(0); /* __GNUC__ only */
|
|
|
|
if ((obj = obj_from_addr(retaddr)) == NULL)
|
|
|
|
_rtld_error("Cannot determine caller's shared object");
|
|
|
|
} else
|
|
|
|
obj = dlcheck(handle);
|
|
|
|
|
|
|
|
if (obj == NULL) {
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
2003-02-13 17:47:44 +00:00
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
|
|
|
error = 0;
|
|
|
|
switch (request) {
|
|
|
|
case RTLD_DI_LINKMAP:
|
|
|
|
*((struct link_map const **)p) = &obj->linkmap;
|
|
|
|
break;
|
|
|
|
case RTLD_DI_ORIGIN:
|
|
|
|
error = rtld_dirname(obj->path, p);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case RTLD_DI_SERINFOSIZE:
|
|
|
|
case RTLD_DI_SERINFO:
|
|
|
|
error = do_search_info(obj, request, (struct dl_serinfo *)p);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
_rtld_error("Invalid request %d passed to dlinfo()", request);
|
|
|
|
error = -1;
|
|
|
|
}
|
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
2003-02-13 17:47:44 +00:00
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2010-08-23 15:27:03 +00:00
|
|
|
static void
|
|
|
|
rtld_fill_dl_phdr_info(const Obj_Entry *obj, struct dl_phdr_info *phdr_info)
|
|
|
|
{
|
|
|
|
|
|
|
|
phdr_info->dlpi_addr = (Elf_Addr)obj->relocbase;
|
|
|
|
phdr_info->dlpi_name = STAILQ_FIRST(&obj->names) ?
|
|
|
|
STAILQ_FIRST(&obj->names)->name : obj->path;
|
|
|
|
phdr_info->dlpi_phdr = obj->phdr;
|
|
|
|
phdr_info->dlpi_phnum = obj->phsize / sizeof(obj->phdr[0]);
|
|
|
|
phdr_info->dlpi_tls_modid = obj->tlsindex;
|
|
|
|
phdr_info->dlpi_tls_data = obj->tlsinit;
|
|
|
|
phdr_info->dlpi_adds = obj_loads;
|
|
|
|
phdr_info->dlpi_subs = obj_loads - obj_count;
|
|
|
|
}
|
|
|
|
|
2007-04-03 18:31:20 +00:00
|
|
|
int
|
|
|
|
dl_iterate_phdr(__dl_iterate_hdr_callback callback, void *param)
|
|
|
|
{
|
|
|
|
struct dl_phdr_info phdr_info;
|
|
|
|
const Obj_Entry *obj;
|
2010-12-25 08:51:20 +00:00
|
|
|
RtldLockState bind_lockstate, phdr_lockstate;
|
|
|
|
int error;
|
2007-04-03 18:31:20 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
wlock_acquire(rtld_phdr_lock, &phdr_lockstate);
|
|
|
|
rlock_acquire(rtld_bind_lock, &bind_lockstate);
|
2007-04-03 18:31:20 +00:00
|
|
|
|
|
|
|
error = 0;
|
|
|
|
|
|
|
|
for (obj = obj_list; obj != NULL; obj = obj->next) {
|
2010-08-23 15:27:03 +00:00
|
|
|
rtld_fill_dl_phdr_info(obj, &phdr_info);
|
2007-04-03 18:31:20 +00:00
|
|
|
if ((error = callback(&phdr_info, sizeof phdr_info, param)) != 0)
|
|
|
|
break;
|
|
|
|
|
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &bind_lockstate);
|
|
|
|
lock_release(rtld_phdr_lock, &phdr_lockstate);
|
2007-04-03 18:31:20 +00:00
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2003-02-13 17:47:44 +00:00
|
|
|
/*
 * State threaded through path_enumerate() into fill_search_info(),
 * serving both the sizing pass (RTLD_DI_SERINFOSIZE) and the filling
 * pass (RTLD_DI_SERINFO) of a dlinfo() search-path query.
 */
struct fill_search_info_args {
    int request;		/* RTLD_DI_SERINFOSIZE or RTLD_DI_SERINFO */
    unsigned int flags;		/* LA_SER_* origin of the current path list */
    Dl_serinfo *serinfo;	/* buffer being sized or filled */
    Dl_serpath *serpath;	/* next entry to fill (filling pass only) */
    char *strspace;		/* next free byte of string storage */
};
|
|
|
|
|
|
|
|
static void *
|
|
|
|
fill_search_info(const char *dir, size_t dirlen, void *param)
|
|
|
|
{
|
|
|
|
struct fill_search_info_args *arg;
|
|
|
|
|
|
|
|
arg = param;
|
|
|
|
|
|
|
|
if (arg->request == RTLD_DI_SERINFOSIZE) {
|
|
|
|
arg->serinfo->dls_cnt ++;
|
2005-11-11 19:57:41 +00:00
|
|
|
arg->serinfo->dls_size += sizeof(Dl_serpath) + dirlen + 1;
|
2003-02-13 17:47:44 +00:00
|
|
|
} else {
|
|
|
|
struct dl_serpath *s_entry;
|
|
|
|
|
|
|
|
s_entry = arg->serpath;
|
|
|
|
s_entry->dls_name = arg->strspace;
|
|
|
|
s_entry->dls_flags = arg->flags;
|
|
|
|
|
|
|
|
strncpy(arg->strspace, dir, dirlen);
|
|
|
|
arg->strspace[dirlen] = '\0';
|
|
|
|
|
|
|
|
arg->strspace += dirlen + 1;
|
|
|
|
arg->serpath++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Back end of dlinfo(RTLD_DI_SERINFO*): report the library search path
 * for "obj".  Runs a sizing pass over all four search sources first;
 * for RTLD_DI_SERINFO it then verifies the caller pre-sized the buffer
 * (per the documented two-call protocol) and fills it.  The two passes
 * MUST enumerate the same sources in the same order, or the string
 * space computed by the first pass will not match the second.
 * Returns 0 on success, -1 on error.
 */
static int
do_search_info(const Obj_Entry *obj, int request, struct dl_serinfo *info)
{
    struct dl_serinfo _info;
    struct fill_search_info_args args;

    args.request = RTLD_DI_SERINFOSIZE;
    args.serinfo = &_info;

    _info.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
    _info.dls_cnt = 0;

    /* Sizing pass over every search source, in lookup order. */
    path_enumerate(ld_library_path, fill_search_info, &args);
    path_enumerate(obj->rpath, fill_search_info, &args);
    path_enumerate(gethints(), fill_search_info, &args);
    path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &args);

    if (request == RTLD_DI_SERINFOSIZE) {
	info->dls_size = _info.dls_size;
	info->dls_cnt = _info.dls_cnt;
	return (0);
    }

    /* The caller must have completed the sizing call first. */
    if (info->dls_cnt != _info.dls_cnt || info->dls_size != _info.dls_size) {
	_rtld_error("Uninitialized Dl_serinfo struct passed to dlinfo()");
	return (-1);
    }

    args.request = RTLD_DI_SERINFO;
    args.serinfo = info;
    args.serpath = &info->dls_serpath[0];
    /* String storage begins immediately after the Dl_serpath array. */
    args.strspace = (char *)&info->dls_serpath[_info.dls_cnt];

    args.flags = LA_SER_LIBPATH;
    if (path_enumerate(ld_library_path, fill_search_info, &args) != NULL)
	return (-1);

    args.flags = LA_SER_RUNPATH;
    if (path_enumerate(obj->rpath, fill_search_info, &args) != NULL)
	return (-1);

    args.flags = LA_SER_CONFIG;
    if (path_enumerate(gethints(), fill_search_info, &args) != NULL)
	return (-1);

    args.flags = LA_SER_DEFAULT;
    if (path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &args) != NULL)
	return (-1);
    return (0);
}
|
|
|
|
|
|
|
|
/*
 * dirname(3)-style helper: store the directory portion of "path" into
 * "bname", which must have room for at least PATH_MAX bytes.  Returns 0
 * on success, -1 (with an error set) if the result would not fit.
 */
static int
rtld_dirname(const char *path, char *bname)
{
    const char *endp;

    /* Empty or NULL string gets treated as "." */
    if (path == NULL || *path == '\0') {
	bname[0] = '.';
	bname[1] = '\0';
	return (0);
    }

    /* Strip trailing slashes */
    endp = path + strlen(path) - 1;
    while (endp > path && *endp == '/')
	endp--;

    /* Find the start of the dir */
    while (endp > path && *endp != '/')
	endp--;

    /* Either the dir is "/" or there are no slashes */
    if (endp == path) {
	bname[0] = *endp == '/' ? '/' : '.';
	bname[1] = '\0';
	return (0);
    } else {
	/* Back up over the slash(es) separating dir from base. */
	do {
	    endp--;
	} while (endp > path && *endp == '/');
    }

    if (endp - path + 2 > PATH_MAX)
    {
	_rtld_error("Filename is too long: %s", path);
	return(-1);
    }

    strncpy(bname, path, endp - path + 1);
    bname[endp - path + 1] = '\0';	/* strncpy does not terminate here */
    return (0);
}
|
|
|
|
|
2009-03-18 13:40:37 +00:00
|
|
|
static int
|
|
|
|
rtld_dirname_abs(const char *path, char *base)
|
|
|
|
{
|
|
|
|
char base_rel[PATH_MAX];
|
|
|
|
|
|
|
|
if (rtld_dirname(path, base) == -1)
|
|
|
|
return (-1);
|
|
|
|
if (base[0] == '/')
|
|
|
|
return (0);
|
|
|
|
if (getcwd(base_rel, sizeof(base_rel)) == NULL ||
|
|
|
|
strlcat(base_rel, "/", sizeof(base_rel)) >= sizeof(base_rel) ||
|
|
|
|
strlcat(base_rel, base, sizeof(base_rel)) >= sizeof(base_rel))
|
|
|
|
return (-1);
|
|
|
|
strcpy(base, base_rel);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
1998-04-30 07:48:02 +00:00
|
|
|
/*
 * Insert "obj" into the debugger-visible r_debug link map.  The entry
 * for the dynamic linker itself must always stay last, so new entries
 * are linked in just before it.
 */
static void
linkmap_add(Obj_Entry *obj)
{
    struct link_map *l = &obj->linkmap;
    struct link_map *prev;

    obj->linkmap.l_name = obj->path;
    obj->linkmap.l_addr = obj->mapbase;
    obj->linkmap.l_ld = obj->dynamic;
#ifdef __mips__
    /* GDB needs load offset on MIPS to use the symbols */
    obj->linkmap.l_offs = obj->relocbase;
#endif

    /* First entry: start the list. */
    if (r_debug.r_map == NULL) {
	r_debug.r_map = l;
	return;
    }

    /*
     * Scan to the end of the list, but not past the entry for the
     * dynamic linker, which we want to keep at the very end.
     */
    for (prev = r_debug.r_map;
      prev->l_next != NULL && prev->l_next != &obj_rtld.linkmap;
      prev = prev->l_next)
	;

    /* Link in the new entry. */
    l->l_prev = prev;
    l->l_next = prev->l_next;
    if (l->l_next != NULL)
	l->l_next->l_prev = l;
    prev->l_next = l;
}
|
|
|
|
|
1998-09-02 02:00:20 +00:00
|
|
|
static void
|
|
|
|
linkmap_delete(Obj_Entry *obj)
|
1998-04-30 07:48:02 +00:00
|
|
|
{
|
|
|
|
struct link_map *l = &obj->linkmap;
|
|
|
|
|
|
|
|
if (l->l_prev == NULL) {
|
|
|
|
if ((r_debug.r_map = l->l_next) != NULL)
|
|
|
|
l->l_next->l_prev = NULL;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((l->l_prev->l_next = l->l_next) != NULL)
|
|
|
|
l->l_next->l_prev = l->l_prev;
|
|
|
|
}
|
1998-05-01 08:39:27 +00:00
|
|
|
|
1998-09-02 02:00:20 +00:00
|
|
|
/*
 * Function for the debugger to set a breakpoint on to gain control.
 *
 * The two parameters allow the debugger to easily find and determine
 * what the runtime loader is doing and to whom it is doing it.
 *
 * When the loadhook trap is hit (r_debug_state, set at program
 * initialization), the arguments can be found on the stack:
 *
 *  +8   struct link_map *m
 *  +4   struct r_debug  *rd
 *  +0   RetAddr
 */
void
r_debug_state(struct r_debug* rd, struct link_map *m)
{
    /*
     * The parameters are deliberately unused here; the debugger reads
     * them from the stack/registers when the breakpoint fires.
     *
     * The following is a hack to force the compiler to emit calls to
     * this function, even when optimizing.  If the function is empty,
     * the compiler is not obliged to emit any code for calls to it,
     * even when marked __noinline.  However, gdb depends on those
     * calls being made.
     */
    __asm __volatile("" : : : "memory");
}
|
|
|
|
|
1999-04-21 04:06:57 +00:00
|
|
|
/*
 * Get address of the pointer variable in the main program.
 * Prefer non-weak symbol over the weak one (symlook_global() implements
 * that precedence).  Returns NULL if the symbol is not found.
 */
static const void **
get_program_var_addr(const char *name, RtldLockState *lockstate)
{
    SymLook req;
    DoneList donelist;

    symlook_init(&req, name);
    req.lockstate = lockstate;
    donelist_init(&donelist);
    if (symlook_global(&req, &donelist) != 0)
	return (NULL);
    /* Functions and ifuncs need their callable address materialized. */
    if (ELF_ST_TYPE(req.sym_out->st_info) == STT_FUNC)
	return ((const void **)make_function_pointer(req.sym_out,
	  req.defobj_out));
    else if (ELF_ST_TYPE(req.sym_out->st_info) == STT_GNU_IFUNC)
	return ((const void **)rtld_resolve_ifunc(req.defobj_out, req.sym_out));
    else
	return ((const void **)(req.defobj_out->relocbase +
	  req.sym_out->st_value));
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set a pointer variable in the main program to the given value. This
|
|
|
|
* is used to set key variables such as "environ" before any of the
|
|
|
|
* init functions are called.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
set_program_var(const char *name, const void *value)
|
|
|
|
{
|
|
|
|
const void **addr;
|
|
|
|
|
2011-01-10 16:09:35 +00:00
|
|
|
if ((addr = get_program_var_addr(name, NULL)) != NULL) {
|
2002-02-04 10:33:48 +00:00
|
|
|
dbg("\"%s\": *%p <-- %p", name, addr, value);
|
|
|
|
*addr = value;
|
|
|
|
}
|
1999-04-21 04:06:57 +00:00
|
|
|
}
|
|
|
|
|
2011-01-10 16:09:35 +00:00
|
|
|
/*
 * Search the global objects, including dependencies and main object,
 * for the given symbol.  A previously found weak definition in *req may
 * be superseded by a strong one; the search order (startup objects
 * first, then RTLD_GLOBAL DAGs) determines which definition wins among
 * equals.  Returns 0 when a definition is recorded in *req, ESRCH
 * otherwise.
 */
static int
symlook_global(SymLook *req, DoneList *donelist)
{
    SymLook req1;
    const Objlist_Entry *elm;
    int res;

    symlook_init_from_req(&req1, req);

    /* Search all objects loaded at program start up. */
    if (req->defobj_out == NULL ||
      ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
	res = symlook_list(&req1, &list_main, donelist);
	if (res == 0 && (req->defobj_out == NULL ||
	  ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
	    req->sym_out = req1.sym_out;
	    req->defobj_out = req1.defobj_out;
	    assert(req->defobj_out != NULL);
	}
    }

    /* Search all DAGs whose roots are RTLD_GLOBAL objects. */
    STAILQ_FOREACH(elm, &list_global, link) {
	/* Stop as soon as a strong definition has been found. */
	if (req->defobj_out != NULL &&
	  ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
	    break;
	res = symlook_list(&req1, &elm->obj->dagmembers, donelist);
	if (res == 0 && (req->defobj_out == NULL ||
	  ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
	    req->sym_out = req1.sym_out;
	    req->defobj_out = req1.defobj_out;
	    assert(req->defobj_out != NULL);
	}
    }

    return (req->sym_out != NULL ? 0 : ESRCH);
}
|
|
|
|
|
2000-09-19 04:27:16 +00:00
|
|
|
/*
 * Given a symbol name in a referencing object, find the corresponding
 * definition of the symbol.  Returns a pointer to the symbol, or NULL if
 * no definition was found.  Returns a pointer to the Obj_Entry of the
 * defining object via the reference parameter DEFOBJ_OUT.
 *
 * Search order: the referencing object itself (if DF_SYMBOLIC), then
 * the global scope, then every dlopened DAG containing the referencing
 * object, and finally the dynamic linker itself.  A weak definition is
 * kept but can be superseded by a later strong one.
 */
static int
symlook_default(SymLook *req, const Obj_Entry *refobj)
{
    DoneList donelist;
    const Objlist_Entry *elm;
    SymLook req1;
    int res;

    donelist_init(&donelist);
    symlook_init_from_req(&req1, req);

    /* Look first in the referencing object if linked symbolically. */
    if (refobj->symbolic && !donelist_check(&donelist, refobj)) {
	res = symlook_obj(&req1, refobj);
	if (res == 0) {
	    req->sym_out = req1.sym_out;
	    req->defobj_out = req1.defobj_out;
	    assert(req->defobj_out != NULL);
	}
    }

    symlook_global(req, &donelist);

    /* Search all dlopened DAGs containing the referencing object. */
    STAILQ_FOREACH(elm, &refobj->dldags, link) {
	/* A strong definition ends the search. */
	if (req->sym_out != NULL &&
	  ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
	    break;
	res = symlook_list(&req1, &elm->obj->dagmembers, &donelist);
	if (res == 0 && (req->sym_out == NULL ||
	  ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
	    req->sym_out = req1.sym_out;
	    req->defobj_out = req1.defobj_out;
	    assert(req->defobj_out != NULL);
	}
    }

    /*
     * Search the dynamic linker itself, and possibly resolve the
     * symbol from there.  This is how the application links to
     * dynamic linker services such as dlopen.
     */
    if (req->sym_out == NULL ||
      ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
	res = symlook_obj(&req1, &obj_rtld);
	if (res == 0) {
	    req->sym_out = req1.sym_out;
	    req->defobj_out = req1.defobj_out;
	    assert(req->defobj_out != NULL);
	}
    }

    return (req->sym_out != NULL ? 0 : ESRCH);
}
|
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
/*
 * Look up a symbol in every object on the given list, honoring ELF
 * weak-binding rules: the first strong (non-weak) definition wins
 * immediately; otherwise the first weak definition found is kept as a
 * fallback.  Objects already recorded in "dlp" are skipped (and each
 * visited object is added to it by donelist_check).  On success the
 * result is written to req->sym_out/req->defobj_out and 0 is returned;
 * ESRCH is returned if no definition was found.
 */
static int
symlook_list(SymLook *req, const Objlist *objlist, DoneList *dlp)
{
    const Elf_Sym *def;
    const Obj_Entry *defobj;
    const Objlist_Entry *elm;
    SymLook req1;
    int res;

    def = NULL;
    defobj = NULL;
    STAILQ_FOREACH(elm, objlist, link) {
	/* donelist_check also marks the object as visited. */
	if (donelist_check(dlp, elm->obj))
	    continue;
	symlook_init_from_req(&req1, req);
	if ((res = symlook_obj(&req1, elm->obj)) == 0) {
	    /* Remember the first definition; replace a weak one with
	       a later strong one. */
	    if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
		def = req1.sym_out;
		defobj = req1.defobj_out;
		/* A strong definition ends the search. */
		if (ELF_ST_BIND(def->st_info) != STB_WEAK)
		    break;
	    }
	}
    }
    if (def != NULL) {
	req->sym_out = def;
	req->defobj_out = defobj;
	return (0);
    }
    return (ESRCH);
}
|
|
|
|
|
2006-09-19 12:47:13 +00:00
|
|
|
/*
 * Search the chain of DAGs pointed to by the given Needed_Entry
 * for a symbol of the given name.  Each DAG is scanned completely
 * before advancing to the next one.  Returns 0 with the symbol
 * stored into the request on success, or ESRCH if no definition
 * was found.
 */
|
2010-12-25 08:51:20 +00:00
|
|
|
static int
symlook_needed(SymLook *req, const Needed_Entry *needed, DoneList *dlp)
{
    const Elf_Sym *def;
    const Needed_Entry *n;
    const Obj_Entry *defobj;
    SymLook req1;
    int res;

    def = NULL;
    defobj = NULL;
    symlook_init_from_req(&req1, req);
    /* Scan each needed entry's whole DAG before moving to the next. */
    for (n = needed; n != NULL; n = n->next) {
	if (n->obj == NULL ||
	    (res = symlook_list(&req1, &n->obj->dagmembers, dlp)) != 0)
	    continue;
	/* Keep the first hit; a later strong definition overrides a
	   weak one, and a strong definition terminates the search. */
	if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
	    def = req1.sym_out;
	    defobj = req1.defobj_out;
	    if (ELF_ST_BIND(def->st_info) != STB_WEAK)
		break;
	}
    }
    if (def != NULL) {
	req->sym_out = def;
	req->defobj_out = defobj;
	return (0);
    }
    return (ESRCH);
}
|
|
|
|
|
1998-09-02 02:00:20 +00:00
|
|
|
/*
 * Search the symbol table of a single shared object for a symbol of
 * the given name and version, if requested.  Returns 0 with the
 * result stored into the request on success, or ESRCH if no
 * definition was found.  If the object is a filter, the filtered
 * symbol is resolved from the filtee instead.
 *
 * The symbol's hash value is passed in for efficiency reasons; that
 * eliminates many recomputations of the hash value.
 */
|
2010-12-25 08:51:20 +00:00
|
|
|
int
symlook_obj(SymLook *req, const Obj_Entry *obj)
{
    DoneList donelist;
    SymLook req1;
    int res, mres;

    /* First try the object's own symbol table. */
    mres = symlook_obj1(req, obj);
    if (mres == 0) {
	if (obj->needed_filtees != NULL) {
	    /* Standard filter (DT_FILTER): the definition must come
	       from the filtee; the filter's own result is only used to
	       decide that the lookup applies.  Note that load_filtees
	       may drop to const here via __DECONST to lazily load the
	       filtee objects. */
	    load_filtees(__DECONST(Obj_Entry *, obj), 0, req->lockstate);
	    donelist_init(&donelist);
	    symlook_init_from_req(&req1, req);
	    res = symlook_needed(&req1, obj->needed_filtees, &donelist);
	    if (res == 0) {
		req->sym_out = req1.sym_out;
		req->defobj_out = req1.defobj_out;
	    }
	    /* Return the filtee's result unconditionally: a standard
	       filter never falls back to its own definition. */
	    return (res);
	}
	if (obj->needed_aux_filtees != NULL) {
	    /* Auxiliary filter (DT_AUXILIARY): prefer the filtee's
	       definition, but fall back to the filter's own symbol
	       (mres == 0 below) when the filtee lookup fails. */
	    load_filtees(__DECONST(Obj_Entry *, obj), 0, req->lockstate);
	    donelist_init(&donelist);
	    symlook_init_from_req(&req1, req);
	    res = symlook_needed(&req1, obj->needed_aux_filtees, &donelist);
	    if (res == 0) {
		req->sym_out = req1.sym_out;
		req->defobj_out = req1.defobj_out;
		return (res);
	    }
	}
    }
    return (mres);
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
symlook_obj1(SymLook *req, const Obj_Entry *obj)
|
1998-09-02 02:00:20 +00:00
|
|
|
{
|
2005-12-18 19:43:33 +00:00
|
|
|
unsigned long symnum;
|
|
|
|
const Elf_Sym *vsymp;
|
|
|
|
Elf_Versym verndx;
|
|
|
|
int vcount;
|
|
|
|
|
|
|
|
if (obj->buckets == NULL)
|
2010-12-25 08:51:20 +00:00
|
|
|
return (ESRCH);
|
1998-09-02 02:00:20 +00:00
|
|
|
|
2005-12-18 19:43:33 +00:00
|
|
|
vsymp = NULL;
|
|
|
|
vcount = 0;
|
2010-12-25 08:51:20 +00:00
|
|
|
symnum = obj->buckets[req->hash % obj->nbuckets];
|
1998-09-02 02:00:20 +00:00
|
|
|
|
2005-12-18 19:43:33 +00:00
|
|
|
for (; symnum != STN_UNDEF; symnum = obj->chains[symnum]) {
|
|
|
|
const Elf_Sym *symp;
|
|
|
|
const char *strp;
|
|
|
|
|
|
|
|
if (symnum >= obj->nchains)
|
2010-12-25 08:51:20 +00:00
|
|
|
return (ESRCH); /* Bad object */
|
1998-09-02 02:00:20 +00:00
|
|
|
|
2005-12-18 19:43:33 +00:00
|
|
|
symp = obj->symtab + symnum;
|
|
|
|
strp = obj->strtab + symp->st_name;
|
|
|
|
|
|
|
|
switch (ELF_ST_TYPE(symp->st_info)) {
|
|
|
|
case STT_FUNC:
|
|
|
|
case STT_NOTYPE:
|
|
|
|
case STT_OBJECT:
|
2011-12-12 11:03:14 +00:00
|
|
|
case STT_GNU_IFUNC:
|
2005-12-18 19:43:33 +00:00
|
|
|
if (symp->st_value == 0)
|
|
|
|
continue;
|
|
|
|
/* fallthrough */
|
|
|
|
case STT_TLS:
|
2008-04-04 20:59:26 +00:00
|
|
|
if (symp->st_shndx != SHN_UNDEF)
|
|
|
|
break;
|
|
|
|
#ifndef __mips__
|
2010-12-25 08:51:20 +00:00
|
|
|
else if (((req->flags & SYMLOOK_IN_PLT) == 0) &&
|
2008-04-04 20:59:26 +00:00
|
|
|
(ELF_ST_TYPE(symp->st_info) == STT_FUNC))
|
2005-12-18 19:43:33 +00:00
|
|
|
break;
|
|
|
|
/* fallthrough */
|
2008-04-04 20:59:26 +00:00
|
|
|
#endif
|
2005-12-18 19:43:33 +00:00
|
|
|
default:
|
|
|
|
continue;
|
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
if (req->name[0] != strp[0] || strcmp(req->name, strp) != 0)
|
2005-12-18 19:43:33 +00:00
|
|
|
continue;
|
1998-09-02 02:00:20 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
if (req->ventry == NULL) {
|
2005-12-18 19:43:33 +00:00
|
|
|
if (obj->versyms != NULL) {
|
|
|
|
verndx = VER_NDX(obj->versyms[symnum]);
|
|
|
|
if (verndx > obj->vernum) {
|
|
|
|
_rtld_error("%s: symbol %s references wrong version %d",
|
|
|
|
obj->path, obj->strtab + symnum, verndx);
|
|
|
|
continue;
|
|
|
|
}
|
2005-12-24 15:37:30 +00:00
|
|
|
/*
|
|
|
|
* If we are not called from dlsym (i.e. this is a normal
|
2010-12-25 08:51:20 +00:00
|
|
|
* relocation from unversioned binary), accept the symbol
|
2005-12-24 15:37:30 +00:00
|
|
|
* immediately if it happens to have first version after
|
|
|
|
* this shared object became versioned. Otherwise, if
|
|
|
|
* symbol is versioned and not hidden, remember it. If it
|
|
|
|
* is the only symbol with this name exported by the
|
|
|
|
* shared object, it will be returned as a match at the
|
|
|
|
* end of the function. If symbol is global (verndx < 2)
|
|
|
|
* accept it unconditionally.
|
|
|
|
*/
|
2010-12-25 08:51:20 +00:00
|
|
|
if ((req->flags & SYMLOOK_DLSYM) == 0 &&
|
|
|
|
verndx == VER_NDX_GIVEN) {
|
|
|
|
req->sym_out = symp;
|
|
|
|
req->defobj_out = obj;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
else if (verndx >= VER_NDX_GIVEN) {
|
2005-12-24 15:37:30 +00:00
|
|
|
if ((obj->versyms[symnum] & VER_NDX_HIDDEN) == 0) {
|
|
|
|
if (vsymp == NULL)
|
|
|
|
vsymp = symp;
|
|
|
|
vcount ++;
|
|
|
|
}
|
2005-12-18 19:43:33 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
req->sym_out = symp;
|
|
|
|
req->defobj_out = obj;
|
|
|
|
return (0);
|
2005-12-18 19:43:33 +00:00
|
|
|
} else {
|
|
|
|
if (obj->versyms == NULL) {
|
2010-12-25 08:51:20 +00:00
|
|
|
if (object_match_name(obj, req->ventry->name)) {
|
2006-08-04 13:37:54 +00:00
|
|
|
_rtld_error("%s: object %s should provide version %s for "
|
2010-12-25 08:51:20 +00:00
|
|
|
"symbol %s", obj_rtld.path, obj->path,
|
|
|
|
req->ventry->name, obj->strtab + symnum);
|
2005-12-18 19:43:33 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
verndx = VER_NDX(obj->versyms[symnum]);
|
|
|
|
if (verndx > obj->vernum) {
|
|
|
|
_rtld_error("%s: symbol %s references wrong version %d",
|
|
|
|
obj->path, obj->strtab + symnum, verndx);
|
|
|
|
continue;
|
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
if (obj->vertab[verndx].hash != req->ventry->hash ||
|
|
|
|
strcmp(obj->vertab[verndx].name, req->ventry->name)) {
|
2005-12-18 19:43:33 +00:00
|
|
|
/*
|
2005-12-24 15:37:30 +00:00
|
|
|
* Version does not match. Look if this is a global symbol
|
|
|
|
* and if it is not hidden. If global symbol (verndx < 2)
|
2005-12-18 19:43:33 +00:00
|
|
|
* is available, use it. Do not return symbol if we are
|
|
|
|
* called by dlvsym, because dlvsym looks for a specific
|
|
|
|
* version and default one is not what dlvsym wants.
|
|
|
|
*/
|
2010-12-25 08:51:20 +00:00
|
|
|
if ((req->flags & SYMLOOK_DLSYM) ||
|
2005-12-18 19:43:33 +00:00
|
|
|
(obj->versyms[symnum] & VER_NDX_HIDDEN) ||
|
|
|
|
(verndx >= VER_NDX_GIVEN))
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
req->sym_out = symp;
|
|
|
|
req->defobj_out = obj;
|
|
|
|
return (0);
|
1999-04-09 00:28:43 +00:00
|
|
|
}
|
1998-09-02 02:00:20 +00:00
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
if (vcount == 1) {
|
|
|
|
req->sym_out = vsymp;
|
|
|
|
req->defobj_out = obj;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
return (ESRCH);
|
1998-09-02 02:00:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Implement LD_TRACE_LOADED_OBJECTS (the "ldd" mode): for each object,
 * print its needed entries using the user-overridable format strings
 * LD_TRACE_LOADED_OBJECTS_FMT1/FMT2.  The format language supports
 * "\n"/"\t" escapes and %A/%a/%o/%p/%x/%% expansions interpreted below.
 */
static void
trace_loaded_objects(Obj_Entry *obj)
{
    char	*fmt1, *fmt2, *fmt, *main_local, *list_containers;
    int		c;

    if ((main_local = getenv(LD_ "TRACE_LOADED_OBJECTS_PROGNAME")) == NULL)
	main_local = "";

    if ((fmt1 = getenv(LD_ "TRACE_LOADED_OBJECTS_FMT1")) == NULL)
	fmt1 = "\t%o => %p (%x)\n";	/* default for "lib*" entries */

    if ((fmt2 = getenv(LD_ "TRACE_LOADED_OBJECTS_FMT2")) == NULL)
	fmt2 = "\t%o (%x)\n";		/* default for everything else */

    list_containers = getenv(LD_ "TRACE_LOADED_OBJECTS_ALL");

    for (; obj; obj = obj->next) {
	Needed_Entry *needed;
	char *name, *path;
	bool is_lib;

	if (list_containers && obj->needed != NULL)
	    rtld_printf("%s:\n", obj->path);
	for (needed = obj->needed; needed; needed = needed->next) {
	    if (needed->obj != NULL) {
		/* Print each resolved object once unless listing all. */
		if (needed->obj->traced && !list_containers)
		    continue;
		needed->obj->traced = true;
		path = needed->obj->path;
	    } else
		path = "not found";

	    name = (char *)obj->strtab + needed->name;
	    is_lib = strncmp(name, "lib", 3) == 0;	/* XXX - bogus */

	    fmt = is_lib ? fmt1 : fmt2;
	    /* Tiny interpreter over the format string; note that fmt has
	       already advanced past c, and ++fmt at the bottom consumes
	       the character following an escape/expansion. */
	    while ((c = *fmt++) != '\0') {
		switch (c) {
		default:
		    rtld_putchar(c);
		    continue;
		case '\\':
		    switch (c = *fmt) {
		    case '\0':
			continue;
		    case 'n':
			rtld_putchar('\n');
			break;
		    case 't':
			rtld_putchar('\t');
			break;
		    }
		    break;
		case '%':
		    switch (c = *fmt) {
		    case '\0':
			continue;
		    case '%':
		    default:
			rtld_putchar(c);
			break;
		    case 'A':		/* main program name override */
			rtld_putstr(main_local);
			break;
		    case 'a':		/* main program path */
			rtld_putstr(obj_main->path);
			break;
		    case 'o':		/* needed entry name */
			rtld_putstr(name);
			break;
#if 0
		    case 'm':
			rtld_printf("%d", sodp->sod_major);
			break;
		    case 'n':
			rtld_printf("%d", sodp->sod_minor);
			break;
#endif
		    case 'p':		/* resolved path or "not found" */
			rtld_putstr(path);
			break;
		    case 'x':		/* map base address */
			rtld_printf("%p", needed->obj ? needed->obj->mapbase :
			  0);
			break;
		    }
		    break;
		}
		++fmt;
	    }
	}
    }
}
|
1998-09-02 02:00:20 +00:00
|
|
|
|
1999-08-30 01:48:19 +00:00
|
|
|
/*
|
2000-01-09 21:13:48 +00:00
|
|
|
* Unload a dlopened object and its dependencies from memory and from
|
|
|
|
* our data structures. It is assumed that the DAG rooted in the
|
|
|
|
* object has already been unreferenced, and that the object has a
|
|
|
|
* reference count of 0.
|
1999-08-30 01:48:19 +00:00
|
|
|
*/
|
1999-07-09 16:22:55 +00:00
|
|
|
static void
|
2000-01-09 21:13:48 +00:00
|
|
|
unload_object(Obj_Entry *root)
|
1999-07-09 16:22:55 +00:00
|
|
|
{
|
2000-01-09 21:13:48 +00:00
|
|
|
Obj_Entry *obj;
|
|
|
|
Obj_Entry **linkp;
|
|
|
|
|
|
|
|
assert(root->refcount == 0);
|
|
|
|
|
2003-02-17 20:58:27 +00:00
|
|
|
/*
|
|
|
|
* Pass over the DAG removing unreferenced objects from
|
|
|
|
* appropriate lists.
|
2003-05-29 22:58:26 +00:00
|
|
|
*/
|
2003-02-17 20:58:27 +00:00
|
|
|
unlink_object(root);
|
|
|
|
|
2000-01-09 21:13:48 +00:00
|
|
|
/* Unmap all objects that are no longer referenced. */
|
|
|
|
linkp = &obj_list->next;
|
|
|
|
while ((obj = *linkp) != NULL) {
|
|
|
|
if (obj->refcount == 0) {
|
2007-01-09 17:50:05 +00:00
|
|
|
LD_UTRACE(UTRACE_UNLOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0,
|
|
|
|
obj->path);
|
2000-01-09 21:13:48 +00:00
|
|
|
dbg("unloading \"%s\"", obj->path);
|
2010-12-25 08:51:20 +00:00
|
|
|
unload_filtees(root);
|
2000-01-09 21:13:48 +00:00
|
|
|
munmap(obj->mapbase, obj->mapsize);
|
|
|
|
linkmap_delete(obj);
|
|
|
|
*linkp = obj->next;
|
Solve the dynamic linker's problems with multithreaded programs once
and for all (I hope). Packages such as wine, JDK, and linuxthreads
should no longer have any problems with re-entering the dynamic
linker.
This commit replaces the locking used in the dynamic linker with a
new spinlock-based reader/writer lock implementation. Brian
Fundakowski Feldman <green> argued for this from the very beginning,
but it took me a long time to come around to his point of view.
Spinlocks are the only kinds of locks that work with all thread
packages. But on uniprocessor systems they can be inefficient,
because while a contender for the lock is spinning the holder of the
lock cannot make any progress toward releasing it. To alleviate
this disadvantage I have borrowed a trick from Sleepycat's Berkeley
DB implementation. When spinning for a lock, the requester does a
nanosleep() call for 1 usec. each time around the loop. This will
generally yield the CPU to other threads, allowing the lock holder
to finish its business and release the lock. I chose 1 usec. as the
minimum sleep which would with reasonable certainty not be rounded
down to 0.
The formerly machine-independent file "lockdflt.c" has been moved
into the architecture-specific subdirectories by repository copy.
It now contains the machine-dependent spinlocking code. For the
spinlocks I used the very nifty "simple, non-scalable reader-preference
lock" which I found at
<http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/rw.html>
on all CPUs except the 80386 (the specific CPU model, not the
architecture). The 80386 CPU doesn't support the necessary "cmpxchg"
instruction, so on that CPU a simple exclusive test-and-set lock
is used instead. 80386 CPUs are detected at initialization time by
trying to execute "cmpxchg" and catching the resulting SIGILL
signal.
To reduce contention for the locks, I have revamped a couple of
key data structures, permitting all common operations to be done
under non-exclusive (reader) locking. The only operations that
require exclusive locking now are the rare intrusive operations
such as dlopen() and dlclose().
The dllockinit() interface is now deprecated. It still exists,
but only as a do-nothing stub. I plan to remove it as soon as is
reasonably possible. (From the very beginning it was clearly
labeled as experimental and subject to change.) As far as I know,
only the linuxthreads port uses dllockinit(). This interface turned
out to have several problems. As one example, when the dynamic
linker called a client-supplied locking function, that function
sometimes needed lazy binding, causing re-entry into the dynamic
linker and a big looping mess. And in any case, it turned out to be
too burdensome to require threads packages to register themselves
with the dynamic linker.
2000-07-08 04:10:38 +00:00
|
|
|
obj_count--;
|
2000-01-09 21:13:48 +00:00
|
|
|
obj_free(obj);
|
|
|
|
} else
|
|
|
|
linkp = &obj->next;
|
1999-07-09 16:22:55 +00:00
|
|
|
}
|
2000-01-09 21:13:48 +00:00
|
|
|
obj_tail = linkp;
|
1999-07-09 16:22:55 +00:00
|
|
|
}
|
|
|
|
|
1998-09-02 02:00:20 +00:00
|
|
|
static void
|
2003-02-17 20:58:27 +00:00
|
|
|
unlink_object(Obj_Entry *root)
|
1998-09-02 02:00:20 +00:00
|
|
|
{
|
2003-02-10 23:15:07 +00:00
|
|
|
Objlist_Entry *elm;
|
1999-08-20 22:33:44 +00:00
|
|
|
|
2003-02-10 23:15:07 +00:00
|
|
|
if (root->refcount == 0) {
|
|
|
|
/* Remove the object from the RTLD_GLOBAL list. */
|
|
|
|
objlist_remove(&list_global, root);
|
|
|
|
|
|
|
|
/* Remove the object from all objects' DAG lists. */
|
2005-12-18 19:43:33 +00:00
|
|
|
STAILQ_FOREACH(elm, &root->dagmembers, link) {
|
2003-02-10 23:15:07 +00:00
|
|
|
objlist_remove(&elm->obj->dldags, root);
|
2003-05-08 01:31:36 +00:00
|
|
|
if (elm->obj != root)
|
|
|
|
unlink_object(elm->obj);
|
|
|
|
}
|
2003-02-10 23:15:07 +00:00
|
|
|
}
|
2003-05-08 01:31:36 +00:00
|
|
|
}
|
2003-02-17 20:58:27 +00:00
|
|
|
|
2003-05-08 01:31:36 +00:00
|
|
|
static void
|
|
|
|
ref_dag(Obj_Entry *root)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
2010-11-04 09:29:00 +00:00
|
|
|
assert(root->dag_inited);
|
2005-12-18 19:43:33 +00:00
|
|
|
STAILQ_FOREACH(elm, &root->dagmembers, link)
|
2003-05-08 01:31:36 +00:00
|
|
|
elm->obj->refcount++;
|
2003-02-17 20:58:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
unref_dag(Obj_Entry *root)
|
|
|
|
{
|
2003-05-08 01:31:36 +00:00
|
|
|
Objlist_Entry *elm;
|
2003-02-17 20:58:27 +00:00
|
|
|
|
2010-11-04 09:29:00 +00:00
|
|
|
assert(root->dag_inited);
|
2005-12-18 19:43:33 +00:00
|
|
|
STAILQ_FOREACH(elm, &root->dagmembers, link)
|
2003-05-08 01:31:36 +00:00
|
|
|
elm->obj->refcount--;
|
1998-09-02 02:00:20 +00:00
|
|
|
}
|
2003-05-29 22:58:26 +00:00
|
|
|
|
2004-08-03 08:51:00 +00:00
|
|
|
/*
|
|
|
|
* Common code for MD __tls_get_addr().
|
|
|
|
*/
|
2012-03-10 08:49:44 +00:00
|
|
|
/*
 * Slow path of tls_get_addr_common(): the DTV is stale or the module's
 * TLS block has not been allocated yet.  Grows/refreshes the DTV and
 * allocates the module block under the bind lock, then returns the
 * address of (module block + offset).
 */
static void *tls_get_addr_slow(Elf_Addr **, int, size_t) __noinline;
static void *
tls_get_addr_slow(Elf_Addr **dtvp, int index, size_t offset)
{
    Elf_Addr *newdtv, *dtv;
    RtldLockState lockstate;
    int to_copy;

    dtv = *dtvp;
    /* Check dtv generation in case new modules have arrived */
    if (dtv[0] != tls_dtv_generation) {
	wlock_acquire(rtld_bind_lock, &lockstate);
	/* New DTV sized for the current maximum module index; slots
	   [0]=generation, [1]=capacity, [2..] per-module blocks. */
	newdtv = calloc(1, (tls_max_index + 2) * sizeof(Elf_Addr));
	to_copy = dtv[1];
	if (to_copy > tls_max_index)
	    to_copy = tls_max_index;
	memcpy(&newdtv[2], &dtv[2], to_copy * sizeof(Elf_Addr));
	newdtv[0] = tls_dtv_generation;
	newdtv[1] = tls_max_index;
	free(dtv);
	lock_release(rtld_bind_lock, &lockstate);
	dtv = *dtvp = newdtv;
    }

    /* Dynamically allocate module TLS if necessary */
    if (dtv[index + 1] == 0) {
	/* Signal safe, wlock will block out signals. */
	wlock_acquire(rtld_bind_lock, &lockstate);
	/* Re-check under the lock in case another thread won the race. */
	if (!dtv[index + 1])
	    dtv[index + 1] = (Elf_Addr)allocate_module_tls(index);
	lock_release(rtld_bind_lock, &lockstate);
    }
    return ((void *)(dtv[index + 1] + offset));
}
|
|
|
|
|
|
|
|
/*
 * Common code for MD __tls_get_addr(): fast path that avoids locking
 * when the DTV generation is current and the module's TLS block is
 * already allocated; otherwise defer to tls_get_addr_slow().
 */
void *
tls_get_addr_common(Elf_Addr **dtvp, int index, size_t offset)
{
    Elf_Addr *dtv;

    dtv = *dtvp;
    /* Check dtv generation in case new modules have arrived */
    if (__predict_true(dtv[0] == tls_dtv_generation &&
	dtv[index + 1] != 0))
	return ((void *)(dtv[index + 1] + offset));
    return (tls_get_addr_slow(dtvp, index, offset));
}
|
|
|
|
|
2012-02-14 00:16:34 +00:00
|
|
|
#if defined(__arm__) || defined(__ia64__) || defined(__mips__) || defined(__powerpc__)
|
2004-08-03 08:51:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate Static TLS using the Variant I method.
|
|
|
|
*/
|
|
|
|
/*
 * Allocate static TLS using the Variant I layout (TCB at the low end,
 * TLS blocks above it).  If "oldtcb" is supplied and the requested TCB
 * size matches, it is reused; otherwise a new TCB+TLS area is built,
 * either by migrating the old one (adjusting DTV pointers that pointed
 * into the old static block) or by initializing fresh blocks from each
 * object's TLS template.
 */
void *
allocate_tls(Obj_Entry *objs, void *oldtcb, size_t tcbsize, size_t tcbalign)
{
    Obj_Entry *obj;
    char *tcb;
    Elf_Addr **tls;
    Elf_Addr *dtv;
    Elf_Addr addr;
    int i;

    if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE)
	return (oldtcb);

    assert(tcbsize >= TLS_TCB_SIZE);
    /* One allocation covers the TCB plus the static TLS space. */
    tcb = calloc(1, tls_static_space - TLS_TCB_SIZE + tcbsize);
    tls = (Elf_Addr **)(tcb + tcbsize - TLS_TCB_SIZE);

    if (oldtcb != NULL) {
	memcpy(tls, oldtcb, tls_static_space);
	free(oldtcb);

	/* Adjust the DTV. */
	dtv = tls[0];
	for (i = 0; i < dtv[1]; i++) {
	    /* Rebase only entries that pointed into the old static area;
	       dynamically allocated module blocks are left alone. */
	    if (dtv[i+2] >= (Elf_Addr)oldtcb &&
		dtv[i+2] < (Elf_Addr)oldtcb + tls_static_space) {
		dtv[i+2] = dtv[i+2] - (Elf_Addr)oldtcb + (Elf_Addr)tls;
	    }
	}
    } else {
	dtv = calloc(tls_max_index + 2, sizeof(Elf_Addr));
	tls[0] = dtv;
	dtv[0] = tls_dtv_generation;
	dtv[1] = tls_max_index;

	/* Initialize each object's static block from its template and
	   zero-fill the remainder. */
	for (obj = objs; obj; obj = obj->next) {
	    if (obj->tlsoffset > 0) {
		addr = (Elf_Addr)tls + obj->tlsoffset;
		if (obj->tlsinitsize > 0)
		    memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
		if (obj->tlssize > obj->tlsinitsize)
		    memset((void*) (addr + obj->tlsinitsize), 0,
			obj->tlssize - obj->tlsinitsize);
		dtv[obj->tlsindex + 1] = addr;
	    }
	}
    }

    return (tcb);
}
|
|
|
|
|
|
|
|
/*
 * Free a Variant I TLS area: release every dynamically allocated
 * module block (those whose DTV entries point outside the static
 * area), then the DTV itself, then the TCB allocation.
 */
void
free_tls(void *tcb, size_t tcbsize, size_t tcbalign)
{
    Elf_Addr *dtv;
    Elf_Addr tlsstart, tlsend;
    int dtvsize, i;

    assert(tcbsize >= TLS_TCB_SIZE);

    tlsstart = (Elf_Addr)tcb + tcbsize - TLS_TCB_SIZE;
    tlsend = tlsstart + tls_static_space;

    dtv = *(Elf_Addr **)tlsstart;
    dtvsize = dtv[1];
    for (i = 0; i < dtvsize; i++) {
	/* Entries inside [tlsstart, tlsend) live in the static block
	   and are freed along with the TCB below. */
	if (dtv[i+2] && (dtv[i+2] < tlsstart || dtv[i+2] >= tlsend)) {
	    free((void*)dtv[i+2]);
	}
    }
    free(dtv);
    free(tcb);
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2012-02-14 00:16:34 +00:00
|
|
|
#if defined(__i386__) || defined(__amd64__) || defined(__sparc64__)
|
2004-08-03 08:51:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate Static TLS using the Variant II method.
|
|
|
|
*/
|
|
|
|
/*
 * Allocate static TLS using the Variant II layout (TLS blocks below
 * the segment base; segbase[0] is a self-pointer, segbase[1] the DTV).
 * If "oldtls" is supplied, its static block and any dynamically
 * allocated module blocks are migrated into the new area and the old
 * area is freed; otherwise blocks are initialized from each object's
 * TLS template.
 */
void *
allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign)
{
    Obj_Entry *obj;
    size_t size;
    char *tls;
    Elf_Addr *dtv, *olddtv;
    Elf_Addr segbase, oldsegbase, addr;
    int i;

    size = round(tls_static_space, tcbalign);

    assert(tcbsize >= 2*sizeof(Elf_Addr));
    tls = calloc(1, size + tcbsize);
    dtv = calloc(1, (tls_max_index + 2) * sizeof(Elf_Addr));

    segbase = (Elf_Addr)(tls + size);
    ((Elf_Addr*)segbase)[0] = segbase;		/* self-pointer (TP) */
    ((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv;

    dtv[0] = tls_dtv_generation;
    dtv[1] = tls_max_index;

    if (oldtls) {
	/*
	 * Copy the static TLS block over whole.
	 */
	oldsegbase = (Elf_Addr) oldtls;
	memcpy((void *)(segbase - tls_static_space),
	       (const void *)(oldsegbase - tls_static_space),
	       tls_static_space);

	/*
	 * If any dynamic TLS blocks have been created tls_get_addr(),
	 * move them over.
	 */
	olddtv = ((Elf_Addr**)oldsegbase)[1];
	for (i = 0; i < olddtv[1]; i++) {
	    /* Entries outside the old static area were malloc'ed by
	       tls_get_addr(); transfer ownership to the new DTV. */
	    if (olddtv[i+2] < oldsegbase - size || olddtv[i+2] > oldsegbase) {
		dtv[i+2] = olddtv[i+2];
		olddtv[i+2] = 0;
	    }
	}

	/*
	 * We assume that this block was the one we created with
	 * allocate_initial_tls().
	 */
	free_tls(oldtls, 2*sizeof(Elf_Addr), sizeof(Elf_Addr));
    } else {
	/* Fresh area: seed each object's block from its template. */
	for (obj = objs; obj; obj = obj->next) {
	    if (obj->tlsoffset) {
		addr = segbase - obj->tlsoffset;
		memset((void*) (addr + obj->tlsinitsize),
		       0, obj->tlssize - obj->tlsinitsize);
		if (obj->tlsinit)
		    memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
		dtv[obj->tlsindex + 1] = addr;
	    }
	}
    }

    return (void*) segbase;
}
|
|
|
|
|
|
|
|
/*
 * Free a Variant II TLS area: release dynamically allocated module
 * blocks (DTV entries outside the static area), then the static
 * allocation and the DTV.
 */
void
free_tls(void *tls, size_t tcbsize, size_t tcbalign)
{
    size_t size;
    Elf_Addr* dtv;
    int dtvsize, i;
    Elf_Addr tlsstart, tlsend;

    /*
     * Figure out the size of the initial TLS block so that we can
     * find stuff which ___tls_get_addr() allocated dynamically.
     */
    size = round(tls_static_space, tcbalign);

    dtv = ((Elf_Addr**)tls)[1];
    dtvsize = dtv[1];
    tlsend = (Elf_Addr) tls;
    tlsstart = tlsend - size;
    for (i = 0; i < dtvsize; i++) {
	if (dtv[i+2] && (dtv[i+2] < tlsstart || dtv[i+2] > tlsend)) {
	    free((void*) dtv[i+2]);
	}
    }

    /* tlsstart is the base of the calloc'ed tls+tcb allocation. */
    free((void*) tlsstart);
    free((void*) dtv);
}
|
|
|
|
|
|
|
|
#endif
|
2003-05-29 22:58:26 +00:00
|
|
|
|
2004-08-03 08:51:00 +00:00
|
|
|
/*
|
|
|
|
* Allocate TLS block for module with given index.
|
|
|
|
*/
|
|
|
|
void *
|
|
|
|
allocate_module_tls(int index)
|
|
|
|
{
|
|
|
|
Obj_Entry* obj;
|
|
|
|
char* p;
|
|
|
|
|
|
|
|
for (obj = obj_list; obj; obj = obj->next) {
|
|
|
|
if (obj->tlsindex == index)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (!obj) {
|
|
|
|
_rtld_error("Can't find module with TLS index %d", index);
|
|
|
|
die();
|
|
|
|
}
|
|
|
|
|
|
|
|
p = malloc(obj->tlssize);
|
2010-05-18 08:55:23 +00:00
|
|
|
if (p == NULL) {
|
|
|
|
_rtld_error("Cannot allocate TLS block for index %d", index);
|
|
|
|
die();
|
|
|
|
}
|
2004-08-03 08:51:00 +00:00
|
|
|
memcpy(p, obj->tlsinit, obj->tlsinitsize);
|
|
|
|
memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize);
|
|
|
|
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
allocate_tls_offset(Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
size_t off;
|
|
|
|
|
|
|
|
if (obj->tls_done)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (obj->tlssize == 0) {
|
|
|
|
obj->tls_done = true;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (obj->tlsindex == 1)
|
|
|
|
off = calculate_first_tls_offset(obj->tlssize, obj->tlsalign);
|
|
|
|
else
|
|
|
|
off = calculate_tls_offset(tls_last_offset, tls_last_size,
|
|
|
|
obj->tlssize, obj->tlsalign);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we have already fixed the size of the static TLS block, we
|
|
|
|
* must stay within that size. When allocating the static TLS, we
|
|
|
|
* leave a small amount of space spare to be used for dynamically
|
|
|
|
* loading modules which use static TLS.
|
|
|
|
*/
|
|
|
|
if (tls_static_space) {
|
|
|
|
if (calculate_tls_end(off, obj->tlssize) > tls_static_space)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
tls_last_offset = obj->tlsoffset = off;
|
|
|
|
tls_last_size = obj->tlssize;
|
|
|
|
obj->tls_done = true;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2005-02-27 12:55:40 +00:00
|
|
|
void
|
|
|
|
free_tls_offset(Obj_Entry *obj)
|
|
|
|
{
|
2010-02-16 02:48:11 +00:00
|
|
|
|
2005-02-27 12:55:40 +00:00
|
|
|
/*
|
|
|
|
* If we were the last thing to allocate out of the static TLS
|
|
|
|
* block, we give our space back to the 'allocator'. This is a
|
|
|
|
* simplistic workaround to allow libGL.so.1 to be loaded and
|
2010-02-16 02:48:11 +00:00
|
|
|
* unloaded multiple times.
|
2005-02-27 12:55:40 +00:00
|
|
|
*/
|
|
|
|
if (calculate_tls_end(obj->tlsoffset, obj->tlssize)
|
|
|
|
== calculate_tls_end(tls_last_offset, tls_last_size)) {
|
|
|
|
tls_last_offset -= obj->tlssize;
|
|
|
|
tls_last_size = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2004-08-03 08:51:00 +00:00
|
|
|
/*
 * External entry point for allocating a thread's TLS block.  Wraps
 * allocate_tls() with the rtld bind lock so the object list cannot
 * change while it is being walked.  'oldtls', 'tcbsize' and 'tcbalign'
 * are passed through unchanged.
 */
void *
_rtld_allocate_tls(void *oldtls, size_t tcbsize, size_t tcbalign)
{
    void *ret;
    RtldLockState lockstate;

    wlock_acquire(rtld_bind_lock, &lockstate);
    ret = allocate_tls(obj_list, oldtls, tcbsize, tcbalign);
    lock_release(rtld_bind_lock, &lockstate);
    return (ret);
}
|
|
|
|
|
|
|
|
/*
 * External entry point for freeing a thread's TLS block.  Wraps
 * free_tls() with the rtld bind lock, mirroring _rtld_allocate_tls().
 */
void
_rtld_free_tls(void *tcb, size_t tcbsize, size_t tcbalign)
{
    RtldLockState lockstate;

    wlock_acquire(rtld_bind_lock, &lockstate);
    free_tls(tcb, tcbsize, tcbalign);
    lock_release(rtld_bind_lock, &lockstate);
}
|
2005-12-18 19:43:33 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
object_add_name(Obj_Entry *obj, const char *name)
|
|
|
|
{
|
|
|
|
Name_Entry *entry;
|
|
|
|
size_t len;
|
|
|
|
|
|
|
|
len = strlen(name);
|
|
|
|
entry = malloc(sizeof(Name_Entry) + len);
|
|
|
|
|
|
|
|
if (entry != NULL) {
|
|
|
|
strcpy(entry->name, name);
|
|
|
|
STAILQ_INSERT_TAIL(&obj->names, entry, link);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
object_match_name(const Obj_Entry *obj, const char *name)
|
|
|
|
{
|
|
|
|
Name_Entry *entry;
|
|
|
|
|
|
|
|
STAILQ_FOREACH(entry, &obj->names, link) {
|
|
|
|
if (strcmp(name, entry->name) == 0)
|
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Find the object providing the dependency 'name' for 'obj'.  First
 * the main program's object list is searched by recorded name; then
 * obj's own DT_NEEDED entries.  Does not return on total failure
 * (calls die()); may legitimately return NULL when a DT_NEEDED entry
 * matches but its object has not been loaded yet — callers must
 * handle that.
 */
static Obj_Entry *
locate_dependency(const Obj_Entry *obj, const char *name)
{
    const Objlist_Entry *entry;
    const Needed_Entry *needed;

    STAILQ_FOREACH(entry, &list_main, link) {
	if (object_match_name(entry->obj, name))
	    return entry->obj;
    }

    for (needed = obj->needed; needed != NULL; needed = needed->next) {
	if (strcmp(obj->strtab + needed->name, name) == 0 ||
	    (needed->obj != NULL && object_match_name(needed->obj, name))) {
	    /*
	     * If there is DT_NEEDED for the name we are looking for,
	     * we are all set.  Note that object might not be found if
	     * dependency was not loaded yet, so the function can
	     * return NULL here.  This is expected and handled
	     * properly by the caller.
	     */
	    return (needed->obj);
	}
    }
    _rtld_error("%s: Unexpected inconsistency: dependency %s not found",
	obj->path, name);
    die();
}
|
|
|
|
|
|
|
|
/*
 * Check whether 'depobj' defines the version that refobj's Vernaux
 * record 'vna' requires.  Returns 0 when the version is found or the
 * requirement is weak (VER_FLG_WEAK); returns -1 otherwise, with the
 * failure recorded via _rtld_error().
 */
static int
check_object_provided_version(Obj_Entry *refobj, const Obj_Entry *depobj,
    const Elf_Vernaux *vna)
{
    const Elf_Verdef *vd;
    const char *vername;

    vername = refobj->strtab + vna->vna_name;
    vd = depobj->verdef;
    /* No version definitions at all in the dependency: hard failure. */
    if (vd == NULL) {
	_rtld_error("%s: version %s required by %s not defined",
	    depobj->path, vername, refobj->path);
	return (-1);
    }
    /* Walk depobj's Verdef chain looking for a matching definition. */
    for (;;) {
	if (vd->vd_version != VER_DEF_CURRENT) {
	    _rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
		depobj->path, vd->vd_version);
	    return (-1);
	}
	if (vna->vna_hash == vd->vd_hash) {
	    const Elf_Verdaux *aux = (const Elf_Verdaux *)
		((char *)vd + vd->vd_aux);
	    /* Hashes can collide; compare the names as well. */
	    if (strcmp(vername, depobj->strtab + aux->vda_name) == 0)
		return (0);
	}
	if (vd->vd_next == 0)
	    break;
	vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
    }
    /* A weak version requirement is satisfied even when absent. */
    if (vna->vna_flags & VER_FLG_WEAK)
	return (0);
    _rtld_error("%s: version %s required by %s not found",
	depobj->path, vername, refobj->path);
    return (-1);
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
rtld_verify_object_versions(Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
const Elf_Verneed *vn;
|
|
|
|
const Elf_Verdef *vd;
|
|
|
|
const Elf_Verdaux *vda;
|
|
|
|
const Elf_Vernaux *vna;
|
|
|
|
const Obj_Entry *depobj;
|
|
|
|
int maxvernum, vernum;
|
|
|
|
|
|
|
|
maxvernum = 0;
|
|
|
|
/*
|
|
|
|
* Walk over defined and required version records and figure out
|
|
|
|
* max index used by any of them. Do very basic sanity checking
|
|
|
|
* while there.
|
|
|
|
*/
|
|
|
|
vn = obj->verneed;
|
|
|
|
while (vn != NULL) {
|
|
|
|
if (vn->vn_version != VER_NEED_CURRENT) {
|
2005-12-24 15:37:30 +00:00
|
|
|
_rtld_error("%s: Unsupported version %d of Elf_Verneed entry",
|
2005-12-18 19:43:33 +00:00
|
|
|
obj->path, vn->vn_version);
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux);
|
|
|
|
for (;;) {
|
|
|
|
vernum = VER_NEED_IDX(vna->vna_other);
|
|
|
|
if (vernum > maxvernum)
|
|
|
|
maxvernum = vernum;
|
|
|
|
if (vna->vna_next == 0)
|
|
|
|
break;
|
|
|
|
vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next);
|
|
|
|
}
|
|
|
|
if (vn->vn_next == 0)
|
|
|
|
break;
|
|
|
|
vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next);
|
|
|
|
}
|
|
|
|
|
|
|
|
vd = obj->verdef;
|
|
|
|
while (vd != NULL) {
|
|
|
|
if (vd->vd_version != VER_DEF_CURRENT) {
|
2005-12-24 15:37:30 +00:00
|
|
|
_rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
|
|
|
|
obj->path, vd->vd_version);
|
2005-12-18 19:43:33 +00:00
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
vernum = VER_DEF_IDX(vd->vd_ndx);
|
|
|
|
if (vernum > maxvernum)
|
|
|
|
maxvernum = vernum;
|
|
|
|
if (vd->vd_next == 0)
|
|
|
|
break;
|
|
|
|
vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (maxvernum == 0)
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Store version information in array indexable by version index.
|
|
|
|
* Verify that object version requirements are satisfied along the
|
|
|
|
* way.
|
|
|
|
*/
|
|
|
|
obj->vernum = maxvernum + 1;
|
|
|
|
obj->vertab = calloc(obj->vernum, sizeof(Ver_Entry));
|
|
|
|
|
|
|
|
vd = obj->verdef;
|
|
|
|
while (vd != NULL) {
|
|
|
|
if ((vd->vd_flags & VER_FLG_BASE) == 0) {
|
|
|
|
vernum = VER_DEF_IDX(vd->vd_ndx);
|
|
|
|
assert(vernum <= maxvernum);
|
|
|
|
vda = (const Elf_Verdaux *)((char *)vd + vd->vd_aux);
|
|
|
|
obj->vertab[vernum].hash = vd->vd_hash;
|
|
|
|
obj->vertab[vernum].name = obj->strtab + vda->vda_name;
|
|
|
|
obj->vertab[vernum].file = NULL;
|
|
|
|
obj->vertab[vernum].flags = 0;
|
|
|
|
}
|
|
|
|
if (vd->vd_next == 0)
|
|
|
|
break;
|
|
|
|
vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
|
|
|
|
}
|
|
|
|
|
|
|
|
vn = obj->verneed;
|
|
|
|
while (vn != NULL) {
|
|
|
|
depobj = locate_dependency(obj, obj->strtab + vn->vn_file);
|
2011-01-30 16:14:09 +00:00
|
|
|
if (depobj == NULL)
|
|
|
|
return (-1);
|
2005-12-18 19:43:33 +00:00
|
|
|
vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux);
|
|
|
|
for (;;) {
|
|
|
|
if (check_object_provided_version(obj, depobj, vna))
|
|
|
|
return (-1);
|
|
|
|
vernum = VER_NEED_IDX(vna->vna_other);
|
|
|
|
assert(vernum <= maxvernum);
|
|
|
|
obj->vertab[vernum].hash = vna->vna_hash;
|
|
|
|
obj->vertab[vernum].name = obj->strtab + vna->vna_name;
|
|
|
|
obj->vertab[vernum].file = obj->strtab + vn->vn_file;
|
|
|
|
obj->vertab[vernum].flags = (vna->vna_other & VER_NEED_HIDDEN) ?
|
|
|
|
VER_INFO_HIDDEN : 0;
|
|
|
|
if (vna->vna_next == 0)
|
|
|
|
break;
|
|
|
|
vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next);
|
|
|
|
}
|
|
|
|
if (vn->vn_next == 0)
|
|
|
|
break;
|
|
|
|
vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Verify symbol-version requirements for every object on 'objlist',
 * and for rtld itself.  Returns 0 when all requirements are met, -1
 * otherwise.  Under ld_tracing, verification continues past failures
 * so that all problems get reported.
 */
static int
rtld_verify_versions(const Objlist *objlist)
{
    Objlist_Entry *entry;
    int rc;

    rc = 0;
    STAILQ_FOREACH(entry, objlist, link) {
	/*
	 * Skip dummy objects or objects that have their version requirements
	 * already checked.
	 */
	if (entry->obj->strtab == NULL || entry->obj->vertab != NULL)
	    continue;
	if (rtld_verify_object_versions(entry->obj) == -1) {
	    rc = -1;
	    if (ld_tracing == NULL)
		break;
	}
    }
    /* Check rtld's own version requirements too (always when tracing). */
    if (rc == 0 || ld_tracing != NULL)
	rc = rtld_verify_object_versions(&obj_rtld);
    return rc;
}
|
|
|
|
|
|
|
|
/*
 * Return the version entry for symbol number 'symnum' in 'obj', or
 * NULL when the object has no version table, the symbol carries no
 * usable version, or the index is out of range (which also logs an
 * error).
 */
const Ver_Entry *
fetch_ventry(const Obj_Entry *obj, unsigned long symnum)
{
    Elf_Versym vernum;

    if (obj->vertab) {
	vernum = VER_NDX(obj->versyms[symnum]);
	if (vernum >= obj->vernum) {
	    /*
	     * NOTE(review): the "%s" argument is obj->strtab + symnum,
	     * i.e. the symbol *index* used as a string-table offset.
	     * That looks like it should be the symbol's st_name offset
	     * instead — confirm before relying on this message.
	     */
	    _rtld_error("%s: symbol %s has wrong verneed value %d",
		obj->path, obj->strtab + symnum, vernum);
	} else if (obj->vertab[vernum].hash != 0) {
	    /* hash == 0 marks an unused slot; only real entries count. */
	    return &obj->vertab[vernum];
	}
    }
    return NULL;
}
|
2010-08-17 09:05:39 +00:00
|
|
|
|
2011-01-08 17:11:49 +00:00
|
|
|
/*
 * Return the protection flags currently applied to thread stacks.
 */
int
_rtld_get_stack_prot(void)
{

    return (stack_prot);
}
|
|
|
|
|
|
|
|
/*
 * Make thread stacks executable when some loaded object requires it
 * (PF_X set in max_stack_flags) and PROT_EXEC has not been added yet.
 * Delegates the actual remapping to the threading library's
 * __pthread_map_stacks_exec, if present.
 */
static void
map_stacks_exec(RtldLockState *lockstate)
{
    void (*thr_map_stacks_exec)(void);

    /* Nothing to do if exec stacks are not needed or already enabled. */
    if ((max_stack_flags & PF_X) == 0 || (stack_prot & PROT_EXEC) != 0)
	return;
    thr_map_stacks_exec = (void (*)(void))(uintptr_t)
	get_program_var_addr("__pthread_map_stacks_exec", lockstate);
    if (thr_map_stacks_exec != NULL) {
	/* Record the new protection before invoking the helper. */
	stack_prot |= PROT_EXEC;
	thr_map_stacks_exec();
    }
}
|
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
/*
 * Initialize a symbol-lookup request: clear all fields, then record
 * the symbol name and its precomputed ELF hash.
 */
void
symlook_init(SymLook *dst, const char *name)
{

    bzero(dst, sizeof(*dst));
    dst->name = name;
    dst->hash = elf_hash(name);
}
|
|
|
|
|
|
|
|
/*
 * Copy the query fields (name, hash, version, flags, lock state) of
 * 'src' into 'dst' and reset the output fields, so 'dst' can be used
 * for a fresh lookup of the same request.
 */
static void
symlook_init_from_req(SymLook *dst, const SymLook *src)
{

    dst->name = src->name;
    dst->hash = src->hash;
    dst->ventry = src->ventry;
    dst->flags = src->flags;
    dst->defobj_out = NULL;
    dst->sym_out = NULL;
    dst->lockstate = src->lockstate;
}
|
|
|
|
|
2010-08-23 15:38:02 +00:00
|
|
|
/*
|
|
|
|
* Overrides for libc_pic-provided functions.
|
|
|
|
*/
|
|
|
|
|
2010-08-17 09:05:39 +00:00
|
|
|
int
|
|
|
|
__getosreldate(void)
|
|
|
|
{
|
|
|
|
size_t len;
|
|
|
|
int oid[2];
|
|
|
|
int error, osrel;
|
|
|
|
|
|
|
|
if (osreldate != 0)
|
|
|
|
return (osreldate);
|
|
|
|
|
|
|
|
oid[0] = CTL_KERN;
|
|
|
|
oid[1] = KERN_OSRELDATE;
|
|
|
|
osrel = 0;
|
|
|
|
len = sizeof(osrel);
|
|
|
|
error = sysctl(oid, 2, &osrel, &len, NULL, 0);
|
|
|
|
if (error == 0 && osrel > 0 && len == sizeof(osrel))
|
|
|
|
osreldate = osrel;
|
|
|
|
return (osreldate);
|
|
|
|
}
|
2010-08-23 15:38:02 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* No unresolved symbols for rtld.
|
|
|
|
*/
|
|
|
|
void
__pthread_cxa_finalize(struct dl_phdr_info *a)
{
    /*
     * Deliberately empty: rtld provides this stub so it has no
     * unresolved reference to the threading library's implementation.
     */
}
|