/*-
 * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Dynamic linker for ELF.
 *
 * John Polstra <jdp@polstra.com>.
 */

#ifndef __GNUC__
#error "GCC is needed to compile this file"
#endif

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <dlfcn.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

#define END_SYM		"_end"
#define PATH_RTLD	"/usr/libexec/ld-elf.so.1"

/* Types. */
typedef void (*func_ptr_type)();

/*
 * This structure provides a reentrant way to keep a list of objects and
 * check which ones have already been processed in some way.
 */
typedef struct Struct_DoneList {
    const Obj_Entry **objs;	/* Array of object pointers */
    unsigned int num_alloc;	/* Allocated size of the array */
    unsigned int num_used;	/* Number of array slots used */
} DoneList;

/*
 * Function declarations.
 */
static const char *basename(const char *);
static void die(void);
static void digest_dynamic(Obj_Entry *);
static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *);
static Obj_Entry *dlcheck(void *);
static bool donelist_check(DoneList *, const Obj_Entry *);
static void errmsg_restore(char *);
static char *errmsg_save(void);
static char *find_library(const char *, const Obj_Entry *);
static const char *gethints(void);
static void init_dag(Obj_Entry *);
static void init_dag1(Obj_Entry *root, Obj_Entry *obj, DoneList *);
static void init_rtld(caddr_t);
static void initlist_add_neededs(Needed_Entry *needed, Objlist *list);
static void initlist_add_objects(Obj_Entry *obj, Obj_Entry **tail,
  Objlist *list);
static bool is_exported(const Elf_Sym *);
static void linkmap_add(Obj_Entry *);
static void linkmap_delete(Obj_Entry *);
static int load_needed_objects(Obj_Entry *);
static int load_preload_objects(void);
static Obj_Entry *load_object(char *);
static void lock_check(void);
static Obj_Entry *obj_from_addr(const void *);
static void objlist_call_fini(Objlist *);
static void objlist_call_init(Objlist *);
static void objlist_clear(Objlist *);
static Objlist_Entry *objlist_find(Objlist *, const Obj_Entry *);
static void objlist_init(Objlist *);
static void objlist_push_head(Objlist *, Obj_Entry *);
static void objlist_push_tail(Objlist *, Obj_Entry *);
static void objlist_remove(Objlist *, Obj_Entry *);
static void objlist_remove_unref(Objlist *);
static int relocate_objects(Obj_Entry *, bool);
static void rtld_exit(void);
static char *search_library_path(const char *, const char *);
static const void **get_program_var_addr(const char *name);
static void set_program_var(const char *, const void *);
static const Elf_Sym *symlook_default(const char *, unsigned long hash,
  const Obj_Entry *refobj, const Obj_Entry **defobj_out, bool in_plt);
static const Elf_Sym *symlook_list(const char *, unsigned long,
  Objlist *, const Obj_Entry **, bool in_plt, DoneList *);
static void trace_loaded_objects(Obj_Entry *obj);
static void unload_object(Obj_Entry *);
static void unref_dag(Obj_Entry *);

void r_debug_state(struct r_debug*, struct link_map*);
void xprintf(const char *, ...) __printflike(1, 2);

/*
 * Data declarations.
 */
static char *error_message;	/* Message for dlerror(), or NULL */
struct r_debug r_debug;		/* for GDB; */
static bool trust;		/* False for setuid and setgid programs */
static char *ld_bind_now;	/* Environment variable for immediate binding */
static char *ld_debug;		/* Environment variable for debugging */
static char *ld_library_path;	/* Environment variable for search path */
static char *ld_preload;	/* Environment variable for libraries to
				   load first */
static char *ld_tracing;	/* Called from ldd to print libs */
static Obj_Entry *obj_list;	/* Head of linked list of shared objects */
static Obj_Entry **obj_tail;	/* Link field of last object in list */
static Obj_Entry *obj_main;	/* The main program shared object */
static Obj_Entry obj_rtld;	/* The dynamic linker shared object */
static unsigned int obj_count;	/* Number of objects in obj_list */

static Objlist list_global =	/* Objects dlopened with RTLD_GLOBAL */
  STAILQ_HEAD_INITIALIZER(list_global);
static Objlist list_main =	/* Objects loaded at program startup */
  STAILQ_HEAD_INITIALIZER(list_main);
static Objlist list_fini =	/* Objects needing fini() calls */
  STAILQ_HEAD_INITIALIZER(list_fini);

static LockInfo lockinfo;

static Elf_Sym sym_zero;	/* For resolving undefined weak refs. */

#define GDB_STATE(s,m)	r_debug.r_state = s; r_debug_state(&r_debug,m);
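
/*
 * Note: r_debug_state() is the function on which a debugger is expected to
 * set its breakpoint; GDB_STATE records the new state in r_debug and then
 * calls it so gdb can observe link-map changes (cf. the "say hello to gdb!"
 * call in _rtld() below).
 */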

extern Elf_Dyn _DYNAMIC;
#pragma weak _DYNAMIC

/*
 * These are the functions the dynamic linker exports to application
 * programs.  They are the only symbols the dynamic linker is willing
 * to export from itself.
 */
static func_ptr_type exports[] = {
    (func_ptr_type) &_rtld_error,
    (func_ptr_type) &dlclose,
    (func_ptr_type) &dlerror,
    (func_ptr_type) &dlopen,
    (func_ptr_type) &dlsym,
    (func_ptr_type) &dladdr,
    (func_ptr_type) &dllockinit,
    NULL
};
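
/*
 * (Sketch) Symbol lookups that resolve into the dynamic linker itself are
 * presumably screened through is_exported() against this table, so that
 * applications see only these entry points.
 */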

/*
 * Global declarations normally provided by crt1.  The dynamic linker is
 * not built with crt1, so we have to provide them ourselves.
 */
char *__progname;
char **environ;

/*
 * Fill in a DoneList with an allocation large enough to hold all of
 * the currently-loaded objects.  Keep this as a macro since it calls
 * alloca and we want that to occur within the scope of the caller.
 */
#define donelist_init(dlp)					\
    ((dlp)->objs = alloca(obj_count * sizeof (dlp)->objs[0]),	\
    assert((dlp)->objs != NULL),				\
    (dlp)->num_alloc = obj_count,				\
    (dlp)->num_used = 0)
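
/*
 * Typical use (sketch): declare a DoneList in the caller, initialize it
 * with donelist_init(&dl), then consult donelist_check() while walking
 * object lists so that each object is visited at most once.
 */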

static __inline void
rlock_acquire(void)
{
    lockinfo.rlock_acquire(lockinfo.thelock);
    atomic_incr_int(&lockinfo.rcount);
    lock_check();
}

static __inline void
wlock_acquire(void)
{
    lockinfo.wlock_acquire(lockinfo.thelock);
    atomic_incr_int(&lockinfo.wcount);
    lock_check();
}

static __inline void
rlock_release(void)
{
    atomic_decr_int(&lockinfo.rcount);
    lockinfo.rlock_release(lockinfo.thelock);
}

static __inline void
wlock_release(void)
{
    atomic_decr_int(&lockinfo.wcount);
    lockinfo.wlock_release(lockinfo.thelock);
}
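
/*
 * The rcount/wcount updates in the wrappers above maintain counts of the
 * read and write locks believed held, apparently so that lock_check() can
 * sanity-check the lock state after each acquisition.
 */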

/*
 * Main entry point for dynamic linking.  The first argument is the
 * stack pointer.  The stack is expected to be laid out as described
 * in the SVR4 ABI specification, Intel 386 Processor Supplement.
 * Specifically, the stack pointer points to a word containing
 * ARGC.  Following that in the stack is a null-terminated sequence
 * of pointers to argument strings.  Then comes a null-terminated
 * sequence of pointers to environment strings.  Finally, there is a
 * sequence of "auxiliary vector" entries.
 *
 * The second argument points to a place to store the dynamic linker's
 * exit procedure pointer and the third to a place to store the main
 * program's object.
 *
 * The return value is the main program's entry point.
 */
func_ptr_type
_rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
{
    Elf_Auxinfo *aux_info[AT_COUNT];
    int i;
    int argc;
    char **argv;
    char **env;
    Elf_Auxinfo *aux;
    Elf_Auxinfo *auxp;
    const char *argv0;
    Obj_Entry *obj;
    Obj_Entry **preload_tail;
    Objlist initlist;

    /*
     * On entry, the dynamic linker itself has not been relocated yet.
     * Be very careful not to reference any global data until after
     * init_rtld has returned.  It is OK to reference file-scope statics
     * and string constants, and to call static and global functions.
     */

    /* Find the auxiliary vector on the stack. */
    argc = *sp++;
    argv = (char **) sp;
    sp += argc + 1;	/* Skip over arguments and NULL terminator */
    env = (char **) sp;
    while (*sp++ != 0)	/* Skip over environment, and NULL terminator */
	;
    aux = (Elf_Auxinfo *) sp;
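
    /*
     * The stack has now been parsed as laid out above:
     *
     *		sp(entry) -> argc
     *		             argv[0] ... argv[argc-1], NULL
     *		             env[0] ..., NULL
     *		             auxv entries, terminated by AT_NULL	<- aux
     */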

    /* Digest the auxiliary vector. */
    for (i = 0; i < AT_COUNT; i++)
	aux_info[i] = NULL;
    for (auxp = aux; auxp->a_type != AT_NULL; auxp++) {
	if (auxp->a_type < AT_COUNT)
	    aux_info[auxp->a_type] = auxp;
    }
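
    /* aux_info[] can now be indexed directly by AT_* type (e.g. AT_BASE). */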

    /* Initialize and relocate ourselves. */
    assert(aux_info[AT_BASE] != NULL);
    init_rtld((caddr_t) aux_info[AT_BASE]->a_un.a_ptr);

    __progname = obj_rtld.path;
    argv0 = argv[0] != NULL ? argv[0] : "(null)";
    environ = env;

    trust = geteuid() == getuid() && getegid() == getgid();
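
    /*
     * For set-user-ID and set-group-ID programs, trust is false and the
     * dangerous LD_* environment variables below are left unset, so they
     * cannot be used to subvert a privileged program.
     */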

    ld_bind_now = getenv("LD_BIND_NOW");
    if (trust) {
	ld_debug = getenv("LD_DEBUG");
	ld_library_path = getenv("LD_LIBRARY_PATH");
	ld_preload = getenv("LD_PRELOAD");
    }
    ld_tracing = getenv("LD_TRACE_LOADED_OBJECTS");

    if (ld_debug != NULL && *ld_debug != '\0')
	debug = 1;
    dbg("%s is initialized, base address = %p", __progname,
	(caddr_t) aux_info[AT_BASE]->a_un.a_ptr);
    dbg("RTLD dynamic = %p", obj_rtld.dynamic);
    dbg("RTLD pltgot = %p", obj_rtld.pltgot);

    /*
     * Load the main program, or process its program header if it is
     * already loaded.
     */
    if (aux_info[AT_EXECFD] != NULL) {	/* Load the main program. */
	int fd = aux_info[AT_EXECFD]->a_un.a_val;
	dbg("loading main program");
	obj_main = map_object(fd, argv0, NULL);
	close(fd);
	if (obj_main == NULL)
	    die();
    } else {				/* Main program already loaded. */
	const Elf_Phdr *phdr;
	int phnum;
	caddr_t entry;

	dbg("processing main program's program header");
	assert(aux_info[AT_PHDR] != NULL);
	phdr = (const Elf_Phdr *) aux_info[AT_PHDR]->a_un.a_ptr;
	assert(aux_info[AT_PHNUM] != NULL);
	phnum = aux_info[AT_PHNUM]->a_un.a_val;
	assert(aux_info[AT_PHENT] != NULL);
	assert(aux_info[AT_PHENT]->a_un.a_val == sizeof(Elf_Phdr));
	assert(aux_info[AT_ENTRY] != NULL);
	entry = (caddr_t) aux_info[AT_ENTRY]->a_un.a_ptr;
	if ((obj_main = digest_phdr(phdr, phnum, entry, argv0)) == NULL)
	    die();
    }
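
    /*
     * (Sketch) AT_EXECFD means the kernel handed us an open descriptor for
     * the executable and left the mapping to rtld; otherwise the kernel
     * already mapped it, and its layout is recovered from the program
     * header passed in AT_PHDR.
     */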

    obj_main->path = xstrdup(argv0);
    obj_main->mainprog = true;

    /*
     * Get the actual dynamic linker pathname from the executable if
     * possible.  (It should always be possible.)  That ensures that
     * gdb will find the right dynamic linker even if a non-standard
     * one is being used.
     */
    if (obj_main->interp != NULL &&
      strcmp(obj_main->interp, obj_rtld.path) != 0) {
	free(obj_rtld.path);
	obj_rtld.path = xstrdup(obj_main->interp);
    }

    digest_dynamic(obj_main);

    linkmap_add(obj_main);
    linkmap_add(&obj_rtld);

    /* Link the main program into the list of objects. */
    *obj_tail = obj_main;
    obj_tail = &obj_main->next;
    obj_count++;
    obj_main->refcount++;
    /* Make sure we don't call the main program's init and fini functions. */
    obj_main->init = obj_main->fini = NULL;

    /* Initialize a fake symbol for resolving undefined weak references. */
    sym_zero.st_info = ELF_ST_INFO(STB_GLOBAL, STT_NOTYPE);
    sym_zero.st_shndx = SHN_ABS;
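
    /*
     * sym_zero's value is left at zero, so an undefined weak reference
     * resolves to address 0 (a null pointer), as ELF semantics require.
     */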

    dbg("loading LD_PRELOAD libraries");
    if (load_preload_objects() == -1)
	die();
    preload_tail = obj_tail;
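
    /*
     * preload_tail marks the point in obj_list just past the LD_PRELOAD
     * objects; initlist_add_objects() uses it below when building the list
     * of init functions to call.
     */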

    dbg("loading needed objects");
    if (load_needed_objects(obj_main) == -1)
	die();

    /* Make a list of all objects loaded at startup. */
    for (obj = obj_list; obj != NULL; obj = obj->next)
	objlist_push_tail(&list_main, obj);

    if (ld_tracing) {		/* We're done */
	trace_loaded_objects(obj_main);
	exit(0);
    }

    if (relocate_objects(obj_main,
	ld_bind_now != NULL && *ld_bind_now != '\0') == -1)
	die();

    dbg("doing copy relocations");
    if (do_copy_relocations(obj_main) == -1)
	die();
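
    /*
     * Copy relocations apply only to the main program; they copy
     * initialized data from a shared object into space reserved in the
     * executable, which is why do_copy_relocations() takes obj_main alone.
     */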

    dbg("initializing key program variables");
    set_program_var("__progname", argv[0] != NULL ? basename(argv[0]) : "");
    set_program_var("environ", env);
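
    /*
     * set_program_var() presumably locates each named variable in the
     * loaded objects (cf. get_program_var_addr() above) and stores the new
     * value, keeping the program's own __progname and environ consistent
     * with what rtld saw.
     */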

    dbg("initializing thread locks");
    lockdflt_init(&lockinfo);
    lockinfo.thelock = lockinfo.lock_create(lockinfo.context);
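
    /*
     * lockdflt_init() installs the default lock methods (the
     * machine-dependent spinlock implementation from the per-architecture
     * lockdflt.c); thread packages formerly overrode these via the now
     * deprecated dllockinit().
     */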

    /* Make a list of init functions to call. */
    objlist_init(&initlist);
    initlist_add_objects(obj_list, preload_tail, &initlist);

    r_debug_state(NULL, &obj_main->linkmap); /* say hello to gdb! */

    objlist_call_init(&initlist);
    wlock_acquire();
    objlist_clear(&initlist);
    wlock_release();
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
dbg("transferring control to program entry point = %p", obj_main->entry);
|
|
|
|
|
|
|
|
/* Return the exit procedure and the program entry point. */
|
1998-09-04 19:03:57 +00:00
|
|
|
*exit_proc = rtld_exit;
|
|
|
|
*objp = obj_main;
|
1998-03-07 19:24:35 +00:00
|
|
|
return (func_ptr_type) obj_main->entry;
|
|
|
|
}
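
/*
 * For context on lockdflt_init() above: a minimal sketch of a spinlock
 * acquire loop that backs off with a short nanosleep() between attempts,
 * so a spinning contender yields the CPU to the lock holder.  The lock
 * word, the function name, and the use of the modern GCC builtin
 * __sync_bool_compare_and_swap() as the compare-and-swap primitive are
 * illustrative assumptions, not the actual lockdflt.c code.
 */
#if 0	/* Illustrative sketch only; not part of rtld. */
#include <time.h>

static void
example_spin_acquire(volatile int *lockword)
{
    const struct timespec ts = { 0, 1000 };	/* 1 usec. */

    /* Spin until the word changes from 0 (free) to 1 (held). */
    while (!__sync_bool_compare_and_swap(lockword, 0, 1))
        nanosleep(&ts, NULL);	/* yield the CPU to the lock holder */
}
#endif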

Elf_Addr
_rtld_bind(Obj_Entry *obj, Elf_Word reloff)
{
    const Elf_Rel *rel;
    const Elf_Sym *def;
    const Obj_Entry *defobj;
    Elf_Addr *where;
    Elf_Addr target;

    rlock_acquire();
    if (obj->pltrel)
        rel = (const Elf_Rel *) ((caddr_t) obj->pltrel + reloff);
    else
        rel = (const Elf_Rel *) ((caddr_t) obj->pltrela + reloff);

    where = (Elf_Addr *) (obj->relocbase + rel->r_offset);
    def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, true, NULL);
    if (def == NULL)
        die();

    target = (Elf_Addr)(defobj->relocbase + def->st_value);

    dbg("\"%s\" in \"%s\" ==> %p in \"%s\"",
      defobj->strtab + def->st_name, basename(obj->path),
      (void *)target, basename(defobj->path));

    /*
     * Write the new contents for the jmpslot.  Note that depending on
     * architecture, the value which we need to return back to the
     * lazy binding trampoline may or may not be the target
     * address.  The value returned from reloc_jmpslot() is the value
     * that the trampoline needs.
     */
    target = reloc_jmpslot(where, target, defobj);
    rlock_release();
    return target;
}
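
/*
 * For context: on i386 and most other architectures the machine-dependent
 * reloc_jmpslot() used above conceptually reduces to storing the resolved
 * address into the GOT slot and returning it.  A minimal sketch, assuming
 * that convention (architectures with function descriptors differ; the
 * function name here is hypothetical):
 */
#if 0	/* Illustrative sketch only; not part of rtld. */
static Elf_Addr
example_reloc_jmpslot(Elf_Addr *where, Elf_Addr target)
{
    *where = target;	/* later calls now jump directly to the target */
    return target;	/* the value the lazy binding trampoline jumps to */
}
#endif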

/*
 * Error reporting function.  Use it like printf.  It formats the message
 * into a buffer, and sets things up so that the next call to dlerror()
 * will return the message.
 */
void
_rtld_error(const char *fmt, ...)
{
    static char buf[512];
    va_list ap;

    va_start(ap, fmt);
    vsnprintf(buf, sizeof buf, fmt, ap);
    error_message = buf;
    va_end(ap);
}
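
/*
 * For context: dlerror() consumes the message saved by _rtld_error().  A
 * minimal sketch of that pairing, assuming the usual clear-on-read
 * semantics (the function name here is hypothetical):
 */
#if 0	/* Illustrative sketch only; not part of rtld. */
static char *
example_dlerror(void)
{
    char *msg = error_message;

    error_message = NULL;	/* each message is reported only once */
    return msg;
}
#endif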

/*
 * Return a dynamically-allocated copy of the current error message, if any.
 */
static char *
errmsg_save(void)
{
    return error_message == NULL ? NULL : xstrdup(error_message);
}

/*
 * Restore the current error message from a copy which was previously saved
 * by errmsg_save().  The copy is freed.
 */
static void
errmsg_restore(char *saved_msg)
{
    if (saved_msg == NULL)
        error_message = NULL;
    else {
        _rtld_error("%s", saved_msg);
        free(saved_msg);
    }
}
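
/*
 * Typical usage of the pair above: preserve the caller-visible error
 * message across a nested operation that may itself call _rtld_error().
 * A hedged sketch; do_nested_operation() is hypothetical:
 */
#if 0	/* Illustrative sketch only; not part of rtld. */
    char *saved_msg = errmsg_save();

    do_nested_operation();	/* may clobber error_message */
    errmsg_restore(saved_msg);	/* the original message is visible again */
#endif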

static const char *
basename(const char *name)
{
    const char *p = strrchr(name, '/');
    return p != NULL ? p + 1 : name;
}

static void
die(void)
{
    const char *msg = dlerror();

    if (msg == NULL)
        msg = "Fatal error";
    errx(1, "%s", msg);
}

/*
 * Process a shared object's DYNAMIC section, and save the important
 * information in its Obj_Entry structure.
 */
static void
digest_dynamic(Obj_Entry *obj)
{
    const Elf_Dyn *dynp;
    Needed_Entry **needed_tail = &obj->needed;
    const Elf_Dyn *dyn_rpath = NULL;
    int plttype = DT_REL;

    for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) {
        switch (dynp->d_tag) {

        case DT_REL:
            obj->rel = (const Elf_Rel *) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_RELSZ:
            obj->relsize = dynp->d_un.d_val;
            break;

        case DT_RELENT:
            assert(dynp->d_un.d_val == sizeof(Elf_Rel));
            break;

        case DT_JMPREL:
            obj->pltrel = (const Elf_Rel *)
              (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_PLTRELSZ:
            obj->pltrelsize = dynp->d_un.d_val;
            break;

        case DT_RELA:
            obj->rela = (const Elf_Rela *) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_RELASZ:
            obj->relasize = dynp->d_un.d_val;
            break;

        case DT_RELAENT:
            assert(dynp->d_un.d_val == sizeof(Elf_Rela));
            break;

        case DT_PLTREL:
            plttype = dynp->d_un.d_val;
            assert(plttype == DT_REL || plttype == DT_RELA);
            break;

        case DT_SYMTAB:
            obj->symtab = (const Elf_Sym *)
              (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_SYMENT:
            assert(dynp->d_un.d_val == sizeof(Elf_Sym));
            break;

        case DT_STRTAB:
            obj->strtab = (const char *) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_STRSZ:
            obj->strsize = dynp->d_un.d_val;
            break;

        case DT_HASH:
            {
                const Elf_Hashelt *hashtab = (const Elf_Hashelt *)
                  (obj->relocbase + dynp->d_un.d_ptr);
                obj->nbuckets = hashtab[0];
                obj->nchains = hashtab[1];
                obj->buckets = hashtab + 2;
                obj->chains = obj->buckets + obj->nbuckets;
            }
            break;

        case DT_NEEDED:
            if (!obj->rtld) {
                Needed_Entry *nep = NEW(Needed_Entry);
                nep->name = dynp->d_un.d_val;
                nep->obj = NULL;
                nep->next = NULL;

                *needed_tail = nep;
                needed_tail = &nep->next;
            }
            break;

        case DT_PLTGOT:
            obj->pltgot = (Elf_Addr *) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_TEXTREL:
            obj->textrel = true;
            break;

        case DT_SYMBOLIC:
            obj->symbolic = true;
            break;

        case DT_RPATH:
            /*
             * We have to wait until later to process this, because we
             * might not have gotten the address of the string table yet.
             */
            dyn_rpath = dynp;
            break;

        case DT_SONAME:
            /* Not used by the dynamic linker. */
            break;

        case DT_INIT:
            obj->init = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_FINI:
            obj->fini = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_DEBUG:
            /* Point the debugger at our rendezvous structure. */
            dbg("Filling in DT_DEBUG entry");
            ((Elf_Dyn*)dynp)->d_un.d_ptr = (Elf_Addr) &r_debug;
            break;

        default:
            dbg("Ignoring d_tag %ld = %#lx", (long)dynp->d_tag,
                (long)dynp->d_tag);
            break;
        }
    }

    obj->traced = false;

    if (plttype == DT_RELA) {
        obj->pltrela = (const Elf_Rela *) obj->pltrel;
        obj->pltrel = NULL;
        obj->pltrelasize = obj->pltrelsize;
        obj->pltrelsize = 0;
    }

    if (dyn_rpath != NULL)
        obj->rpath = obj->strtab + dyn_rpath->d_un.d_val;
}

/*
 * Process a shared object's program header.  This is used only for the
 * main program, when the kernel has already loaded the main program
 * into memory before calling the dynamic linker.  It creates and
 * returns an Obj_Entry structure.
 */
static Obj_Entry *
digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path)
{
    Obj_Entry *obj;
    const Elf_Phdr *phlimit = phdr + phnum;
    const Elf_Phdr *ph;
    int nsegs = 0;

    obj = obj_new();
    for (ph = phdr; ph < phlimit; ph++) {
        switch (ph->p_type) {

        case PT_PHDR:
            if ((const Elf_Phdr *)ph->p_vaddr != phdr) {
                _rtld_error("%s: invalid PT_PHDR", path);
                return NULL;
            }
            obj->phdr = (const Elf_Phdr *) ph->p_vaddr;
            obj->phsize = ph->p_memsz;
            break;

        case PT_INTERP:
            obj->interp = (const char *) ph->p_vaddr;
            break;

        case PT_LOAD:
            if (nsegs >= 2) {
                _rtld_error("%s: too many PT_LOAD segments", path);
                return NULL;
            }
            if (nsegs == 0) {	/* First load segment */
                obj->vaddrbase = trunc_page(ph->p_vaddr);
                obj->mapbase = (caddr_t) obj->vaddrbase;
                obj->relocbase = obj->mapbase - obj->vaddrbase;
                obj->textsize = round_page(ph->p_vaddr + ph->p_memsz) -
                  obj->vaddrbase;
            } else {		/* Last load segment */
                obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) -
                  obj->vaddrbase;
            }
            nsegs++;
            break;

        case PT_DYNAMIC:
            obj->dynamic = (const Elf_Dyn *) ph->p_vaddr;
            break;
        }
    }
    if (nsegs < 2) {
        _rtld_error("%s: too few PT_LOAD segments", path);
        return NULL;
    }

    obj->entry = entry;
    return obj;
}
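
/*
 * A worked example of the PT_LOAD arithmetic above (a sketch, assuming a
 * 4 KB page size and the traditional i386 link base; actual numbers vary):
 * a first segment with p_vaddr = 0x08048000 and p_memsz = 0x1234 gives
 * vaddrbase = trunc_page(0x08048000) = 0x08048000, mapbase = vaddrbase,
 * relocbase = mapbase - vaddrbase = 0 (the main program is loaded at its
 * link address and never relocated), and
 * textsize = round_page(0x08049234) - 0x08048000 = 0x2000.
 */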

static Obj_Entry *
dlcheck(void *handle)
{
    Obj_Entry *obj;

    for (obj = obj_list; obj != NULL; obj = obj->next)
        if (obj == (Obj_Entry *) handle)
            break;

    if (obj == NULL || obj->refcount == 0 || obj->dl_refcount == 0) {
        _rtld_error("Invalid shared object handle %p", handle);
        return NULL;
    }
    return obj;
}

/*
 * If the given object is already in the donelist, return true.  Otherwise
 * add the object to the list and return false.
 */
static bool
donelist_check(DoneList *dlp, const Obj_Entry *obj)
{
    unsigned int i;

    for (i = 0; i < dlp->num_used; i++)
        if (dlp->objs[i] == obj)
            return true;
    /*
     * Our donelist allocation should always be sufficient.  But if
     * our threads locking isn't working properly, more shared objects
     * could have been loaded since we allocated the list.  That should
     * never happen, but we'll handle it properly just in case it does.
     */
    if (dlp->num_used < dlp->num_alloc)
        dlp->objs[dlp->num_used++] = obj;
    return false;
}
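
/*
 * donelist_check() relies on a DoneList shaped roughly as below.  This is
 * a sketch for reference; the authoritative declaration lives in rtld.h.
 */
#if 0	/* Illustrative sketch only. */
typedef struct Struct_DoneList {
    const Obj_Entry **objs;	/* Array of object pointers already seen */
    unsigned int num_alloc;	/* Allocated size of the array */
    unsigned int num_used;	/* Number of array slots used */
} DoneList;
#endif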

/*
 * Hash function for symbol table lookup.  Don't even think about changing
 * this.  It is specified by the System V ABI.
 */
unsigned long
elf_hash(const char *name)
{
    const unsigned char *p = (const unsigned char *) name;
    unsigned long h = 0;
    unsigned long g;

    while (*p != '\0') {
        h = (h << 4) + *p++;
        if ((g = h & 0xf0000000) != 0)
            h ^= g >> 24;
        h &= ~g;
    }
    return h;
}
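
/*
 * How elf_hash() meshes with the DT_HASH table digested earlier: a minimal
 * per-object lookup sketch.  The real lookup (symlook_obj()) also filters
 * on symbol type and definedness; the function name here is hypothetical.
 */
#if 0	/* Illustrative sketch only; not part of rtld. */
static const Elf_Sym *
example_symlook_obj(const char *name, unsigned long hash, const Obj_Entry *obj)
{
    unsigned long symnum;

    for (symnum = obj->buckets[hash % obj->nbuckets];
      symnum != STN_UNDEF;
      symnum = obj->chains[symnum]) {	/* follow the hash chain */
        const Elf_Sym *symp = obj->symtab + symnum;

        if (strcmp(obj->strtab + symp->st_name, name) == 0)
            return symp;
    }
    return NULL;
}
#endif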

/*
 * Find the library with the given name, and return its full pathname.
 * The returned string is dynamically allocated.  Generates an error
 * message and returns NULL if the library cannot be found.
 *
 * If the second argument is non-NULL, then it refers to an already-
 * loaded shared object, whose library search path will be searched.
 *
 * The search order is:
 *   LD_LIBRARY_PATH
 *   rpath in the referencing file
 *   ldconfig hints
 *   /usr/lib
 */
static char *
find_library(const char *name, const Obj_Entry *refobj)
{
    char *pathname;

    if (strchr(name, '/') != NULL) {	/* Hard coded pathname */
        if (name[0] != '/' && !trust) {
            _rtld_error("Absolute pathname required for shared object \"%s\"",
              name);
            return NULL;
        }
        return xstrdup(name);
    }

    dbg(" Searching for \"%s\"", name);

    if ((pathname = search_library_path(name, ld_library_path)) != NULL ||
      (refobj != NULL &&
      (pathname = search_library_path(name, refobj->rpath)) != NULL) ||
      (pathname = search_library_path(name, gethints())) != NULL ||
      (pathname = search_library_path(name, STANDARD_LIBRARY_PATH)) != NULL)
        return pathname;

    _rtld_error("Shared object \"%s\" not found", name);
    return NULL;
}
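
/*
 * Each search_library_path() call above scans one colon-separated directory
 * list for the named library.  A minimal sketch of that job, assuming a
 * candidate is accepted if it is accessible (the real helper differs in
 * details; the function name and acceptance test here are hypothetical):
 */
#if 0	/* Illustrative sketch only; not part of rtld. */
static char *
example_search_path(const char *name, const char *pathlist)
{
    const char *p = pathlist;

    if (p == NULL)
        return NULL;
    while (*p != '\0') {
        size_t dirlen = strcspn(p, ":");
        char *candidate = xmalloc(dirlen + 1 + strlen(name) + 1);

        sprintf(candidate, "%.*s/%s", (int)dirlen, p, name);
        if (access(candidate, F_OK) == 0)	/* assumed acceptance test */
            return candidate;
        free(candidate);
        p += dirlen;
        if (*p == ':')
            p++;
    }
    return NULL;
}
#endif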

/*
 * Given a symbol number in a referencing object, find the corresponding
 * definition of the symbol.  Returns a pointer to the symbol, or NULL if
 * no definition was found.  Returns a pointer to the Obj_Entry of the
 * defining object via the reference parameter DEFOBJ_OUT.
 */
const Elf_Sym *
find_symdef(unsigned long symnum, const Obj_Entry *refobj,
    const Obj_Entry **defobj_out, bool in_plt, SymCache *cache)
{
    const Elf_Sym *ref;
    const Elf_Sym *def;
    const Obj_Entry *defobj;
    const char *name;
    unsigned long hash;

    /*
     * If we have already found this symbol, get the information from
     * the cache.
     */
    if (symnum >= refobj->nchains)
        return NULL;	/* Bad object */
    if (cache != NULL && cache[symnum].sym != NULL) {
        *defobj_out = cache[symnum].obj;
        return cache[symnum].sym;
    }

    ref = refobj->symtab + symnum;
    name = refobj->strtab + ref->st_name;
    hash = elf_hash(name);
    defobj = NULL;

    def = symlook_default(name, hash, refobj, &defobj, in_plt);

    /*
     * If we found no definition and the reference is weak, treat the
     * symbol as having the value zero.
     */
    if (def == NULL && ELF_ST_BIND(ref->st_info) == STB_WEAK) {
        def = &sym_zero;
        defobj = obj_main;
    }

    if (def != NULL) {
        *defobj_out = defobj;
        /* Record the information in the cache to avoid subsequent lookups. */
        if (cache != NULL) {
            cache[symnum].sym = def;
            cache[symnum].obj = defobj;
        }
    } else {
        if (refobj != &obj_rtld)
            _rtld_error("%s: Undefined symbol \"%s\"", refobj->path, name);
    }
    return def;
}
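
/*
 * The cache argument assumes one SymCache entry per symbol number, shaped
 * roughly as below.  A sketch for reference; the authoritative declaration
 * lives in rtld.h.
 */
#if 0	/* Illustrative sketch only. */
typedef struct Struct_SymCache {
    const Elf_Sym *sym;		/* Symbol definition (NULL if not found yet) */
    const Obj_Entry *obj;	/* Object the symbol belongs to */
} SymCache;
#endif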

/*
 * Return the search path from the ldconfig hints file, reading it if
 * necessary.  Returns NULL if there are problems with the hints file,
 * or if the search path there is empty.
 */
static const char *
gethints(void)
{
    static char *hints;

    if (hints == NULL) {
        int fd;
        struct elfhints_hdr hdr;
        char *p;

        /* Keep from trying again in case the hints file is bad. */
        hints = "";

        if ((fd = open(_PATH_ELF_HINTS, O_RDONLY)) == -1)
            return NULL;
        if (read(fd, &hdr, sizeof hdr) != sizeof hdr ||
          hdr.magic != ELFHINTS_MAGIC ||
          hdr.version != 1) {
            close(fd);
            return NULL;
        }
        p = xmalloc(hdr.dirlistlen + 1);
        if (lseek(fd, hdr.strtab + hdr.dirlist, SEEK_SET) == -1 ||
          read(fd, p, hdr.dirlistlen + 1) != hdr.dirlistlen + 1) {
            free(p);
            close(fd);
            return NULL;
        }
        hints = p;
        close(fd);
    }
    return hints[0] != '\0' ? hints : NULL;
}
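
/*
 * gethints() expects the hints file to begin with a header of roughly this
 * shape.  A sketch for reference; the authoritative definition lives in
 * <elf-hints.h>, and the exact fields should be checked there.
 */
#if 0	/* Illustrative sketch only. */
struct elfhints_hdr {
    u_int32_t magic;		/* ELFHINTS_MAGIC */
    u_int32_t version;		/* currently 1 */
    u_int32_t strtab;		/* file offset of string table */
    u_int32_t strtabsz;		/* size of string table */
    u_int32_t dirlist;		/* strtab offset of the directory list */
    u_int32_t dirlistlen;	/* strlen(dirlist) */
    u_int32_t spare[26];	/* room for expansion */
};
#endif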

static void
init_dag(Obj_Entry *root)
{
    DoneList donelist;

    donelist_init(&donelist);
    init_dag1(root, root, &donelist);
}

static void
init_dag1(Obj_Entry *root, Obj_Entry *obj, DoneList *dlp)
{
    const Needed_Entry *needed;

    if (donelist_check(dlp, obj))
        return;
    objlist_push_tail(&obj->dldags, root);
    objlist_push_tail(&root->dagmembers, obj);
    for (needed = obj->needed; needed != NULL; needed = needed->next)
        if (needed->obj != NULL)
            init_dag1(root, needed->obj, dlp);
}

/*
 * Initialize the dynamic linker.  The argument is the address at which
 * the dynamic linker has been mapped into memory.  The primary task of
 * this function is to relocate the dynamic linker.
 */
static void
init_rtld(caddr_t mapbase)
{
    /*
     * Conjure up an Obj_Entry structure for the dynamic linker.
     *
     * The "path" member is supposed to be dynamically-allocated, but we
     * aren't yet initialized sufficiently to do that.  Below we will
     * replace the static version with a dynamically-allocated copy.
     */
    obj_rtld.path = PATH_RTLD;
    obj_rtld.rtld = true;
    obj_rtld.mapbase = mapbase;
#ifdef PIC
    obj_rtld.relocbase = mapbase;
#endif
    if (&_DYNAMIC != 0) {
        obj_rtld.dynamic = rtld_dynamic(&obj_rtld);
        digest_dynamic(&obj_rtld);
        assert(obj_rtld.needed == NULL);
        assert(!obj_rtld.textrel);

        /*
         * Temporarily put the dynamic linker entry into the object list, so
         * that symbols can be found.
         */
        obj_list = &obj_rtld;
        obj_tail = &obj_rtld.next;
        obj_count = 1;

        relocate_objects(&obj_rtld, true);
    }

    /* Make the object list empty again. */
    obj_list = NULL;
    obj_tail = &obj_list;
    obj_count = 0;

    /* Replace the path with a dynamically allocated copy. */
    obj_rtld.path = xstrdup(obj_rtld.path);

    r_debug.r_brk = r_debug_state;
    r_debug.r_state = RT_CONSISTENT;
}

/*
 * Add the init functions from a needed object list (and its recursive
 * needed objects) to "list".  This is not used directly; it is a helper
 * function for initlist_add_objects().  The write lock must be held
 * when this function is called.
 */
static void
initlist_add_neededs(Needed_Entry *needed, Objlist *list)
{
    /* Recursively process the successor needed objects. */
    if (needed->next != NULL)
        initlist_add_neededs(needed->next, list);

    /* Process the current needed object. */
    if (needed->obj != NULL)
        initlist_add_objects(needed->obj, &needed->obj->next, list);
}

/*
 * Scan all of the DAGs rooted in the range of objects from "obj" to
 * "tail" and add their init functions to "list".  This recurses over
 * the DAGs and ensures the proper init ordering such that each object's
 * needed libraries are initialized before the object itself.  At the
 * same time, this function adds the objects to the global finalization
 * list "list_fini" in the opposite order.  The write lock must be
 * held when this function is called.
 */
static void
initlist_add_objects(Obj_Entry *obj, Obj_Entry **tail, Objlist *list)
{
    if (obj->init_done)
        return;
    obj->init_done = true;

    /* Recursively process the successor objects. */
    if (&obj->next != tail)
        initlist_add_objects(obj->next, tail, list);

    /* Recursively process the needed objects. */
    if (obj->needed != NULL)
        initlist_add_neededs(obj->needed, list);

    /* Add the object to the init list. */
    if (obj->init != NULL)
        objlist_push_tail(list, obj);

    /* Add the object to the global fini list in the reverse order. */
    if (obj->fini != NULL)
        objlist_push_head(&list_fini, obj);
}

#ifndef FPTR_TARGET
#define FPTR_TARGET(f)	((Elf_Addr) (f))
#endif
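
/*
 * FPTR_TARGET exists because on some ABIs a C function pointer is not the
 * code address itself but points to a function descriptor.  A hedged
 * sketch of the kind of override such an architecture might supply (the
 * struct and field names here are hypothetical):
 */
#if 0	/* Illustrative sketch only; real overrides are machine-dependent. */
struct example_fdesc {
    Elf_Addr target;	/* code address of the function */
    Elf_Addr gp;	/* global pointer for the callee */
};
#define FPTR_TARGET(f)	(((const struct example_fdesc *)(f))->target)
#endif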

static bool
is_exported(const Elf_Sym *def)
{
    Elf_Addr value;
    const func_ptr_type *p;

    value = (Elf_Addr)(obj_rtld.relocbase + def->st_value);
    for (p = exports; *p != NULL; p++)
        if (FPTR_TARGET(*p) == value)
            return true;
    return false;
}

/*
 * Given a shared object, traverse its list of needed objects, and load
 * each of them.  Returns 0 on success.  Generates an error message and
 * returns -1 on failure.
 */
static int
load_needed_objects(Obj_Entry *first)
{
    Obj_Entry *obj;

    for (obj = first; obj != NULL; obj = obj->next) {
        Needed_Entry *needed;

        for (needed = obj->needed; needed != NULL; needed = needed->next) {
            const char *name = obj->strtab + needed->name;
            char *path = find_library(name, obj);

            needed->obj = NULL;
            if (path == NULL && !ld_tracing)
                return -1;

            if (path) {
                needed->obj = load_object(path);
                if (needed->obj == NULL && !ld_tracing)
                    return -1;	/* XXX - cleanup */
            }
        }
    }

    return 0;
}

static int
load_preload_objects(void)
{
    char *p = ld_preload;
    static const char delim[] = " \t:;";

    if (p == NULL)
        return 0;

    p += strspn(p, delim);
    while (*p != '\0') {
        size_t len = strcspn(p, delim);
        char *path;
        char savech;

        savech = p[len];
        p[len] = '\0';
        if ((path = find_library(p, NULL)) == NULL)
            return -1;
        if (load_object(path) == NULL)
            return -1;	/* XXX - cleanup */
        p[len] = savech;
        p += len;
        p += strspn(p, delim);
    }
    return 0;
}
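
/*
 * The strspn()/strcspn() walk above tokenizes LD_PRELOAD in place.  A
 * standalone, runnable sketch of the same idiom (the sample input is
 * made up):
 */
#if 0	/* Illustrative sketch only; not part of rtld. */
#include <stdio.h>
#include <string.h>

int
main(void)
{
    char buf[] = " libfoo.so:libbar.so;libbaz.so";
    static const char delim[] = " \t:;";
    char *p = buf;

    p += strspn(p, delim);		/* skip leading separators */
    while (*p != '\0') {
        size_t len = strcspn(p, delim);	/* length of this name */

        printf("preload: %.*s\n", (int)len, p);
        p += len;
        p += strspn(p, delim);		/* skip to the next name */
    }
    return 0;
}
#endif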

/*
 * Load a shared object into memory, if it is not already loaded.  The
 * argument must be a string allocated on the heap.  This function assumes
 * responsibility for freeing it when necessary.
 *
 * Returns a pointer to the Obj_Entry for the object.  Returns NULL
 * on failure.
 */
static Obj_Entry *
load_object(char *path)
{
    Obj_Entry *obj;
    int fd = -1;
    struct stat sb;

    for (obj = obj_list->next; obj != NULL; obj = obj->next)
        if (strcmp(obj->path, path) == 0)
            break;

    /*
     * If we didn't find a match by pathname, open the file and check
     * again by device and inode.  This avoids false mismatches caused
     * by multiple links or ".." in pathnames.
     *
     * To avoid a race, we open the file and use fstat() rather than
     * using stat().
     */
    if (obj == NULL) {
        if ((fd = open(path, O_RDONLY)) == -1) {
            _rtld_error("Cannot open \"%s\"", path);
            return NULL;
        }
        if (fstat(fd, &sb) == -1) {
            _rtld_error("Cannot fstat \"%s\"", path);
            close(fd);
            return NULL;
        }
        for (obj = obj_list->next; obj != NULL; obj = obj->next) {
            if (obj->ino == sb.st_ino && obj->dev == sb.st_dev) {
                close(fd);
                break;
            }
        }
    }

    if (obj == NULL) {	/* First use of this object, so we must map it in */
        dbg("loading \"%s\"", path);
        obj = map_object(fd, path, &sb);
        close(fd);
        if (obj == NULL) {
            free(path);
            return NULL;
        }

        obj->path = path;
        digest_dynamic(obj);

        *obj_tail = obj;
        obj_tail = &obj->next;
|
2000-07-08 04:10:38 +00:00
|
|
|
obj_count++;
|
1998-04-30 07:48:02 +00:00
|
|
|
linkmap_add(obj); /* for GDB */
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
dbg(" %p .. %p: %s", obj->mapbase,
|
|
|
|
obj->mapbase + obj->mapsize - 1, obj->path);
|
|
|
|
if (obj->textrel)
|
|
|
|
dbg(" WARNING: %s has impure text", obj->path);
|
|
|
|
} else
|
|
|
|
free(path);
|
|
|
|
|
|
|
|
obj->refcount++;
|
|
|
|
return obj;
|
|
|
|
}
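
/*
 * Illustrative sketch (not part of rtld, compiled out): detecting that
 * two pathnames name the same file by comparing st_dev/st_ino from
 * fstat(), as the second matching pass above does.  Opening the file
 * and using fstat() (rather than stat() on the name) pins down the
 * identity of the file actually opened.
 */
#if 0
#include <sys/stat.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool
same_file(const char *path1, const char *path2)
{
    struct stat sb1, sb2;
    int fd1, fd2;
    bool result = false;

    if ((fd1 = open(path1, O_RDONLY)) == -1)
	return false;
    if ((fd2 = open(path2, O_RDONLY)) == -1) {
	close(fd1);
	return false;
    }
    if (fstat(fd1, &sb1) == 0 && fstat(fd2, &sb2) == 0)
	result = sb1.st_dev == sb2.st_dev && sb1.st_ino == sb2.st_ino;
    close(fd1);
    close(fd2);
    return result;
}

int
main(void)
{
    /* The same file reached through two different pathnames. */
    printf("%d\n", same_file("/etc/passwd", "/etc/../etc/passwd"));	/* 1 */
    return 0;
}
#endif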
|
|
|
|
|
2000-07-08 04:10:38 +00:00
|
|
|
/*
|
|
|
|
* Check for locking violations and die if one is found.
|
|
|
|
*/
|
1999-12-27 04:44:04 +00:00
|
|
|
static void
|
2000-07-08 04:10:38 +00:00
|
|
|
lock_check(void)
|
1999-12-27 04:44:04 +00:00
|
|
|
{
|
2000-07-08 04:10:38 +00:00
|
|
|
int rcount, wcount;
|
|
|
|
|
|
|
|
rcount = lockinfo.rcount;
|
|
|
|
wcount = lockinfo.wcount;
|
|
|
|
assert(rcount >= 0);
|
|
|
|
assert(wcount >= 0);
|
|
|
|
if (wcount > 1 || (wcount != 0 && rcount != 0)) {
|
|
|
|
_rtld_error("Application locking error: %d readers and %d writers"
|
|
|
|
" in the dynamic linker. See DLLOCKINIT(3) in the manual pages.",
|
|
|
|
rcount, wcount);
|
|
|
|
die();
|
|
|
|
}
|
1999-12-27 04:44:04 +00:00
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
static Obj_Entry *
|
|
|
|
obj_from_addr(const void *addr)
|
|
|
|
{
|
|
|
|
unsigned long endhash;
|
|
|
|
Obj_Entry *obj;
|
|
|
|
|
|
|
|
endhash = elf_hash(END_SYM);
|
|
|
|
for (obj = obj_list; obj != NULL; obj = obj->next) {
|
1998-09-04 19:03:57 +00:00
|
|
|
const Elf_Sym *endsym;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
if (addr < (void *) obj->mapbase)
|
|
|
|
continue;
|
|
|
|
if ((endsym = symlook_obj(END_SYM, endhash, obj, true)) == NULL)
|
|
|
|
continue; /* No "end" symbol?! */
|
|
|
|
if (addr < (void *) (obj->relocbase + endsym->st_value))
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
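
/*
 * Illustrative sketch (not part of rtld, compiled out): mapping an
 * address back to a containing [base, base + size) range, which is the
 * essence of obj_from_addr() above.  The "Range" type and its table
 * are hypothetical.
 */
#if 0
#include <stdio.h>

typedef struct {
    const char *name;
    const char *base;
    size_t size;
} Range;

static const Range *
range_from_addr(const Range *ranges, int nranges, const void *addr)
{
    const char *p = (const char *) addr;
    int i;

    for (i = 0; i < nranges; i++)
	if (p >= ranges[i].base && p < ranges[i].base + ranges[i].size)
	    return &ranges[i];
    return NULL;
}

int
main(void)
{
    static char image_a[4096], image_b[4096];
    Range ranges[] = {
	{ "a.so", image_a, sizeof image_a },
	{ "b.so", image_b, sizeof image_b },
    };
    const Range *r = range_from_addr(ranges, 2, image_b + 100);

    printf("%s\n", r != NULL ? r->name : "(none)");	/* b.so */
    return 0;
}
#endif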
|
|
|
|
|
2000-07-26 04:24:40 +00:00
|
|
|
/*
|
|
|
|
* Call the finalization functions for each of the objects in "list"
|
|
|
|
* which are unreferenced. All of the objects are expected to have
|
|
|
|
* non-NULL fini functions.
|
|
|
|
*/
|
1999-08-30 01:48:19 +00:00
|
|
|
static void
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_call_fini(Objlist *list)
|
1999-08-30 01:48:19 +00:00
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
2001-01-05 04:36:17 +00:00
|
|
|
char *saved_msg;
|
1999-08-30 01:48:19 +00:00
|
|
|
|
2001-01-05 04:36:17 +00:00
|
|
|
/*
|
|
|
|
* Preserve the current error message since a fini function might
|
|
|
|
* call into the dynamic linker and overwrite it.
|
|
|
|
*/
|
|
|
|
saved_msg = errmsg_save();
|
2000-07-26 04:24:40 +00:00
|
|
|
STAILQ_FOREACH(elm, list, link) {
|
|
|
|
if (elm->obj->refcount == 0) {
|
2001-10-29 10:10:10 +00:00
|
|
|
dbg("calling fini function for %s at %p", elm->obj->path,
|
|
|
|
(void *)elm->obj->fini);
|
|
|
|
call_initfini_pointer(elm->obj, elm->obj->fini);
|
2000-07-26 04:24:40 +00:00
|
|
|
}
|
|
|
|
}
|
2001-01-05 04:36:17 +00:00
|
|
|
errmsg_restore(saved_msg);
|
2000-07-26 04:24:40 +00:00
|
|
|
}
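
/*
 * Illustrative sketch (not part of rtld, compiled out): the
 * save/restore-around-callback pattern that errmsg_save() and
 * errmsg_restore() implement above, shown here with plain errno as a
 * stand-in for the dynamic linker's error message.  The callback may
 * clobber the saved state; the caller's view is restored afterward.
 */
#if 0
#include <errno.h>
#include <stdio.h>

static void
clobbering_callback(void)
{
    errno = 0;		/* Simulates a fini function overwriting state. */
}

int
main(void)
{
    int saved_errno;

    errno = ENOENT;	/* Pending error state we must preserve. */
    saved_errno = errno;
    clobbering_callback();
    errno = saved_errno;	/* Restore what the callback clobbered. */
    printf("%d\n", errno == ENOENT);	/* 1 */
    return 0;
}
#endif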
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Call the initialization functions for each of the objects in
|
|
|
|
* "list". All of the objects are expected to have non-NULL init
|
|
|
|
* functions.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
objlist_call_init(Objlist *list)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
2001-01-05 04:36:17 +00:00
|
|
|
char *saved_msg;
|
2000-07-26 04:24:40 +00:00
|
|
|
|
2001-01-05 04:36:17 +00:00
|
|
|
/*
|
|
|
|
* Preserve the current error message since an init function might
|
|
|
|
* call into the dynamic linker and overwrite it.
|
|
|
|
*/
|
|
|
|
saved_msg = errmsg_save();
|
2000-07-26 04:24:40 +00:00
|
|
|
STAILQ_FOREACH(elm, list, link) {
|
2001-10-29 10:10:10 +00:00
|
|
|
dbg("calling init function for %s at %p", elm->obj->path,
|
|
|
|
(void *)elm->obj->init);
|
|
|
|
call_initfini_pointer(elm->obj, elm->obj->init);
|
2000-07-26 04:24:40 +00:00
|
|
|
}
|
2001-01-05 04:36:17 +00:00
|
|
|
errmsg_restore(saved_msg);
|
2000-07-26 04:24:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
objlist_clear(Objlist *list)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
while (!STAILQ_EMPTY(list)) {
|
|
|
|
elm = STAILQ_FIRST(list);
|
|
|
|
STAILQ_REMOVE_HEAD(list, link);
|
|
|
|
free(elm);
|
|
|
|
}
|
1999-08-30 01:48:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static Objlist_Entry *
|
|
|
|
objlist_find(Objlist *list, const Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
STAILQ_FOREACH(elm, list, link)
|
|
|
|
if (elm->obj == obj)
|
|
|
|
return elm;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2000-07-26 04:24:40 +00:00
|
|
|
static void
|
|
|
|
objlist_init(Objlist *list)
|
|
|
|
{
|
|
|
|
STAILQ_INIT(list);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
objlist_push_head(Objlist *list, Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
elm = NEW(Objlist_Entry);
|
|
|
|
elm->obj = obj;
|
|
|
|
STAILQ_INSERT_HEAD(list, elm, link);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
objlist_push_tail(Objlist *list, Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
elm = NEW(Objlist_Entry);
|
|
|
|
elm->obj = obj;
|
|
|
|
STAILQ_INSERT_TAIL(list, elm, link);
|
|
|
|
}
|
|
|
|
|
1999-08-30 01:48:19 +00:00
|
|
|
static void
|
|
|
|
objlist_remove(Objlist *list, Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
if ((elm = objlist_find(list, obj)) != NULL) {
|
2000-05-26 02:09:24 +00:00
|
|
|
STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
|
1999-08-30 01:48:19 +00:00
|
|
|
free(elm);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2000-07-26 04:24:40 +00:00
|
|
|
/*
|
|
|
|
* Remove all of the unreferenced objects from "list".
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
objlist_remove_unref(Objlist *list)
|
|
|
|
{
|
|
|
|
Objlist newlist;
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
STAILQ_INIT(&newlist);
|
|
|
|
while (!STAILQ_EMPTY(list)) {
|
|
|
|
elm = STAILQ_FIRST(list);
|
|
|
|
STAILQ_REMOVE_HEAD(list, link);
|
|
|
|
if (elm->obj->refcount == 0)
|
|
|
|
free(elm);
|
|
|
|
else
|
|
|
|
STAILQ_INSERT_TAIL(&newlist, elm, link);
|
|
|
|
}
|
|
|
|
*list = newlist;
|
|
|
|
}
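
/*
 * Illustrative sketch (not part of rtld, compiled out): filtering a
 * <sys/queue.h> STAILQ by draining it into a fresh list, as
 * objlist_remove_unref() does above.  Removing each unwanted element
 * with STAILQ_REMOVE() would cost a list walk per removal; draining
 * and re-inserting keeps the whole pass linear.
 */
#if 0
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
    int value;
    STAILQ_ENTRY(entry) link;
};
STAILQ_HEAD(entry_list, entry);

static void
keep_even(struct entry_list *list)
{
    struct entry_list newlist;
    struct entry *elm;

    STAILQ_INIT(&newlist);
    while (!STAILQ_EMPTY(list)) {
	elm = STAILQ_FIRST(list);
	STAILQ_REMOVE_HEAD(list, link);
	if (elm->value % 2 != 0)
	    free(elm);
	else
	    STAILQ_INSERT_TAIL(&newlist, elm, link);
    }
    *list = newlist;
}

int
main(void)
{
    struct entry_list list;
    struct entry *elm;
    int i;

    STAILQ_INIT(&list);
    for (i = 0; i < 6; i++) {
	elm = malloc(sizeof *elm);
	elm->value = i;
	STAILQ_INSERT_TAIL(&list, elm, link);
    }
    keep_even(&list);
    STAILQ_FOREACH(elm, &list, link)
	printf("%d ", elm->value);	/* 0 2 4 */
    printf("\n");
    return 0;
}
#endif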
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
|
|
|
* Relocate newly-loaded shared objects. The argument is a pointer to
|
|
|
|
* the Obj_Entry for the first such object. All objects from the first
|
|
|
|
* to the end of the list of objects are relocated. Returns 0 on success,
|
|
|
|
* or -1 on failure.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
relocate_objects(Obj_Entry *first, bool bind_now)
|
|
|
|
{
|
|
|
|
Obj_Entry *obj;
|
|
|
|
|
|
|
|
for (obj = first; obj != NULL; obj = obj->next) {
|
1998-09-22 02:09:56 +00:00
|
|
|
if (obj != &obj_rtld)
|
|
|
|
dbg("relocating \"%s\"", obj->path);
|
1998-03-07 19:24:35 +00:00
|
|
|
if (obj->nbuckets == 0 || obj->nchains == 0 || obj->buckets == NULL ||
|
|
|
|
obj->symtab == NULL || obj->strtab == NULL) {
|
|
|
|
_rtld_error("%s: Shared object has no run-time symbol table",
|
|
|
|
obj->path);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (obj->textrel) {
|
|
|
|
/* There are relocations to the write-protected text segment. */
|
|
|
|
if (mprotect(obj->mapbase, obj->textsize,
|
|
|
|
PROT_READ|PROT_WRITE|PROT_EXEC) == -1) {
|
|
|
|
_rtld_error("%s: Cannot write-enable text segment: %s",
|
|
|
|
obj->path, strerror(errno));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Process the non-PLT relocations. */
|
1998-09-04 19:03:57 +00:00
|
|
|
if (reloc_non_plt(obj, &obj_rtld))
|
1998-03-07 19:24:35 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (obj->textrel) { /* Re-protect the text segment. */
|
|
|
|
if (mprotect(obj->mapbase, obj->textsize,
|
|
|
|
PROT_READ|PROT_EXEC) == -1) {
|
|
|
|
_rtld_error("%s: Cannot write-protect text segment: %s",
|
|
|
|
obj->path, strerror(errno));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Process the PLT relocations. */
|
2000-01-29 01:27:04 +00:00
|
|
|
if (reloc_plt(obj) == -1)
|
|
|
|
return -1;
|
|
|
|
/* Relocate the jump slots if we are doing immediate binding. */
|
|
|
|
if (bind_now)
|
|
|
|
if (reloc_jmpslots(obj) == -1)
|
1998-09-04 19:03:57 +00:00
|
|
|
return -1;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2000-01-29 01:27:04 +00:00
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
|
|
|
* Set up the magic number and version in the Obj_Entry. These
|
|
|
|
* were checked in the crt1.o from the original ElfKit, so we
|
|
|
|
* set them for backward compatibility.
|
|
|
|
*/
|
|
|
|
obj->magic = RTLD_MAGIC;
|
|
|
|
obj->version = RTLD_VERSION;
|
|
|
|
|
1999-04-09 00:28:43 +00:00
|
|
|
/* Set the special PLT or GOT entries. */
|
|
|
|
init_pltgot(obj);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
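
/*
 * Illustrative sketch (not part of rtld, compiled out): the mprotect()
 * bracket used above for objects with impure text -- write-enable a
 * read-only mapping, patch it, then restore the original protection.
 * A private anonymous page stands in for a text segment here.
 */
#if 0
#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    size_t pagesize = (size_t) sysconf(_SC_PAGESIZE);
    char *page;

    page = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
	MAP_PRIVATE | MAP_ANON, -1, 0);
    if (page == MAP_FAILED)
	return 1;
    page[0] = 0x01;					/* Initial contents. */
    if (mprotect(page, pagesize, PROT_READ) == -1)	/* "Text" is read-only. */
	return 1;

    /* Write-enable, apply the "relocation", re-protect. */
    if (mprotect(page, pagesize, PROT_READ | PROT_WRITE) == -1)
	return 1;
    page[0] = 0x02;
    if (mprotect(page, pagesize, PROT_READ) == -1)
	return 1;

    printf("%d\n", page[0]);	/* 2 */
    munmap(page, pagesize);
    return 0;
}
#endif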
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Cleanup procedure. It will be called (by the atexit mechanism) just
|
|
|
|
* before the process exits.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
rtld_exit(void)
|
|
|
|
{
|
2000-01-09 21:13:48 +00:00
|
|
|
Obj_Entry *obj;
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
dbg("rtld_exit()");
|
2000-07-26 04:24:40 +00:00
|
|
|
wlock_acquire();
|
|
|
|
/* Clear all the reference counts so the fini functions will be called. */
|
|
|
|
for (obj = obj_list; obj != NULL; obj = obj->next)
|
|
|
|
obj->refcount = 0;
|
|
|
|
wlock_release();
|
|
|
|
objlist_call_fini(&list_fini);
|
|
|
|
/* No need to remove the items from the list, since we are exiting. */
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static char *
|
|
|
|
search_library_path(const char *name, const char *path)
|
|
|
|
{
|
|
|
|
size_t namelen = strlen(name);
|
|
|
|
const char *p = path;
|
|
|
|
|
|
|
|
if (p == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
p += strspn(p, ":;");
|
|
|
|
while (*p != '\0') {
|
|
|
|
size_t len = strcspn(p, ":;");
|
|
|
|
|
|
|
|
if (*p == '/' || trust) {
|
|
|
|
char *pathname;
|
|
|
|
const char *dir = p;
|
|
|
|
size_t dirlen = len;
|
|
|
|
|
|
|
|
pathname = xmalloc(dirlen + 1 + namelen + 1);
|
|
|
|
strncpy(pathname, dir, dirlen);
|
|
|
|
pathname[dirlen] = '/';
|
|
|
|
strcpy(pathname + dirlen + 1, name);
|
|
|
|
|
|
|
|
dbg(" Trying \"%s\"", pathname);
|
|
|
|
if (access(pathname, F_OK) == 0) /* We found it */
|
|
|
|
return pathname;
|
|
|
|
|
|
|
|
free(pathname);
|
|
|
|
}
|
|
|
|
p += len;
|
|
|
|
p += strspn(p, ":;");
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
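
/*
 * Illustrative sketch (not part of rtld, compiled out): assembling a
 * "dir/name" candidate as search_library_path() does, from a counted
 * (not NUL-terminated) directory substring of the search path.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
make_pathname(const char *dir, size_t dirlen, const char *name)
{
    size_t namelen = strlen(name);
    char *pathname = malloc(dirlen + 1 + namelen + 1);

    if (pathname == NULL)
	return NULL;
    strncpy(pathname, dir, dirlen);	/* Directory part, no NUL yet. */
    pathname[dirlen] = '/';
    strcpy(pathname + dirlen + 1, name);	/* Adds the final NUL. */
    return pathname;
}

int
main(void)
{
    const char *path = "/usr/lib:/usr/local/lib";
    size_t dirlen = strcspn(path, ":");		/* First component only. */
    char *pathname = make_pathname(path, dirlen, "libm.so");

    printf("%s\n", pathname);	/* /usr/lib/libm.so */
    free(pathname);
    return 0;
}
#endif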
|
|
|
|
|
|
|
|
int
|
|
|
|
dlclose(void *handle)
|
|
|
|
{
|
1999-12-27 04:44:04 +00:00
|
|
|
Obj_Entry *root;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
1999-12-27 04:44:04 +00:00
|
|
|
wlock_acquire();
|
|
|
|
root = dlcheck(handle);
|
|
|
|
if (root == NULL) {
|
2000-07-08 04:10:38 +00:00
|
|
|
wlock_release();
|
1998-03-07 19:24:35 +00:00
|
|
|
return -1;
|
1999-12-27 04:44:04 +00:00
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2000-01-09 21:13:48 +00:00
|
|
|
/* Unreference the object and its dependencies. */
|
1998-03-07 19:24:35 +00:00
|
|
|
root->dl_refcount--;
|
2000-01-09 21:13:48 +00:00
|
|
|
unref_dag(root);
|
1998-04-30 07:48:02 +00:00
|
|
|
|
2000-01-09 21:13:48 +00:00
|
|
|
if (root->refcount == 0) {
|
|
|
|
/*
|
|
|
|
* The object is no longer referenced, so we must unload it.
|
2000-07-26 04:24:40 +00:00
|
|
|
* First, call the fini functions with no locks held.
|
2000-01-09 21:13:48 +00:00
|
|
|
*/
|
2000-07-08 04:10:38 +00:00
|
|
|
wlock_release();
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_call_fini(&list_fini);
|
2000-01-09 21:13:48 +00:00
|
|
|
wlock_acquire();
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_remove_unref(&list_fini);
|
2000-01-09 21:13:48 +00:00
|
|
|
|
|
|
|
/* Finish cleaning up the newly-unreferenced objects. */
|
2000-08-26 05:13:29 +00:00
|
|
|
GDB_STATE(RT_DELETE, &root->linkmap);
|
2000-01-09 21:13:48 +00:00
|
|
|
unload_object(root);
|
2000-08-26 05:13:29 +00:00
|
|
|
GDB_STATE(RT_CONSISTENT, NULL);
|
2000-01-09 21:13:48 +00:00
|
|
|
}
|
2000-07-08 04:10:38 +00:00
|
|
|
wlock_release();
|
1998-03-07 19:24:35 +00:00
|
|
|
return 0;
|
|
|
|
}
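
/*
 * Illustrative sketch (not part of rtld, compiled out): the client side
 * of the dlopen()/dlsym()/dlclose() lifecycle these entry points
 * implement.  "libm.so" and "cos" are just convenient concrete names.
 */
#if 0
#include <dlfcn.h>
#include <stdio.h>

int
main(void)
{
    void *handle;
    double (*cosine)(double);

    if ((handle = dlopen("libm.so", RTLD_NOW)) == NULL) {
	fprintf(stderr, "%s\n", dlerror());
	return 1;
    }
    cosine = (double (*)(double)) dlsym(handle, "cos");
    if (cosine == NULL) {
	fprintf(stderr, "%s\n", dlerror());
	dlclose(handle);
	return 1;
    }
    printf("%f\n", cosine(0.0));	/* 1.000000 */
    dlclose(handle);
    return 0;
}
#endif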
|
|
|
|
|
|
|
|
const char *
|
|
|
|
dlerror(void)
|
|
|
|
{
|
|
|
|
char *msg = error_message;
|
|
|
|
error_message = NULL;
|
|
|
|
return msg;
|
|
|
|
}
|
|
|
|
|
2000-07-08 04:10:38 +00:00
|
|
|
/*
|
|
|
|
* This function is deprecated and has no effect.
|
|
|
|
*/
|
1999-12-27 04:44:04 +00:00
|
|
|
void
|
|
|
|
dllockinit(void *context,
|
|
|
|
void *(*lock_create)(void *context),
|
|
|
|
void (*rlock_acquire)(void *lock),
|
|
|
|
void (*wlock_acquire)(void *lock),
|
|
|
|
void (*lock_release)(void *lock),
|
|
|
|
void (*lock_destroy)(void *lock),
|
|
|
|
void (*context_destroy)(void *context))
|
|
|
|
{
|
2000-07-08 04:10:38 +00:00
|
|
|
static void *cur_context;
|
|
|
|
static void (*cur_context_destroy)(void *);
|
|
|
|
|
|
|
|
/* Just destroy the context from the previous call, if necessary. */
|
|
|
|
if (cur_context_destroy != NULL)
|
|
|
|
cur_context_destroy(cur_context);
|
|
|
|
cur_context = context;
|
|
|
|
cur_context_destroy = context_destroy;
|
2000-01-29 01:27:04 +00:00
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
void *
|
|
|
|
dlopen(const char *name, int mode)
|
|
|
|
{
|
1999-12-27 04:44:04 +00:00
|
|
|
Obj_Entry **old_obj_tail;
|
|
|
|
Obj_Entry *obj;
|
2000-07-26 04:24:40 +00:00
|
|
|
Objlist initlist;
|
2002-02-04 10:33:48 +00:00
|
|
|
int result;
|
|
|
|
|
|
|
|
ld_tracing = (mode & RTLD_TRACE) == 0 ? NULL : "1";
|
|
|
|
if (ld_tracing != NULL)
|
|
|
|
environ = (char **)*get_program_var_addr("environ");
|
2000-01-09 21:13:48 +00:00
|
|
|
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_init(&initlist);
|
1998-04-30 07:48:02 +00:00
|
|
|
|
1999-12-27 04:44:04 +00:00
|
|
|
wlock_acquire();
|
2000-08-26 05:13:29 +00:00
|
|
|
GDB_STATE(RT_ADD, NULL);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
1999-12-27 04:44:04 +00:00
|
|
|
old_obj_tail = obj_tail;
|
|
|
|
obj = NULL;
|
1999-06-25 04:50:06 +00:00
|
|
|
if (name == NULL) {
|
1998-03-07 19:24:35 +00:00
|
|
|
obj = obj_main;
|
1999-06-25 04:50:06 +00:00
|
|
|
obj->refcount++;
|
|
|
|
} else {
|
1998-11-27 21:19:52 +00:00
|
|
|
char *path = find_library(name, obj_main);
|
1998-04-30 07:48:02 +00:00
|
|
|
if (path != NULL)
|
|
|
|
obj = load_object(path);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
1998-04-30 07:48:02 +00:00
|
|
|
if (obj) {
|
|
|
|
obj->dl_refcount++;
|
1999-08-30 01:48:19 +00:00
|
|
|
if (mode & RTLD_GLOBAL && objlist_find(&list_global, obj) == NULL)
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_push_tail(&list_global, obj);
|
1999-08-30 01:48:19 +00:00
|
|
|
mode &= RTLD_MODEMASK;
|
1998-04-30 07:48:02 +00:00
|
|
|
if (*old_obj_tail != NULL) { /* We loaded something new. */
|
|
|
|
assert(*old_obj_tail == obj);
|
|
|
|
|
2002-02-04 10:33:48 +00:00
|
|
|
result = load_needed_objects(obj);
|
|
|
|
if (result != -1 && ld_tracing) {
|
|
|
|
trace_loaded_objects(obj);
|
|
|
|
wlock_release();
|
|
|
|
exit(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (result == -1 ||
|
1999-08-30 01:48:19 +00:00
|
|
|
(init_dag(obj), relocate_objects(obj, mode == RTLD_NOW)) == -1) {
|
1998-04-30 07:48:02 +00:00
|
|
|
obj->dl_refcount--;
|
2000-01-09 21:13:48 +00:00
|
|
|
unref_dag(obj);
|
|
|
|
if (obj->refcount == 0)
|
|
|
|
unload_object(obj);
|
1998-04-30 07:48:02 +00:00
|
|
|
obj = NULL;
|
2000-01-09 21:13:48 +00:00
|
|
|
} else {
|
2000-07-26 04:24:40 +00:00
|
|
|
/* Make list of init functions to call. */
|
|
|
|
initlist_add_objects(obj, &obj->next, &initlist);
|
2000-01-09 21:13:48 +00:00
|
|
|
}
|
1998-04-30 07:48:02 +00:00
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
1998-04-30 07:48:02 +00:00
|
|
|
|
2000-08-26 05:13:29 +00:00
|
|
|
GDB_STATE(RT_CONSISTENT, obj ? &obj->linkmap : NULL);
|
2000-01-09 21:13:48 +00:00
|
|
|
|
|
|
|
/* Call the init functions with no locks held. */
|
2000-07-08 04:10:38 +00:00
|
|
|
wlock_release();
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_call_init(&initlist);
|
2000-01-09 21:13:48 +00:00
|
|
|
wlock_acquire();
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_clear(&initlist);
|
2000-07-08 04:10:38 +00:00
|
|
|
wlock_release();
|
1998-03-07 19:24:35 +00:00
|
|
|
return obj;
|
|
|
|
}
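
/*
 * Illustrative sketch (not part of rtld, compiled out): a library
 * interposer built around the RTLD_NEXT pseudo-handle that dlsym()
 * (below) treats specially.  Compiled into a shared object and
 * activated through LD_PRELOAD, it wraps the next definition of
 * getpid() in the search order after the caller's object.
 */
#if 0
#include <dlfcn.h>
#include <stdio.h>
#include <unistd.h>

pid_t
getpid(void)
{
    static pid_t (*next_getpid)(void);

    if (next_getpid == NULL)
	next_getpid = (pid_t (*)(void)) dlsym(RTLD_NEXT, "getpid");
    fprintf(stderr, "getpid() intercepted\n");
    return next_getpid();
}
#endif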
|
|
|
|
|
|
|
|
void *
|
|
|
|
dlsym(void *handle, const char *name)
|
|
|
|
{
|
|
|
|
const Obj_Entry *obj;
|
|
|
|
unsigned long hash;
|
1998-09-04 19:03:57 +00:00
|
|
|
const Elf_Sym *def;
|
1999-08-30 01:48:19 +00:00
|
|
|
const Obj_Entry *defobj;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
hash = elf_hash(name);
|
1998-09-02 01:09:34 +00:00
|
|
|
def = NULL;
|
1999-08-30 01:48:19 +00:00
|
|
|
defobj = NULL;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2000-07-08 04:10:38 +00:00
|
|
|
rlock_acquire();
|
2000-09-19 04:27:16 +00:00
|
|
|
if (handle == NULL || handle == RTLD_NEXT || handle == RTLD_DEFAULT) {
|
1998-03-07 19:24:35 +00:00
|
|
|
void *retaddr;
|
|
|
|
|
|
|
|
retaddr = __builtin_return_address(0); /* __GNUC__ only */
|
|
|
|
if ((obj = obj_from_addr(retaddr)) == NULL) {
|
|
|
|
_rtld_error("Cannot determine caller's shared object");
|
2000-07-08 04:10:38 +00:00
|
|
|
rlock_release();
|
1998-03-07 19:24:35 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
1999-08-30 01:48:19 +00:00
|
|
|
if (handle == NULL) { /* Just the caller's shared object. */
|
1998-09-02 01:09:34 +00:00
|
|
|
def = symlook_obj(name, hash, obj, true);
|
1999-08-30 01:48:19 +00:00
|
|
|
defobj = obj;
|
2000-09-19 04:27:16 +00:00
|
|
|
} else if (handle == RTLD_NEXT) { /* Objects after the caller's. */
|
1999-08-30 01:48:19 +00:00
|
|
|
while ((obj = obj->next) != NULL) {
|
|
|
|
if ((def = symlook_obj(name, hash, obj, true)) != NULL) {
|
|
|
|
defobj = obj;
|
1998-09-02 01:09:34 +00:00
|
|
|
break;
|
1999-08-30 01:48:19 +00:00
|
|
|
}
|
|
|
|
}
|
2000-09-19 04:27:16 +00:00
|
|
|
} else {
|
|
|
|
assert(handle == RTLD_DEFAULT);
|
|
|
|
def = symlook_default(name, hash, obj, &defobj, true);
|
1998-09-02 01:09:34 +00:00
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
} else {
|
1999-12-27 04:44:04 +00:00
|
|
|
if ((obj = dlcheck(handle)) == NULL) {
|
2000-07-08 04:10:38 +00:00
|
|
|
rlock_release();
|
1998-03-07 19:24:35 +00:00
|
|
|
return NULL;
|
1999-12-27 04:44:04 +00:00
|
|
|
}
|
1998-09-02 01:09:34 +00:00
|
|
|
|
|
|
|
if (obj->mainprog) {
|
2000-07-08 04:10:38 +00:00
|
|
|
DoneList donelist;
|
|
|
|
|
1998-09-02 01:09:34 +00:00
|
|
|
/* Search main program and all libraries loaded by it. */
|
2000-07-08 04:10:38 +00:00
|
|
|
donelist_init(&donelist);
|
|
|
|
def = symlook_list(name, hash, &list_main, &defobj, true,
|
|
|
|
&donelist);
|
1998-09-02 01:09:34 +00:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* XXX - This isn't correct. The search should include the whole
|
|
|
|
* DAG rooted at the given object.
|
|
|
|
*/
|
|
|
|
def = symlook_obj(name, hash, obj, true);
|
1999-08-30 01:48:19 +00:00
|
|
|
defobj = obj;
|
1998-09-02 01:09:34 +00:00
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
1999-12-27 04:44:04 +00:00
|
|
|
if (def != NULL) {
|
2000-07-08 04:10:38 +00:00
|
|
|
rlock_release();
|
2001-10-15 18:48:42 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The value required by the caller is derived from the value
|
|
|
|
* of the symbol. For the ia64 architecture, we need to
|
|
|
|
* construct a function descriptor which the caller can use to
|
|
|
|
* call the function with the right 'gp' value. For other
|
|
|
|
* architectures and for non-functions, the value is simply
|
|
|
|
* the relocated value of the symbol.
|
|
|
|
*/
|
|
|
|
if (ELF_ST_TYPE(def->st_info) == STT_FUNC)
|
|
|
|
return make_function_pointer(def, defobj);
|
|
|
|
else
|
|
|
|
return defobj->relocbase + def->st_value;
|
1999-12-27 04:44:04 +00:00
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
_rtld_error("Undefined symbol \"%s\"", name);
|
2000-07-08 04:10:38 +00:00
|
|
|
rlock_release();
|
1998-03-07 19:24:35 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
1999-03-24 23:37:35 +00:00
|
|
|
int
|
|
|
|
dladdr(const void *addr, Dl_info *info)
|
|
|
|
{
|
|
|
|
const Obj_Entry *obj;
|
|
|
|
const Elf_Sym *def;
|
|
|
|
void *symbol_addr;
|
|
|
|
unsigned long symoffset;
|
|
|
|
|
2000-07-08 04:10:38 +00:00
|
|
|
rlock_acquire();
|
1999-03-24 23:47:29 +00:00
|
|
|
obj = obj_from_addr(addr);
|
1999-03-24 23:37:35 +00:00
|
|
|
if (obj == NULL) {
|
1999-03-24 23:47:29 +00:00
|
|
|
_rtld_error("No shared object contains address");
|
2000-07-08 04:10:38 +00:00
|
|
|
rlock_release();
|
1999-03-24 23:37:35 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
info->dli_fname = obj->path;
|
1999-03-24 23:47:29 +00:00
|
|
|
info->dli_fbase = obj->mapbase;
|
1999-03-24 23:37:35 +00:00
|
|
|
info->dli_saddr = (void *)0;
|
|
|
|
info->dli_sname = NULL;
|
|
|
|
|
|
|
|
/*
|
1999-04-07 02:48:43 +00:00
|
|
|
* Walk the symbol list looking for the symbol whose address is
|
1999-03-24 23:37:35 +00:00
|
|
|
* closest to the address sent in.
|
|
|
|
*/
|
|
|
|
for (symoffset = 0; symoffset < obj->nchains; symoffset++) {
|
|
|
|
def = obj->symtab + symoffset;
|
1999-03-24 23:47:29 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Skip the symbol if st_shndx is either SHN_UNDEF or
|
|
|
|
* SHN_COMMON.
|
|
|
|
*/
|
|
|
|
if (def->st_shndx == SHN_UNDEF || def->st_shndx == SHN_COMMON)
|
|
|
|
continue;
|
|
|
|
|
1999-03-24 23:37:35 +00:00
|
|
|
/*
|
1999-04-07 02:48:43 +00:00
|
|
|
* If the symbol's address is greater than the specified address, or if it
|
1999-03-24 23:37:35 +00:00
|
|
|
* is further away from addr than the current nearest symbol,
|
|
|
|
* then reject it.
|
|
|
|
*/
|
1999-03-24 23:47:29 +00:00
|
|
|
symbol_addr = obj->relocbase + def->st_value;
|
|
|
|
if (symbol_addr > addr || symbol_addr < info->dli_saddr)
|
1999-03-24 23:37:35 +00:00
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Update our idea of the nearest symbol. */
|
|
|
|
info->dli_sname = obj->strtab + def->st_name;
|
|
|
|
info->dli_saddr = symbol_addr;
|
|
|
|
|
|
|
|
/* Exact match? */
|
|
|
|
if (info->dli_saddr == addr)
|
|
|
|
break;
|
|
|
|
}
|
2000-07-08 04:10:38 +00:00
|
|
|
rlock_release();
|
1999-03-24 23:37:35 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
1998-04-30 07:48:02 +00:00
|
|
|
static void
|
|
|
|
linkmap_add(Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
struct link_map *l = &obj->linkmap;
|
|
|
|
struct link_map *prev;
|
|
|
|
|
|
|
|
obj->linkmap.l_name = obj->path;
|
|
|
|
obj->linkmap.l_addr = obj->mapbase;
|
|
|
|
obj->linkmap.l_ld = obj->dynamic;
|
|
|
|
#ifdef __mips__
|
|
|
|
/* GDB needs load offset on MIPS to use the symbols */
|
|
|
|
obj->linkmap.l_offs = obj->relocbase;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (r_debug.r_map == NULL) {
|
|
|
|
r_debug.r_map = l;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
1998-09-16 02:54:08 +00:00
|
|
|
/*
|
|
|
|
* Scan to the end of the list, but not past the entry for the
|
|
|
|
* dynamic linker, which we want to keep at the very end.
|
|
|
|
*/
|
|
|
|
for (prev = r_debug.r_map;
|
|
|
|
prev->l_next != NULL && prev->l_next != &obj_rtld.linkmap;
|
|
|
|
prev = prev->l_next)
|
1998-04-30 07:48:02 +00:00
|
|
|
;
|
1998-09-16 02:54:08 +00:00
|
|
|
|
|
|
|
/* Link in the new entry. */
|
1998-04-30 07:48:02 +00:00
|
|
|
l->l_prev = prev;
|
1998-09-16 02:54:08 +00:00
|
|
|
l->l_next = prev->l_next;
|
|
|
|
if (l->l_next != NULL)
|
|
|
|
l->l_next->l_prev = l;
|
1998-04-30 07:48:02 +00:00
|
|
|
prev->l_next = l;
|
|
|
|
}
|
|
|
|
|
1998-09-02 02:00:20 +00:00
|
|
|
static void
|
|
|
|
linkmap_delete(Obj_Entry *obj)
|
1998-04-30 07:48:02 +00:00
|
|
|
{
|
|
|
|
struct link_map *l = &obj->linkmap;
|
|
|
|
|
|
|
|
if (l->l_prev == NULL) {
|
|
|
|
if ((r_debug.r_map = l->l_next) != NULL)
|
|
|
|
l->l_next->l_prev = NULL;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((l->l_prev->l_next = l->l_next) != NULL)
|
|
|
|
l->l_next->l_prev = l->l_prev;
|
|
|
|
}
|
1998-05-01 08:39:27 +00:00
|
|
|
|
1998-09-02 02:00:20 +00:00
|
|
|
/*
|
|
|
|
* Function on which the debugger sets a breakpoint to gain control.
|
2000-08-26 05:13:29 +00:00
|
|
|
*
|
|
|
|
* The two parameters allow the debugger to easily find and determine
|
|
|
|
* what the runtime loader is doing and to whom it is doing it.
|
|
|
|
*
|
|
|
|
* When the loadhook trap is hit (r_debug_state, set at program
|
|
|
|
* initialization), the arguments can be found on the stack:
|
|
|
|
*
|
|
|
|
* +8 struct link_map *m
|
|
|
|
* +4 struct r_debug *rd
|
|
|
|
* +0 RetAddr
|
1998-09-02 02:00:20 +00:00
|
|
|
*/
|
|
|
|
void
|
2000-08-26 05:13:29 +00:00
|
|
|
r_debug_state(struct r_debug* rd, struct link_map *m)
|
1998-09-02 02:00:20 +00:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
1999-04-21 04:06:57 +00:00
|
|
|
/*
|
2002-02-04 10:33:48 +00:00
|
|
|
* Get address of the pointer variable in the main program.
|
1999-04-21 04:06:57 +00:00
|
|
|
*/
|
2002-02-04 10:33:48 +00:00
|
|
|
static const void **
|
|
|
|
get_program_var_addr(const char *name)
|
1999-04-21 04:06:57 +00:00
|
|
|
{
|
|
|
|
const Obj_Entry *obj;
|
|
|
|
unsigned long hash;
|
|
|
|
|
|
|
|
hash = elf_hash(name);
|
|
|
|
for (obj = obj_main; obj != NULL; obj = obj->next) {
|
|
|
|
const Elf_Sym *def;
|
|
|
|
|
|
|
|
if ((def = symlook_obj(name, hash, obj, false)) != NULL) {
|
|
|
|
const void **addr;
|
|
|
|
|
|
|
|
addr = (const void **)(obj->relocbase + def->st_value);
|
2002-02-04 10:33:48 +00:00
|
|
|
return addr;
|
1999-04-21 04:06:57 +00:00
|
|
|
}
|
|
|
|
}
|
2002-02-04 10:33:48 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set a pointer variable in the main program to the given value. This
|
|
|
|
* is used to set key variables such as "environ" before any of the
|
|
|
|
* init functions are called.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
set_program_var(const char *name, const void *value)
|
|
|
|
{
|
|
|
|
const void **addr;
|
|
|
|
|
|
|
|
if ((addr = get_program_var_addr(name)) != NULL) {
|
|
|
|
dbg("\"%s\": *%p <-- %p", name, addr, value);
|
|
|
|
*addr = value;
|
|
|
|
}
|
1999-04-21 04:06:57 +00:00
|
|
|
}
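As a hedged illustration of the intended use (the real call sites
live elsewhere in this file, and the names below are invented):
during startup, before any init functions run, the dynamic linker
can point the main program's "environ" at the environment vector
it received.

static char *environ_sketch[] = { "PATH=/bin:/usr/bin", NULL };

static void
init_program_vars_sketch(void)
{
    /* Makes the program's own "environ" see rtld's environment. */
    set_program_var("environ", environ_sketch);
}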
|
|
|
|
|
2000-09-19 04:27:16 +00:00
|
|
|
/*
|
|
|
|
* Given a symbol name in a referencing object, find the corresponding
|
|
|
|
* definition of the symbol. Returns a pointer to the symbol, or NULL if
|
|
|
|
* no definition was found. Returns a pointer to the Obj_Entry of the
|
|
|
|
* defining object via the reference parameter DEFOBJ_OUT.
|
|
|
|
*/
|
|
|
|
static const Elf_Sym *
|
|
|
|
symlook_default(const char *name, unsigned long hash,
|
|
|
|
const Obj_Entry *refobj, const Obj_Entry **defobj_out, bool in_plt)
|
|
|
|
{
|
|
|
|
DoneList donelist;
|
|
|
|
const Elf_Sym *def;
|
|
|
|
const Elf_Sym *symp;
|
|
|
|
const Obj_Entry *obj;
|
|
|
|
const Obj_Entry *defobj;
|
|
|
|
const Objlist_Entry *elm;
|
|
|
|
def = NULL;
|
|
|
|
defobj = NULL;
|
|
|
|
donelist_init(&donelist);
|
|
|
|
|
|
|
|
/* Look first in the referencing object if linked symbolically. */
|
|
|
|
if (refobj->symbolic && !donelist_check(&donelist, refobj)) {
|
|
|
|
symp = symlook_obj(name, hash, refobj, in_plt);
|
|
|
|
if (symp != NULL) {
|
|
|
|
def = symp;
|
|
|
|
defobj = refobj;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Search all objects loaded at program start up. */
|
|
|
|
if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
|
|
|
|
symp = symlook_list(name, hash, &list_main, &obj, in_plt, &donelist);
|
|
|
|
if (symp != NULL &&
|
|
|
|
(def == NULL || ELF_ST_BIND(symp->st_info) != STB_WEAK)) {
|
|
|
|
def = symp;
|
|
|
|
defobj = obj;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Search all dlopened DAGs containing the referencing object. */
|
|
|
|
STAILQ_FOREACH(elm, &refobj->dldags, link) {
|
|
|
|
if (def != NULL && ELF_ST_BIND(def->st_info) != STB_WEAK)
|
|
|
|
break;
|
|
|
|
symp = symlook_list(name, hash, &elm->obj->dagmembers, &obj, in_plt,
|
|
|
|
&donelist);
|
|
|
|
if (symp != NULL &&
|
|
|
|
(def == NULL || ELF_ST_BIND(symp->st_info) != STB_WEAK)) {
|
|
|
|
def = symp;
|
|
|
|
defobj = obj;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2002-02-27 23:44:50 +00:00
|
|
|
/* Search all DAGs whose roots are RTLD_GLOBAL objects. */
|
|
|
|
STAILQ_FOREACH(elm, &list_global, link) {
|
|
|
|
if (def != NULL && ELF_ST_BIND(def->st_info) != STB_WEAK)
|
|
|
|
break;
|
|
|
|
symp = symlook_list(name, hash, &elm->obj->dagmembers, &obj, in_plt,
|
|
|
|
&donelist);
|
2000-09-19 04:27:16 +00:00
|
|
|
if (symp != NULL &&
|
|
|
|
(def == NULL || ELF_ST_BIND(symp->st_info) != STB_WEAK)) {
|
|
|
|
def = symp;
|
|
|
|
defobj = obj;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Search the dynamic linker itself, and possibly resolve the
|
|
|
|
* symbol from there. This is how the application links to
|
|
|
|
* dynamic linker services such as dlopen. Only the values listed
|
|
|
|
* in the "exports" array can be resolved from the dynamic linker.
|
|
|
|
*/
|
|
|
|
if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
|
|
|
|
symp = symlook_obj(name, hash, &obj_rtld, in_plt);
|
|
|
|
if (symp != NULL && is_exported(symp)) {
|
|
|
|
def = symp;
|
|
|
|
defobj = &obj_rtld;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (def != NULL)
|
|
|
|
*defobj_out = defobj;
|
|
|
|
return def;
|
|
|
|
}
|
|
|
|
|
1999-08-30 01:48:19 +00:00
|
|
|
static const Elf_Sym *
|
|
|
|
symlook_list(const char *name, unsigned long hash, Objlist *objlist,
|
2000-07-08 04:10:38 +00:00
|
|
|
const Obj_Entry **defobj_out, bool in_plt, DoneList *dlp)
|
1999-08-30 01:48:19 +00:00
|
|
|
{
|
|
|
|
const Elf_Sym *symp;
|
|
|
|
const Elf_Sym *def;
|
|
|
|
const Obj_Entry *defobj;
|
|
|
|
const Objlist_Entry *elm;
|
|
|
|
|
|
|
|
def = NULL;
|
|
|
|
defobj = NULL;
|
|
|
|
STAILQ_FOREACH(elm, objlist, link) {
|
2000-07-08 04:10:38 +00:00
|
|
|
if (donelist_check(dlp, elm->obj))
|
1999-08-30 01:48:19 +00:00
|
|
|
continue;
|
|
|
|
if ((symp = symlook_obj(name, hash, elm->obj, in_plt)) != NULL) {
|
|
|
|
if (def == NULL || ELF_ST_BIND(symp->st_info) != STB_WEAK) {
|
|
|
|
def = symp;
|
|
|
|
defobj = elm->obj;
|
|
|
|
if (ELF_ST_BIND(def->st_info) != STB_WEAK)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (def != NULL)
|
|
|
|
*defobj_out = defobj;
|
|
|
|
return def;
|
|
|
|
}
|
|
|
|
|
1998-09-02 02:00:20 +00:00
|
|
|
/*
|
|
|
|
* Search the symbol table of a single shared object for a symbol of
|
|
|
|
* the given name. Returns a pointer to the symbol, or NULL if no
|
|
|
|
* definition was found.
|
|
|
|
*
|
|
|
|
* The symbol's hash value is passed in for efficiency reasons; that
|
|
|
|
* eliminates many recomputations of the hash value.
|
|
|
|
*/
|
1998-09-04 19:03:57 +00:00
|
|
|
const Elf_Sym *
|
1998-09-02 02:00:20 +00:00
|
|
|
symlook_obj(const char *name, unsigned long hash, const Obj_Entry *obj,
|
|
|
|
bool in_plt)
|
|
|
|
{
|
1999-04-09 00:28:43 +00:00
|
|
|
if (obj->buckets != NULL) {
|
|
|
|
unsigned long symnum = obj->buckets[hash % obj->nbuckets];
|
1998-09-02 02:00:20 +00:00
|
|
|
|
1999-04-09 00:28:43 +00:00
|
|
|
while (symnum != STN_UNDEF) {
|
|
|
|
const Elf_Sym *symp;
|
|
|
|
const char *strp;
|
1998-09-02 02:00:20 +00:00
|
|
|
|
1999-07-18 00:02:19 +00:00
|
|
|
if (symnum >= obj->nchains)
|
|
|
|
return NULL; /* Bad object */
|
1999-04-09 00:28:43 +00:00
|
|
|
symp = obj->symtab + symnum;
|
|
|
|
strp = obj->strtab + symp->st_name;
|
1998-09-02 02:00:20 +00:00
|
|
|
|
2001-05-05 23:21:05 +00:00
|
|
|
if (name[0] == strp[0] && strcmp(name, strp) == 0)
|
1999-04-09 00:28:43 +00:00
|
|
|
return symp->st_shndx != SHN_UNDEF ||
|
|
|
|
(!in_plt && symp->st_value != 0 &&
|
|
|
|
ELF_ST_TYPE(symp->st_info) == STT_FUNC) ? symp : NULL;
|
1998-09-02 02:00:20 +00:00
|
|
|
|
1999-04-09 00:28:43 +00:00
|
|
|
symnum = obj->chains[symnum];
|
|
|
|
}
|
1998-09-02 02:00:20 +00:00
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
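For reference, the hash passed in is the standard System V ELF hash
of the symbol name, computed by elf_hash() elsewhere in this file.
A sketch of the canonical algorithm, renamed so it cannot be
mistaken for the real function:

/* The System V ABI hash used to index obj->buckets above. */
static unsigned long
elf_hash_sketch(const char *name)
{
    const unsigned char *p = (const unsigned char *)name;
    unsigned long h = 0;
    unsigned long g;

    while (*p != '\0') {
        h = (h << 4) + *p++;
        if ((g = h & 0xf0000000) != 0)
            h ^= g >> 24;
        h &= ~g;
    }
    return h;
}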
|
|
|
|
|
|
|
|
static void
|
|
|
|
trace_loaded_objects(Obj_Entry *obj)
|
1998-05-01 08:39:27 +00:00
|
|
|
{
|
2002-02-17 07:04:32 +00:00
|
|
|
char *fmt1, *fmt2, *fmt, *main_local, *list_containers;
|
1998-05-01 08:39:27 +00:00
|
|
|
int c;
|
|
|
|
|
|
|
|
if ((main_local = getenv("LD_TRACE_LOADED_OBJECTS_PROGNAME")) == NULL)
|
|
|
|
main_local = "";
|
|
|
|
|
|
|
|
if ((fmt1 = getenv("LD_TRACE_LOADED_OBJECTS_FMT1")) == NULL)
|
|
|
|
fmt1 = "\t%o => %p (%x)\n";
|
|
|
|
|
|
|
|
if ((fmt2 = getenv("LD_TRACE_LOADED_OBJECTS_FMT2")) == NULL)
|
|
|
|
fmt2 = "\t%o (%x)\n";
|
|
|
|
|
2002-02-17 07:04:32 +00:00
|
|
|
list_containers = getenv("LD_TRACE_LOADED_OBJECTS_ALL");
|
|
|
|
|
1998-05-01 08:39:27 +00:00
|
|
|
for (; obj; obj = obj->next) {
|
|
|
|
Needed_Entry *needed;
|
|
|
|
char *name, *path;
|
|
|
|
bool is_lib;
|
|
|
|
|
2002-02-17 07:04:32 +00:00
|
|
|
if (list_containers && obj->needed != NULL)
|
|
|
|
printf("%s:\n", obj->path);
|
1998-05-01 08:39:27 +00:00
|
|
|
for (needed = obj->needed; needed; needed = needed->next) {
|
1998-09-02 02:51:12 +00:00
|
|
|
if (needed->obj != NULL) {
|
2002-02-17 07:04:32 +00:00
|
|
|
if (needed->obj->traced && !list_containers)
|
1998-09-02 02:51:12 +00:00
|
|
|
continue;
|
|
|
|
needed->obj->traced = true;
|
1998-05-01 08:39:27 +00:00
|
|
|
path = needed->obj->path;
|
1998-09-02 02:51:12 +00:00
|
|
|
} else
|
|
|
|
path = "not found";
|
|
|
|
|
|
|
|
name = (char *)obj->strtab + needed->name;
|
|
|
|
is_lib = strncmp(name, "lib", 3) == 0; /* XXX - bogus */
|
1998-05-01 08:39:27 +00:00
|
|
|
|
|
|
|
fmt = is_lib ? fmt1 : fmt2;
|
|
|
|
while ((c = *fmt++) != '\0') {
|
|
|
|
switch (c) {
|
|
|
|
default:
|
|
|
|
putchar(c);
|
|
|
|
continue;
|
|
|
|
case '\\':
|
|
|
|
switch (c = *fmt) {
|
|
|
|
case '\0':
|
|
|
|
continue;
|
|
|
|
case 'n':
|
|
|
|
putchar('\n');
|
|
|
|
break;
|
|
|
|
case 't':
|
|
|
|
putchar('\t');
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case '%':
|
|
|
|
switch (c = *fmt) {
|
|
|
|
case '\0':
|
|
|
|
continue;
|
|
|
|
case '%':
|
|
|
|
default:
|
|
|
|
putchar(c);
|
|
|
|
break;
|
|
|
|
case 'A':
|
|
|
|
printf("%s", main_local);
|
|
|
|
break;
|
|
|
|
case 'a':
|
|
|
|
printf("%s", obj_main->path);
|
|
|
|
break;
|
|
|
|
case 'o':
|
|
|
|
printf("%s", name);
|
|
|
|
break;
|
|
|
|
#if 0
|
|
|
|
case 'm':
|
|
|
|
printf("%d", sodp->sod_major);
|
|
|
|
break;
|
|
|
|
case 'n':
|
|
|
|
printf("%d", sodp->sod_minor);
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
case 'p':
|
|
|
|
printf("%s", path);
|
|
|
|
break;
|
|
|
|
case 'x':
|
|
|
|
printf("%p", needed->obj ? needed->obj->mapbase : 0);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
++fmt;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
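A sketch of how a client such as ldd(1) can drive this tracing:
set the variables read above and exec the target. The wrapper is
hypothetical; setting LD_TRACE_LOADED_OBJECTS asks rtld to print
the trace and exit instead of running the program.

#include <stdlib.h>
#include <unistd.h>

static void
trace_exec_sketch(const char *path, char *const argv[])
{
    /* These names match the getenv() calls in trace_loaded_objects(). */
    setenv("LD_TRACE_LOADED_OBJECTS", "1", 1);
    setenv("LD_TRACE_LOADED_OBJECTS_PROGNAME", path, 1);
    setenv("LD_TRACE_LOADED_OBJECTS_FMT1", "\t%o => %p (%x)\n", 1);
    setenv("LD_TRACE_LOADED_OBJECTS_FMT2", "\t%o (%x)\n", 1);
    execv(path, argv);    /* On success, rtld prints the trace and exits. */
}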
|
1998-09-02 02:00:20 +00:00
|
|
|
|
1999-08-30 01:48:19 +00:00
|
|
|
/*
|
2000-01-09 21:13:48 +00:00
|
|
|
* Unload a dlopened object and its dependencies from memory and from
|
|
|
|
* our data structures. It is assumed that the DAG rooted in the
|
|
|
|
* object has already been unreferenced, and that the object has a
|
|
|
|
* reference count of 0.
|
1999-08-30 01:48:19 +00:00
|
|
|
*/
|
1999-07-09 16:22:55 +00:00
|
|
|
static void
|
2000-01-09 21:13:48 +00:00
|
|
|
unload_object(Obj_Entry *root)
|
1999-07-09 16:22:55 +00:00
|
|
|
{
|
2000-01-09 21:13:48 +00:00
|
|
|
Obj_Entry *obj;
|
|
|
|
Obj_Entry **linkp;
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
assert(root->refcount == 0);
|
|
|
|
|
|
|
|
/* Remove the DAG from all objects' DAG lists. */
|
|
|
|
STAILQ_FOREACH(elm, &root->dagmembers, link)
|
|
|
|
objlist_remove(&elm->obj->dldags, root);
|
|
|
|
|
|
|
|
/* Remove the DAG from the RTLD_GLOBAL list. */
|
|
|
|
objlist_remove(&list_global, root);
|
|
|
|
|
|
|
|
/* Unmap all objects that are no longer referenced. */
|
|
|
|
linkp = &obj_list->next;
|
|
|
|
while ((obj = *linkp) != NULL) {
|
|
|
|
if (obj->refcount == 0) {
|
|
|
|
dbg("unloading \"%s\"", obj->path);
|
|
|
|
munmap(obj->mapbase, obj->mapsize);
|
|
|
|
linkmap_delete(obj);
|
|
|
|
*linkp = obj->next;
|
2000-07-08 04:10:38 +00:00
|
|
|
obj_count--;
|
2000-01-09 21:13:48 +00:00
|
|
|
obj_free(obj);
|
|
|
|
} else
|
|
|
|
linkp = &obj->next;
|
1999-07-09 16:22:55 +00:00
|
|
|
}
|
2000-01-09 21:13:48 +00:00
|
|
|
obj_tail = linkp;
|
1999-07-09 16:22:55 +00:00
|
|
|
}
|
|
|
|
|
1998-09-02 02:00:20 +00:00
|
|
|
static void
|
1999-08-30 01:48:19 +00:00
|
|
|
unref_dag(Obj_Entry *root)
|
1998-09-02 02:00:20 +00:00
|
|
|
{
|
1999-08-20 22:33:44 +00:00
|
|
|
const Needed_Entry *needed;
|
|
|
|
|
2001-01-05 04:36:17 +00:00
|
|
|
if (root->refcount == 0)
|
|
|
|
return;
|
1998-09-02 02:00:20 +00:00
|
|
|
root->refcount--;
|
1999-08-20 22:33:44 +00:00
|
|
|
if (root->refcount == 0)
|
1998-09-02 02:00:20 +00:00
|
|
|
for (needed = root->needed; needed != NULL; needed = needed->next)
|
1999-08-20 22:33:44 +00:00
|
|
|
if (needed->obj != NULL)
|
1999-08-30 01:48:19 +00:00
|
|
|
unref_dag(needed->obj);
|
1998-09-02 02:00:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Non-mallocing printf, for use by malloc itself.
|
|
|
|
* XXX - This doesn't belong in this module.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
xprintf(const char *fmt, ...)
|
|
|
|
{
|
|
|
|
char buf[256];
|
|
|
|
va_list ap;
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
|
|
|
vsnprintf(buf, sizeof buf, fmt, ap);
|
2001-07-26 11:02:39 +00:00
|
|
|
(void)write(STDOUT_FILENO, buf, strlen(buf));
|
1998-09-02 02:00:20 +00:00
|
|
|
va_end(ap);
|
|
|
|
}
|