Add ia64 support. Various adjustments were made to existing targets to

cope with a few interface changes required by the ia64. In particular,
function pointers on ia64 need special treatment in rtld.
This commit is contained in:
Doug Rabson 2001-10-15 18:48:42 +00:00
parent be5780f384
commit b5393d9f78
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=85004
14 changed files with 1069 additions and 33 deletions

View File

@ -239,7 +239,8 @@ reloc_jmpslots(Obj_Entry *obj)
if (def == NULL)
return -1;
reloc_jmpslot(where,
(Elf_Addr)(defobj->relocbase + def->st_value));
(Elf_Addr)(defobj->relocbase + def->st_value),
defobj);
}
} else {
const Elf_Rela *relalim;
@ -258,7 +259,8 @@ reloc_jmpslots(Obj_Entry *obj)
if (def == NULL)
return -1;
reloc_jmpslot(where,
(Elf_Addr)(defobj->relocbase + def->st_value));
(Elf_Addr)(defobj->relocbase + def->st_value),
defobj);
}
}
obj->jmpslots_done = true;
@ -266,8 +268,8 @@ reloc_jmpslots(Obj_Entry *obj)
}
/* Fixup the jump slot at "where" to transfer control to "target". */
void
reloc_jmpslot(Elf_Addr *where, Elf_Addr target)
Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *obj)
{
Elf_Addr stubaddr;
@ -339,7 +341,7 @@ reloc_jmpslot(Elf_Addr *where, Elf_Addr target)
*/
if ((int32_t)delta != delta) {
dbg(" PLT stub too far from GOT to relocate");
return;
return target;
}
dhigh = delta - (int16_t)delta;
if (dhigh != 0) {
@ -389,6 +391,8 @@ reloc_jmpslot(Elf_Addr *where, Elf_Addr target)
__asm__ __volatile__("wmb" : : : "memory");
stubptr[0] = inst[0];
}
return target;
}
/* Process an R_ALPHA_COPY relocation. */

View File

@ -29,10 +29,16 @@
#ifndef RTLD_MACHDEP_H
#define RTLD_MACHDEP_H 1
struct Struct_Obj_Entry;
/* Return the address of the .dynamic section in the dynamic linker. */
#define rtld_dynamic(obj) (&_DYNAMIC)
void reloc_jmpslot(Elf_Addr *, Elf_Addr);
Elf_Addr reloc_jmpslot(Elf_Addr *, Elf_Addr,
const struct Struct_Obj_Entry *obj);
#define make_function_pointer(def, defobj) \
((defobj)->relocbase + (def)->st_value)
/* Atomic operations. */
int cmp0_and_store_int(volatile int *, int);

View File

@ -237,7 +237,7 @@ reloc_jmpslots(Obj_Entry *obj)
return 0;
rellim = (const Elf_Rel *)((char *)obj->pltrel + obj->pltrelsize);
for (rel = obj->pltrel; rel < rellim; rel++) {
Elf_Addr *where;
Elf_Addr *where, target;
const Elf_Sym *def;
const Obj_Entry *defobj;
@ -246,7 +246,8 @@ reloc_jmpslots(Obj_Entry *obj)
def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, true, NULL);
if (def == NULL)
return -1;
reloc_jmpslot(where, (Elf_Addr)(defobj->relocbase + def->st_value));
target = (Elf_Addr)(defobj->relocbase + def->st_value);
reloc_jmpslot(where, target, defobj);
}
obj->jmpslots_done = true;
return 0;

View File

@ -29,17 +29,25 @@
#ifndef RTLD_MACHDEP_H
#define RTLD_MACHDEP_H 1
struct Struct_Obj_Entry;
/* Return the address of the .dynamic section in the dynamic linker. */
#define rtld_dynamic(obj) \
((const Elf_Dyn *)((obj)->relocbase + (Elf_Addr)&_DYNAMIC))
/* Fixup the jump slot at "where" to transfer control to "target". */
#define reloc_jmpslot(where, target) \
do { \
dbg("reloc_jmpslot: *%p = %p", (void *)(where), \
(void *)(target)); \
(*(Elf_Addr *)(where) = (Elf_Addr)(target)); \
} while (0)
static inline Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
const struct Struct_Obj_Entry *obj)
{
dbg("reloc_jmpslot: *%p = %p", (void *)(where),
(void *)(target));
(*(Elf_Addr *)(where) = (Elf_Addr)(target));
return target;
}
#define make_function_pointer(def, defobj) \
((defobj)->relocbase + (def)->st_value)
static inline void
atomic_decr_int(volatile int *p)

View File

@ -237,7 +237,7 @@ reloc_jmpslots(Obj_Entry *obj)
return 0;
rellim = (const Elf_Rel *)((char *)obj->pltrel + obj->pltrelsize);
for (rel = obj->pltrel; rel < rellim; rel++) {
Elf_Addr *where;
Elf_Addr *where, target;
const Elf_Sym *def;
const Obj_Entry *defobj;
@ -246,7 +246,8 @@ reloc_jmpslots(Obj_Entry *obj)
def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, true, NULL);
if (def == NULL)
return -1;
reloc_jmpslot(where, (Elf_Addr)(defobj->relocbase + def->st_value));
target = (Elf_Addr)(defobj->relocbase + def->st_value);
reloc_jmpslot(where, target, defobj);
}
obj->jmpslots_done = true;
return 0;

View File

@ -29,17 +29,25 @@
#ifndef RTLD_MACHDEP_H
#define RTLD_MACHDEP_H 1
struct Struct_Obj_Entry;
/* Return the address of the .dynamic section in the dynamic linker. */
#define rtld_dynamic(obj) \
((const Elf_Dyn *)((obj)->relocbase + (Elf_Addr)&_DYNAMIC))
/* Fixup the jump slot at "where" to transfer control to "target". */
#define reloc_jmpslot(where, target) \
do { \
dbg("reloc_jmpslot: *%p = %p", (void *)(where), \
(void *)(target)); \
(*(Elf_Addr *)(where) = (Elf_Addr)(target)); \
} while (0)
static inline Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
const struct Struct_Obj_Entry *obj)
{
dbg("reloc_jmpslot: *%p = %p", (void *)(where),
(void *)(target));
(*(Elf_Addr *)(where) = (Elf_Addr)(target));
return target;
}
#define make_function_pointer(def, defobj) \
((defobj)->relocbase + (def)->st_value)
static inline void
atomic_decr_int(volatile int *p)

View File

@ -0,0 +1,2 @@
# $FreeBSD$
LDFLAGS+= -Wl,--export-dynamic

View File

@ -0,0 +1,181 @@
/*-
* Copyright 1999, 2000 John D. Polstra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Thread locking implementation for the dynamic linker.
*
* We use the "simple, non-scalable reader-preference lock" from:
*
* J. M. Mellor-Crummey and M. L. Scott. "Scalable Reader-Writer
* Synchronization for Shared-Memory Multiprocessors." 3rd ACM Symp. on
* Principles and Practice of Parallel Programming, April 1991.
*
* In this algorithm the lock is a single word. Its low-order bit is
* set when a writer holds the lock. The remaining high-order bits
* contain a count of readers desiring the lock. The algorithm requires
* atomic "compare_and_store" and "add" operations, which we implement
* using assembly language sequences in "rtld_start.S".
*
* These are spinlocks. When spinning we call nanosleep() for 1
* microsecond each time around the loop. This will most likely yield
* the CPU to other threads (including, we hope, the lockholder) allowing
* them to make some progress.
*/
#include <signal.h>
#include <stdlib.h>
#include <time.h>
#include "debug.h"
#include "rtld.h"
/*
* This value of CACHE_LINE_SIZE is conservative. The actual size
* is 32 on the 21064, 21064A, 21066, 21066A, and 21164. It is 64
* on the 21264. Compaq recommends sequestering each lock in its own
* 128-byte block to allow for future implementations with larger
* cache lines.
*/
#define CACHE_LINE_SIZE 128
#define WAFLAG 0x1 /* A writer holds the lock */
#define RC_INCR 0x2 /* Adjusts count of readers desiring lock */
typedef struct Struct_Lock {
volatile int lock;
void *base;
} Lock;
static const struct timespec usec = { 0, 1000 }; /* 1 usec. */
static sigset_t fullsigmask, oldsigmask;
/*
 * Allocate and initialize one reader/writer lock, sequestered in its
 * own cache line so lock traffic does not false-share with neighbours.
 * Returns an opaque handle suitable for the *_acquire/*_release hooks.
 */
static void *
lock_create(void *context)
{
	void *mem;
	char *aligned;
	uintptr_t off;
	Lock *lck;

	/*
	 * Try a single-cache-line allocation first, hoping malloc hands
	 * back an aligned block.  If not, free it and grab two lines,
	 * then round the pointer up to the next cache-line boundary.
	 */
	mem = xmalloc(CACHE_LINE_SIZE);
	aligned = (char *)mem;
	if ((uintptr_t)aligned % CACHE_LINE_SIZE != 0) {
		free(mem);
		mem = xmalloc(2 * CACHE_LINE_SIZE);
		aligned = (char *)mem;
		off = (uintptr_t)aligned % CACHE_LINE_SIZE;
		if (off != 0)
			aligned += CACHE_LINE_SIZE - off;
	}
	lck = (Lock *)aligned;
	lck->base = mem;	/* remember the real allocation for free() */
	lck->lock = 0;
	return lck;
}
/* Free a lock made by lock_create(), via its saved base pointer. */
static void
lock_destroy(void *lock)
{
	free(((Lock *)lock)->base);
}
/*
 * Acquire the lock for reading.  Announce this reader by bumping the
 * reader count, then spin (sleeping 1 usec per iteration) until no
 * writer holds the lock.  Reader-preference: a waiting reader keeps
 * its count registered while a writer holds the lock.
 */
static void
rlock_acquire(void *lock)
{
Lock *l = (Lock *)lock;
atomic_add_int(&l->lock, RC_INCR);
while (l->lock & WAFLAG)
nanosleep(&usec, NULL);
}
/*
 * Acquire the lock for writing.  Signals are blocked BEFORE the
 * attempt so that a signal handler can never interrupt us while we
 * hold the write lock (a handler re-entering rtld would deadlock).
 * On a failed attempt the previous mask is restored before sleeping,
 * so signals stay deliverable while we spin.  The pre-lock mask is
 * published to "oldsigmask" only once the lock is held, for
 * wlock_release() to restore.
 */
static void
wlock_acquire(void *lock)
{
Lock *l = (Lock *)lock;
sigset_t tmp_oldsigmask;
for ( ; ; ) {
sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask);
if (cmp0_and_store_int(&l->lock, WAFLAG) == 0)
break;
sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
nanosleep(&usec, NULL);
}
oldsigmask = tmp_oldsigmask;
}
/* Release a read lock: atomically retract this reader's count. */
static void
rlock_release(void *lock)
{
Lock *l = (Lock *)lock;
atomic_add_int(&l->lock, -RC_INCR);
}
/*
 * Release the write lock.  The lock bit is cleared first, and only
 * then is the signal mask saved by wlock_acquire() restored; doing it
 * in the other order could let a handler run while we still hold the
 * lock.
 */
static void
wlock_release(void *lock)
{
Lock *l = (Lock *)lock;
atomic_add_int(&l->lock, -WAFLAG);
sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
}
/*
 * Install the default (spinlock-based) locking methods into "li" and
 * build the signal mask used while write locks are held: everything
 * blocked except the synchronous traps the dynamic linker itself
 * might conceivably generate.
 */
void
lockdflt_init(LockInfo *li)
{
	static const int exempt[] = {
		SIGILL, SIGTRAP, SIGABRT, SIGEMT,
		SIGFPE, SIGBUS, SIGSEGV, SIGSYS
	};
	unsigned int i;

	li->context = NULL;
	li->context_destroy = NULL;
	li->lock_create = lock_create;
	li->lock_destroy = lock_destroy;
	li->rlock_acquire = rlock_acquire;
	li->rlock_release = rlock_release;
	li->wlock_acquire = wlock_acquire;
	li->wlock_release = wlock_release;

	sigfillset(&fullsigmask);
	for (i = 0; i < sizeof(exempt) / sizeof(exempt[0]); i++)
		sigdelset(&fullsigmask, exempt[i]);
}

View File

@ -0,0 +1,435 @@
/*-
* Copyright 1996, 1997, 1998, 1999 John D. Polstra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Dynamic linker for ELF.
*
* John Polstra <jdp@polstra.com>.
*/
#include <sys/param.h>
#include <sys/mman.h>
#include <dlfcn.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "debug.h"
#include "rtld.h"
extern Elf_Dyn _DYNAMIC;
/*
* Macros for loading/storing unaligned 64-bit values. These are
* needed because relocations can point to unaligned data. This
* occurs in the DWARF2 exception frame tables generated by the
* compiler, for instance.
*
* We don't use these when relocating jump slots and GOT entries,
* since they are guaranteed to be aligned.
*
* XXX dfr stub for now.
*/
#define load64(p) (*(u_int64_t *) (p))
#define store64(p, v) (*(u_int64_t *) (p) = (v))
/* Allocate an @fptr. */
#define FPTR_CHUNK_SIZE 64
struct fptr_chunk {
struct fptr fptrs[FPTR_CHUNK_SIZE];
};
static struct fptr_chunk first_chunk;
static struct fptr_chunk *current_chunk = &first_chunk;
static struct fptr *next_fptr = &first_chunk.fptrs[0];
static struct fptr *last_fptr = &first_chunk.fptrs[FPTR_CHUNK_SIZE];
/*
* We use static storage initially so that we don't have to call
* malloc during init_rtld().
*/
/*
 * Hand out one @fptr (function descriptor) initialized with the given
 * code address and gp.  Descriptors are carved from chunks; the first
 * chunk is static so no malloc is needed while rtld relocates itself.
 * Exhausted chunks are abandoned, never freed: descriptors must stay
 * valid for the life of the process so pointer comparisons keep
 * working.
 */
static struct fptr *
alloc_fptr(Elf_Addr target, Elf_Addr gp)
{
struct fptr* fptr;
/* Current chunk full?  Start a new one. */
if (next_fptr == last_fptr) {
/* NOTE(review): malloc return is unchecked; OOM would fault below. */
current_chunk = malloc(sizeof(struct fptr_chunk));
next_fptr = &current_chunk->fptrs[0];
last_fptr = &current_chunk->fptrs[FPTR_CHUNK_SIZE];
}
fptr = next_fptr;
next_fptr++;
fptr->target = target;
fptr->gp = gp;
return fptr;
}
/* Relocate a non-PLT object with addend. */
/*
 * Apply one non-PLT relocation to "obj".  "rela" is the entry (real,
 * or synthesized from an Elf_Rel with a zero addend), "cache" speeds
 * repeated symbol lookups, and "fptrs" (may be NULL) maps symbol
 * numbers to already-built @fptrs so function-pointer equality holds
 * within the object.  Returns 0 on success, -1 after _rtld_error().
 */
static int
reloc_non_plt_obj(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela,
SymCache *cache, struct fptr **fptrs)
{
Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset);
switch (ELF_R_TYPE(rela->r_info)) {
case R_IA64_REL64LSB:
/*
* We handle rtld's relocations in rtld_start.S
*/
if (obj != obj_rtld)
store64(where,
load64(where) + (Elf_Addr) obj->relocbase);
break;
case R_IA64_DIR64LSB: {
/* Absolute 64-bit address of a symbol, plus addend. */
const Elf_Sym *def;
const Obj_Entry *defobj;
Elf_Addr target;
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
false, cache);
if (def == NULL)
return -1;
target = (Elf_Addr) (defobj->relocbase + def->st_value);
store64(where, target + rela->r_addend);
break;
}
case R_IA64_FPTR64LSB: {
/*
* We have to make sure that all @fptr references to
* the same function are identical so that code can
* compare function pointers. We actually only bother
* to ensure this within a single object. If the
* caller's alloca failed, we don't even ensure that.
*/
const Elf_Sym *def;
const Obj_Entry *defobj;
struct fptr *fptr = 0;
Elf_Addr target, gp;
/*
* Not sure why the call to find_symdef() doesn't work
* properly (it fails if the symbol is local). Perhaps
* this is a toolchain issue - revisit after we
* upgrade the ia64 toolchain.
*/
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
false, cache);
if (def == NULL) {
/* Fall back to the referencing object's own symbol. */
def = &obj->symtab[ELF_R_SYM(rela->r_info)];
defobj = obj;
}
target = (Elf_Addr) (defobj->relocbase + def->st_value);
gp = (Elf_Addr) defobj->pltgot;
/*
* Find the @fptr, using fptrs as a helper.
*/
if (fptrs)
fptr = fptrs[ELF_R_SYM(rela->r_info)];
if (!fptr) {
fptr = alloc_fptr(target, gp);
if (fptrs)
fptrs[ELF_R_SYM(rela->r_info)] = fptr;
}
store64(where, (Elf_Addr) fptr);
break;
}
default:
_rtld_error("%s: Unsupported relocation type %d"
" in non-PLT relocations\n", obj->path,
ELF_R_TYPE(rela->r_info));
return -1;
}
return(0);
}
/* Process the non-PLT relocations. */
/*
 * Process all non-PLT relocations of "obj" (both Elf_Rel and Elf_Rela
 * flavors; Rel entries are widened to Rela with a zero addend).
 * Returns 0 on success, -1 on error.
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld)
{
const Elf_Rel *rellim;
const Elf_Rel *rel;
const Elf_Rela *relalim;
const Elf_Rela *rela;
SymCache *cache;
struct fptr **fptrs;
/* Per-symbol lookup cache; alloca'd so it vanishes on return. */
cache = (SymCache *)alloca(obj->nchains * sizeof(SymCache));
if (cache != NULL)
memset(cache, 0, obj->nchains * sizeof(SymCache));
/*
* When relocating rtld itself, we need to avoid using malloc.
*/
if (obj == obj_rtld)
fptrs = (struct fptr **)
alloca(obj->nchains * sizeof(struct fptr *));
else
fptrs = (struct fptr **)
malloc(obj->nchains * sizeof(struct fptr *));
if (fptrs == NULL)
return -1;
memset(fptrs, 0, obj->nchains * sizeof(struct fptr *));
/* Perform relocations without addend if there are any: */
rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize);
for (rel = obj->rel; obj->rel != NULL && rel < rellim; rel++) {
Elf_Rela locrela;
locrela.r_info = rel->r_info;
locrela.r_offset = rel->r_offset;
locrela.r_addend = 0;
if (reloc_non_plt_obj(obj_rtld, obj, &locrela, cache, fptrs))
return -1;
}
/* Perform relocations with addend if there are any: */
relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize);
for (rela = obj->rela; obj->rela != NULL && rela < relalim; rela++) {
if (reloc_non_plt_obj(obj_rtld, obj, rela, cache, fptrs))
return -1;
}
/*
* Remember the fptrs in case of later calls to dlsym(). Don't
* bother for rtld - we will lazily create a table in
* make_function_pointer(). At this point we still can't risk
* calling malloc().
*/
if (obj != obj_rtld)
obj->priv = fptrs;
else
obj->priv = NULL;
return 0;
}
/* Process the PLT relocations. */
/*
 * Process the PLT relocations of "obj" for lazy binding: each
 * R_IA64_IPLTLSB slot holds a link-time @fptr pointing at the PLT
 * trampoline, which only needs rebasing by the load offset.  The real
 * target is filled in later by reloc_jmpslot() via _rtld_bind.
 * Returns 0 (cannot fail).
 */
int
reloc_plt(Obj_Entry *obj)
{
/* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */
if (obj->pltrelsize != 0) {
const Elf_Rel *rellim;
const Elf_Rel *rel;
rellim = (const Elf_Rel *)
((char *)obj->pltrel + obj->pltrelsize);
for (rel = obj->pltrel; rel < rellim; rel++) {
Elf_Addr *where;
assert(ELF_R_TYPE(rel->r_info) == R_IA64_IPLTLSB);
/* Relocate the @fptr pointing into the PLT. */
where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
*where += (Elf_Addr)obj->relocbase;
}
} else {
const Elf_Rela *relalim;
const Elf_Rela *rela;
relalim = (const Elf_Rela *)
((char *)obj->pltrela + obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
Elf_Addr *where;
assert(ELF_R_TYPE(rela->r_info) == R_IA64_IPLTLSB);
/* Relocate the @fptr pointing into the PLT. */
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
*where += (Elf_Addr)obj->relocbase;
}
}
return 0;
}
/* Relocate the jump slots in an object. */
/*
 * Eagerly resolve every jump slot of "obj" (used for LD_BIND_NOW /
 * dlopen with RTLD_NOW).  Each slot is pointed at its definition via
 * reloc_jmpslot().  Idempotent: a second call is a no-op.  Returns 0
 * on success, -1 if any symbol cannot be resolved.
 */
int
reloc_jmpslots(Obj_Entry *obj)
{
if (obj->jmpslots_done)
return 0;
/* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */
if (obj->pltrelsize != 0) {
const Elf_Rel *rellim;
const Elf_Rel *rel;
rellim = (const Elf_Rel *)
((char *)obj->pltrel + obj->pltrelsize);
for (rel = obj->pltrel; rel < rellim; rel++) {
Elf_Addr *where;
const Elf_Sym *def;
const Obj_Entry *defobj;
assert(ELF_R_TYPE(rel->r_info) == R_IA64_IPLTLSB);
where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
def = find_symdef(ELF_R_SYM(rel->r_info), obj,
&defobj, true, NULL);
if (def == NULL)
return -1;
reloc_jmpslot(where,
(Elf_Addr)(defobj->relocbase
+ def->st_value),
defobj);
}
} else {
const Elf_Rela *relalim;
const Elf_Rela *rela;
relalim = (const Elf_Rela *)
((char *)obj->pltrela + obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
Elf_Addr *where;
const Elf_Sym *def;
const Obj_Entry *defobj;
/* assert(ELF_R_TYPE(rela->r_info) == R_IA64_IPLTLSB); */
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
def = find_symdef(ELF_R_SYM(rela->r_info), obj,
&defobj, true, NULL);
if (def == NULL)
return -1;
reloc_jmpslot(where,
(Elf_Addr)(defobj->relocbase
+ def->st_value),
defobj);
}
}
obj->jmpslots_done = true;
return 0;
}
/* Fixup the jump slot at "where" to transfer control to "target". */
/*
 * Fixup the jump slot at "where" to transfer control to "target",
 * which is defined in "obj".  On ia64 a jump slot is an @fptr
 * (target, gp) pair, so both words are rewritten.  Returns the value
 * the lazy-binding trampoline should use: the slot address itself,
 * which now serves as a valid @fptr for the resolved function.
 */
Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *obj)
{
Elf_Addr stubaddr;
dbg(" reloc_jmpslot: where=%p, target=%p, gp=%p",
(void *)where, (void *)target, (void *)obj->pltgot);
stubaddr = *where;
if (stubaddr != target) {
/*
* Point this @fptr directly at the target. Update the
* gp value first so that we don't break another cpu
* which is currently executing the PLT entry.
* The ia64_mf() fences order the two stores.
*/
where[1] = (Elf_Addr) obj->pltgot;
ia64_mf();
where[0] = target;
ia64_mf();
}
/*
* The caller needs an @fptr for the adjusted entry. The PLT
* entry serves this purpose nicely.
*/
return (Elf_Addr) where;
}
/*
* XXX ia64 doesn't seem to have copy relocations.
*
* Returns 0 on success, -1 on failure.
*/
/*
 * Copy relocations do not exist on ia64, so there is nothing to do
 * for the main executable.  Always succeeds.
 */
int
do_copy_relocations(Obj_Entry *dstobj)
{
	return (0);
}
/*
* Return the @fptr representing a given function symbol.
*/
/*
 * Return the canonical @fptr for function symbol "sym" defined in
 * "obj", creating it (and, lazily, the object's fptr table) on first
 * use.  Reusing one descriptor per symbol keeps function-pointer
 * equality working across dlsym() and relocation.
 */
void *
make_function_pointer(const Elf_Sym *sym, const Obj_Entry *obj)
{
struct fptr **fptrs = obj->priv;
int index = sym - obj->symtab;
if (!fptrs) {
/*
* This should only happen for something like
* dlsym("dlopen"). Actually, I'm not sure it can ever
* happen.
*/
/* NOTE(review): malloc return unchecked; OOM would fault in memset. */
fptrs = (struct fptr **)
malloc(obj->nchains * sizeof(struct fptr *));
memset(fptrs, 0, obj->nchains * sizeof(struct fptr *));
/* Cast away const to cache the table on the object. */
((Obj_Entry*) obj)->priv = fptrs;
}
if (!fptrs[index]) {
Elf_Addr target, gp;
target = (Elf_Addr) (obj->relocbase + sym->st_value);
gp = (Elf_Addr) obj->pltgot;
fptrs[index] = alloc_fptr(target, gp);
}
return fptrs[index];
}
/* Initialize the special PLT entries. */
/*
 * Initialize the special PLT entries of "obj" for lazy binding: find
 * the DT_IA64_PLT_RESERVE area in its dynamic section and fill in the
 * three words the PLT trampoline passes to _rtld_bind_start (the
 * object pointer and the target/gp pair of _rtld_bind_start's own
 * function descriptor).  Aborts the process if the entry is missing.
 */
void
init_pltgot(Obj_Entry *obj)
{
const Elf_Dyn *dynp;
Elf_Addr *pltres = 0;
/*
* Find the PLT RESERVE section.
*/
for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) {
if (dynp->d_tag == DT_IA64_PLT_RESERVE)
pltres = (u_int64_t *)
(obj->relocbase + dynp->d_un.d_ptr);
}
if (!pltres)
errx(1, "Can't find DT_IA64_PLT_RESERVE entry");
/*
* The PLT RESERVE section is used to get values to pass to
* _rtld_bind when lazy binding.
*/
pltres[0] = (Elf_Addr) obj;
pltres[1] = FPTR_TARGET(_rtld_bind_start);
pltres[2] = FPTR_GP(_rtld_bind_start);
}

View File

@ -0,0 +1,57 @@
/*-
* Copyright (c) 1999, 2000 John D. Polstra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef RTLD_MACHDEP_H
#define RTLD_MACHDEP_H 1
/*
* Macros for cracking ia64 function pointers.
*/
/* An ia64 function descriptor: code address plus its object's gp. */
struct fptr {
Elf_Addr target; /* entry point of the function */
Elf_Addr gp; /* global-pointer value for the defining object */
};
/* Crack an @fptr held in an untyped pointer. */
#define FPTR_TARGET(f) (((struct fptr *) (f))->target)
#define FPTR_GP(f) (((struct fptr *) (f))->gp)
/* Return the address of the .dynamic section in the dynamic linker. */
#define rtld_dynamic(obj) (&_DYNAMIC)
struct Struct_Obj_Entry;
Elf_Addr reloc_jmpslot(Elf_Addr *, Elf_Addr, const struct Struct_Obj_Entry *);
void *make_function_pointer(const Elf_Sym *, const struct Struct_Obj_Entry *);
/* Atomic operations. */
int cmp0_and_store_int(volatile int *, int);
void atomic_add_int(volatile int *, int);
void atomic_incr_int(volatile int *);
void atomic_decr_int(volatile int *);
#endif

View File

@ -0,0 +1,306 @@
/* $FreeBSD$ */
/* From: NetBSD: rtld_start.S,v 1.1 1996/12/16 20:38:09 cgd Exp */
/*
* Copyright 1996 Matt Thomas <matt@3am-software.com>
* Copyright 2000 John D. Polstra
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
#include <sys/syscall.h>
// _rtld_start: startup entry for the dynamic linker.  Derives its own
// gp and load base from the current ip, self-relocates via _rtld_reloc,
// calls the C entry _rtld(), then transfers to the address _rtld
// returns in r8 with the exit procedure as an argument.
ENTRY(_rtld_start, 0)
alloc r2=ar.pfs,3,0,3,0
;;
1: mov r14=ip // calculate gp
addl r3=@gprel(1b),r0
;;
sub gp=r14,r3
;;
.section .sdata
2: data4 @ltv(1b) // unrelocated address of 1b
.align 8
.previous
add r15=@gprel(2b),gp
;;
ld8 r15=[r15]
;;
sub out0=r14,r15 // out0 is image base address
br.call.sptk.many rp=_rtld_reloc // fixup image
add sp=-16,sp // 16 bytes for us, 16 for _rtld
;;
mov out0=in0
add out1=16,sp // address for exit proc
add out2=24,sp // address for obj_main
br.call.sptk.many rp=_rtld // r8=_rtld(sp, &exit_proc, &obj_main)
add r16=16,sp // address for exit proc
;;
ld8 r15=[r16] // read exit proc
add sp=16,sp // readjust stack
mov b7=r8 // address of real _start
;;
alloc r2=ar.pfs,0,0,3,0 // dump register frame
mov out2=r15
br.call.sptk.many rp=b7 // transfer to main program
br.call.sptk.many rp=exit // die
END(_rtld_start)
/*
* _rtld_bind_start: lookup a lazy binding and transfer to real target
*
* Arguments:
* r1 gp value for rtld
* r15 Index in plt
* r16 Obj_Entry of caller
* in0-in7 Arguments for target procedure
* rp Return address back to caller
*/
// Lazy-binding trampoline target: preserve the caller's scratch state
// (r8-r11 and the float argument registers f8-f15), call the C
// resolver _rtld_bind(obj, 24 * plt-index), then reload the resolved
// @fptr from r8 (target then gp), restore state, and branch to the
// real function.
ENTRY(_rtld_bind_start, 0)
{ .mii
alloc loc0=ar.pfs,8,6,3,0 // space to save r8-r11
add r17=16-8*16,sp // leave 16 bytes for _rtld_bind
add r18=32-8*16,sp
;;
} { .mii
mov loc2=r8 // structure return address
add sp=-8*16,sp // space to save f8-f15
mov loc1=rp
;;
} { .mii
stf.spill [r17]=f8,32 // save float arguments
mov loc3=r9 // language specific
mov loc4=r10 // language specific
} { .mii
stf.spill [r18]=f9,32
mov loc5=r11 // language specific
shl out1=r15,4 // 16 * index
;;
} { .mmi
stf.spill [r17]=f10,32
stf.spill [r18]=f11,32
mov out0=r16 // Obj_Entry for caller
;;
} { .mmi
stf.spill [r17]=f12,32
stf.spill [r18]=f13,32
shladd out1=r15,3,out1 // rela offset = 24 * index
;;
} { .mmb
stf.spill [r17]=f14,32
stf.spill [r18]=f15,32
br.call.sptk.many rp=_rtld_bind
} { .mii
ld8 r14=[r8],8 // target address
add r17=16,sp
add r18=32,sp
;;
} { .mii
ld8 r1=[r8] // target gp
mov ar.pfs=loc0 // clean up
mov rp=loc1
} { .mmi
ldf.fill f8=[r17],32 // restore float arguments
ldf.fill f9=[r18],32
mov r8=loc2 // restore structure pointer
;;
} { .mmi
ldf.fill f10=[r17],32
ldf.fill f11=[r18],32
mov r9=loc3
;;
} { .mmi
ldf.fill f12=[r17],32
ldf.fill f13=[r18],32
mov r10=loc4
;;
} { .mmi
ldf.fill f14=[r17],32
ldf.fill f15=[r18],32
mov r11=loc5
;;
} { .mii
nop.m 0
mov b7=r14
add sp=8*16,sp
;;
} { .mib
alloc r14=ar.pfs,0,0,8,0 // drop our register frame
nop.i 0
br.sptk.many b7 // jump to target
}
END(_rtld_bind_start)
/*
* int cmp0_and_store_int(volatile int *p, int newval);
*
* If an int holds 0, store newval into it; else do nothing. Returns
* the previous value.
*/
ENTRY(cmp0_and_store_int, 2)
mov ar.ccv=0 // compare value: only store if *p == 0
;;
cmpxchg4.acq r8=[in0],in1,ar.ccv // r8 = old *p; *p = newval iff old == 0
br.ret.sptk.many rp // return previous value in r8
END(cmp0_and_store_int)
// Atomically add in1 to *in0, via a cmpxchg retry loop.
ENTRY(atomic_add_int, 2)
1: ld4 r14=[in0] // load current value
;;
mov ar.ccv=r14 // expect it to be unchanged
add r15=in1,r14
;;
cmpxchg4.acq r16=[in0],r15,ar.ccv // try to install the sum
;;
cmp.ne p6,p0=r14,r16 // raced with another update?
(p6) br.cond.spnt.few 1b // yes: retry
br.ret.sptk.many rp
END(atomic_add_int)
/* Atomically increment an int. */
ENTRY(atomic_incr_int, 1)
1: ld4 r14=[in0] // load current value
;;
mov ar.ccv=r14 // expect it to be unchanged
add r15=1,r14
;;
cmpxchg4.acq r16=[in0],r15,ar.ccv // try to install value+1
;;
cmp.ne p6,p0=r14,r16 // raced with another update?
(p6) br.cond.spnt.few 1b // yes: retry
br.ret.sptk.many rp
END(atomic_incr_int)
/* Atomically decrement an int. */
ENTRY(atomic_decr_int, 1)
1: ld4 r14=[in0] // load current value
;;
mov ar.ccv=r14 // expect it to be unchanged
add r15=-1,r14
;;
cmpxchg4.acq r16=[in0],r15,ar.ccv // try to install value-1
;;
cmp.ne p6,p0=r14,r16 // raced with another update?
(p6) br.cond.spnt.few 1b // yes: retry
br.ret.sptk.many rp
END(atomic_decr_int)
#define DT_NULL 0 /* Terminating entry. */
#define DT_RELA 7 /* Address of ElfNN_Rela relocations. */
#define DT_RELASZ 8 /* Total size of ElfNN_Rela relocations. */
#define DT_RELAENT 9 /* Size of each ElfNN_Rela relocation entry. */
#define R_IA64_NONE 0 /* None */
#define R_IA64_DIR64LSB 0x27 /* word64 LSB S + A */
#define R_IA64_REL64LSB 0x6f /* word64 LSB BD + A */
/*
* _rtld_reloc: relocate the rtld image, apart from @fptrs.
*
* Assumes that rtld was linked at zero and that we only need to
* handle REL64LSB and DIR64LSB relocations.
*
* Arguments:
* r1 gp value for rtld
* in0 rtld base address
*/
// Scan rtld's own _DYNAMIC for the RELA table (address, size, entry
// size), then walk it applying DIR64LSB/REL64LSB by adding the load
// base to each word in place; NONE entries are skipped.  Returns 0 in
// r8.  NOTE(review): the first relocation is read before the
// remaining-size check, so an empty RELA table would misbehave —
// presumably the linker always emits at least one entry; confirm.
STATIC_ENTRY(_rtld_reloc, 1)
alloc loc0=ar.pfs,1,2,0,0
mov loc1=rp
;;
movl r15=@gprel(_DYNAMIC) // find _DYNAMIC etc.
;;
add r15=r15,gp // relocate _DYNAMIC etc.
;;
1: ld8 r16=[r15],8 // read r15->d_tag
;;
ld8 r17=[r15],8 // and r15->d_val
;;
cmp.eq p6,p0=DT_NULL,r16 // done?
(p6) br.cond.dpnt.few 2f
;;
cmp.eq p6,p0=DT_RELA,r16
;;
(p6) add r18=r17,in0 // found rela section
;;
cmp.eq p6,p0=DT_RELASZ,r16
;;
(p6) mov r19=r17 // found rela size
;;
cmp.eq p6,p0=DT_RELAENT,r16
;;
(p6) mov r22=r17 // found rela entry size
;;
br.sptk.few 1b
2:
ld8 r15=[r18],8 // read r_offset
;;
ld8 r16=[r18],8 // read r_info
add r15=r15,in0 // relocate r_offset
;;
ld8 r17=[r18],8 // read r_addend (loaded but unused below)
sub r19=r19,r22 // update relasz
extr.u r23=r16,0,32 // ELF64_R_TYPE(r16)
;;
cmp.eq p6,p0=R_IA64_NONE,r23
(p6) br.cond.dpnt.few 3f
;;
cmp.eq p6,p0=R_IA64_DIR64LSB,r23
;;
(p6) br.cond.dptk.few 4f
;;
cmp.eq p6,p0=R_IA64_REL64LSB,r23
;;
(p6) br.cond.dptk.few 4f
;;
3: cmp.ltu p6,p0=0,r19 // more?
(p6) br.cond.dptk.few 2b // loop
mov r8=0 // success return value
;;
br.cond.sptk.few 9f // done
4:
ld8 r16=[r15] // read value
;;
add r16=r16,in0 // relocate it
;;
st8 [r15]=r16 // and store it back
br.cond.sptk.few 3b
9:
mov ar.pfs=loc0
mov rp=loc1
;;
br.ret.sptk.few rp
END(_rtld_reloc)

View File

@ -35,6 +35,7 @@
#include <string.h>
#include <unistd.h>
#include "debug.h"
#include "rtld.h"
static int protflags(int); /* Elf flags -> mmap protection */

View File

@ -437,7 +437,14 @@ _rtld_bind(Obj_Entry *obj, Elf_Word reloff)
defobj->strtab + def->st_name, basename(obj->path),
(void *)target, basename(defobj->path));
reloc_jmpslot(where, target);
/*
* Write the new contents for the jmpslot. Note that depending on
* architecture, the value which we need to return back to the
* lazy binding trampoline may or may not be the target
* address. The value returned from reloc_jmpslot() is the value
* that the trampoline needs.
*/
target = reloc_jmpslot(where, target, defobj);
rlock_release();
return target;
}
@ -572,7 +579,7 @@ digest_dynamic(Obj_Entry *obj)
case DT_HASH:
{
const Elf_Addr *hashtab = (const Elf_Addr *)
const Elf_Hashelt *hashtab = (const Elf_Hashelt *)
(obj->relocbase + dynp->d_un.d_ptr);
obj->nbuckets = hashtab[0];
obj->nchains = hashtab[1];
@ -863,8 +870,10 @@ find_symdef(unsigned long symnum, const Obj_Entry *refobj,
cache[symnum].sym = def;
cache[symnum].obj = defobj;
}
} else
_rtld_error("%s: Undefined symbol \"%s\"", refobj->path, name);
} else {
if (refobj != &obj_rtld)
_rtld_error("%s: Undefined symbol \"%s\"", refobj->path, name);
}
return def;
}
@ -1031,15 +1040,19 @@ initlist_add_objects(Obj_Entry *obj, Obj_Entry **tail, Objlist *list)
objlist_push_head(&list_fini, obj);
}
#ifndef FPTR_TARGET
#define FPTR_TARGET(f) ((Elf_Addr) (f))
#endif
static bool
is_exported(const Elf_Sym *def)
{
func_ptr_type value;
Elf_Addr value;
const func_ptr_type *p;
value = (func_ptr_type)(obj_rtld.relocbase + def->st_value);
for (p = exports; *p != NULL; p++)
if (*p == value)
value = (Elf_Addr)(obj_rtld.relocbase + def->st_value);
for (p = exports; *p != NULL; p++)
if (FPTR_TARGET(*p) == value)
return true;
return false;
}
@ -1651,7 +1664,19 @@ dlsym(void *handle, const char *name)
if (def != NULL) {
rlock_release();
return defobj->relocbase + def->st_value;
/*
* The value required by the caller is derived from the value
* of the symbol. For the ia64 architecture, we need to
* construct a function descriptor which the caller can use to
* call the function with the right 'gp' value. For other
* architectures and for non-functions, the value is simply
* the relocated value of the symbol.
*/
if (ELF_ST_TYPE(def->st_info) == STT_FUNC)
return make_function_pointer(def, defobj);
else
return defobj->relocbase + def->st_value;
}
_rtld_error("Undefined symbol \"%s\"", name);

View File

@ -137,9 +137,9 @@ typedef struct Struct_Obj_Entry {
const char *strtab; /* String table */
unsigned long strsize; /* Size in bytes of string table */
const Elf_Addr *buckets; /* Hash table buckets array */
const Elf_Hashelt *buckets; /* Hash table buckets array */
unsigned long nbuckets; /* Number of buckets */
const Elf_Addr *chains; /* Hash table chain array */
const Elf_Hashelt *chains; /* Hash table chain array */
unsigned long nchains; /* Number of chains */
const char *rpath; /* Search path specified in object */
@ -161,6 +161,7 @@ typedef struct Struct_Obj_Entry {
Objlist dagmembers; /* DAG has these members (%) */
dev_t dev; /* Object's filesystem's device */
ino_t ino; /* Object's inode number */
void *priv; /* Platform-dependant */
} Obj_Entry;
#define RTLD_MAGIC 0xd550b87a