Import arm bits for rtld-elf.

Obtained from:	NetBSD
Olivier Houchard 2004-05-14 12:15:51 +00:00
parent 15144b0f96
commit e659267f1e
6 changed files with 684 additions and 2 deletions


@@ -1,8 +1,8 @@
# $FreeBSD$
PROG?= ld-elf.so.1
-SRCS= rtld_start.S rtld.c rtld_lock.c map_object.c malloc.c \
-	xmalloc.c debug.c reloc.c libmap.c
+SRCS= reloc.c rtld.c rtld_start.S rtld_lock.c map_object.c \
+	malloc.c xmalloc.c debug.c libmap.c
MAN= rtld.1
CSTD?= gnu99
CFLAGS+= -Wall -DFREEBSD_ELF -DIN_RTLD


@@ -0,0 +1 @@
# $FreeBSD$


@@ -0,0 +1,171 @@
/*-
* Copyright 1999, 2000 John D. Polstra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* from: FreeBSD: src/libexec/rtld-elf/alpha/lockdflt.c,v 1.6 2000/07/17
* $FreeBSD$
*/
/*
* Thread locking implementation for the dynamic linker.
*
* We use the "simple, non-scalable reader-preference lock" from:
*
* J. M. Mellor-Crummey and M. L. Scott. "Scalable Reader-Writer
* Synchronization for Shared-Memory Multiprocessors." 3rd ACM Symp. on
* Principles and Practice of Parallel Programming, April 1991.
*
* In this algorithm the lock is a single word. Its low-order bit is
* set when a writer holds the lock. The remaining high-order bits
* contain a count of readers desiring the lock. The algorithm requires
* atomic "compare_and_store" and "add" operations, which we implement
* using assembly language sequences in "rtld_start.S".
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <signal.h>
#include <stdlib.h>
#include <time.h>
#include <machine/atomic.h>
#include "debug.h"
#include "rtld.h"
#include "rtld_machdep.h"
#define WAFLAG 0x1 /* A writer holds the lock */
#define RC_INCR 0x2 /* Adjusts count of readers desiring lock */
typedef struct Struct_Lock {
volatile int lock;
void *base;
} Lock;
static sigset_t fullsigmask, oldsigmask;
static void *
lock_create(void *context)
{
void *base;
char *p;
uintptr_t r;
Lock *l;
/*
* Arrange for the lock to occupy its own cache line. First, we
* optimistically allocate just a cache line, hoping that malloc
* will give us a well-aligned block of memory. If that doesn't
* work, we allocate a larger block and take a well-aligned cache
* line from it.
*/
base = xmalloc(CACHE_LINE_SIZE);
p = (char *)base;
if ((uintptr_t)p % CACHE_LINE_SIZE != 0) {
free(base);
base = xmalloc(2 * CACHE_LINE_SIZE);
p = (char *)base;
if ((r = (uintptr_t)p % CACHE_LINE_SIZE) != 0)
p += CACHE_LINE_SIZE - r;
}
l = (Lock *)p;
l->base = base;
l->lock = 0;
return l;
}
static void
lock_destroy(void *lock)
{
Lock *l = (Lock *)lock;
free(l->base);
}
static void
rlock_acquire(void *lock)
{
Lock *l = (Lock *)lock;
atomic_add_acq_int(&l->lock, RC_INCR);
while (l->lock & WAFLAG)
; /* Spin */
}
static void
wlock_acquire(void *lock)
{
Lock *l = (Lock *)lock;
sigset_t tmp_oldsigmask;
for ( ; ; ) {
sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask);
if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
break;
sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
}
oldsigmask = tmp_oldsigmask;
}
static void
rlock_release(void *lock)
{
Lock *l = (Lock *)lock;
atomic_add_rel_int(&l->lock, -RC_INCR);
}
static void
wlock_release(void *lock)
{
Lock *l = (Lock *)lock;
atomic_add_rel_int(&l->lock, -WAFLAG);
sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
}
void
lockdflt_init(LockInfo *li)
{
li->context = NULL;
li->lock_create = lock_create;
li->rlock_acquire = rlock_acquire;
li->wlock_acquire = wlock_acquire;
li->rlock_release = rlock_release;
li->wlock_release = wlock_release;
li->lock_destroy = lock_destroy;
li->context_destroy = NULL;
/*
* Construct a mask to block all signals except traps which might
* conceivably be generated within the dynamic linker itself.
*/
sigfillset(&fullsigmask);
sigdelset(&fullsigmask, SIGILL);
sigdelset(&fullsigmask, SIGTRAP);
sigdelset(&fullsigmask, SIGABRT);
sigdelset(&fullsigmask, SIGEMT);
sigdelset(&fullsigmask, SIGFPE);
sigdelset(&fullsigmask, SIGBUS);
sigdelset(&fullsigmask, SIGSEGV);
sigdelset(&fullsigmask, SIGSYS);
}
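
For readers unfamiliar with the locking scheme described in the header comment above, here is a minimal standalone sketch of the same single-word reader-preference lock, written with C11 atomics rather than the <machine/atomic.h> primitives and signal masking the file actually uses. It is illustrative only and not part of the commit.

/*
 * Illustrative sketch, not part of this commit.  Same encoding as above:
 * low bit set while a writer holds the lock, upper bits count readers
 * that want (or hold) the lock.
 */
#include <stdatomic.h>

#define WAFLAG  0x1     /* A writer holds the lock */
#define RC_INCR 0x2     /* Adjusts count of readers desiring lock */

static atomic_int word;

static void
demo_rlock_acquire(void)
{
	/* Announce the reader, then spin until no writer holds the lock. */
	atomic_fetch_add_explicit(&word, RC_INCR, memory_order_acquire);
	while (atomic_load_explicit(&word, memory_order_acquire) & WAFLAG)
		;	/* spin */
}

static void
demo_rlock_release(void)
{
	atomic_fetch_sub_explicit(&word, RC_INCR, memory_order_release);
}

static void
demo_wlock_acquire(void)
{
	int expected = 0;

	/* A writer may enter only when no readers or writers are present. */
	while (!atomic_compare_exchange_weak_explicit(&word, &expected,
	    WAFLAG, memory_order_acquire, memory_order_relaxed))
		expected = 0;	/* compare_exchange wrote back the current value */
}

static void
demo_wlock_release(void)
{
	atomic_fetch_sub_explicit(&word, WAFLAG, memory_order_release);
}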


@@ -0,0 +1,341 @@
/* $NetBSD: mdreloc.c,v 1.23 2003/07/26 15:04:38 mrg Exp $ */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "debug.h"
#include "rtld.h"
void
init_pltgot(Obj_Entry *obj)
{
if (obj->pltgot != NULL) {
obj->pltgot[1] = (Elf_Addr) obj;
obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start;
}
}
int
do_copy_relocations(Obj_Entry *dstobj)
{
const Elf_Rel *rellim;
const Elf_Rel *rel;
assert(dstobj->mainprog); /* COPY relocations are invalid elsewhere */
rellim = (const Elf_Rel *) ((caddr_t) dstobj->rel + dstobj->relsize);
for (rel = dstobj->rel; rel < rellim; rel++) {
if (ELF_R_TYPE(rel->r_info) == R_ARM_COPY) {
void *dstaddr;
const Elf_Sym *dstsym;
const char *name;
unsigned long hash;
size_t size;
const void *srcaddr;
const Elf_Sym *srcsym;
Obj_Entry *srcobj;
dstaddr = (void *) (dstobj->relocbase + rel->r_offset);
dstsym = dstobj->symtab + ELF_R_SYM(rel->r_info);
name = dstobj->strtab + dstsym->st_name;
hash = elf_hash(name);
size = dstsym->st_size;
for (srcobj = dstobj->next; srcobj != NULL; srcobj = srcobj->next)
if ((srcsym = symlook_obj(name, hash, srcobj, false)) != NULL)
break;
if (srcobj == NULL) {
_rtld_error("Undefined symbol \"%s\" referenced from COPY"
" relocation in %s", name, dstobj->path);
return -1;
}
srcaddr = (const void *) (srcobj->relocbase + srcsym->st_value);
memcpy(dstaddr, srcaddr, size);
}
}
return 0;
}
void _rtld_bind_start(void);
void _rtld_relocate_nonplt_self(Elf_Dyn *, Elf_Addr);
int open();
int _open();
void
_rtld_relocate_nonplt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
{
const Elf_Rel *rel = 0, *rellim;
Elf_Addr relsz = 0;
Elf_Addr *where;
uint32_t size;
for (; dynp->d_tag != DT_NULL; dynp++) {
switch (dynp->d_tag) {
case DT_REL:
rel = (const Elf_Rel *)(relocbase + dynp->d_un.d_ptr);
break;
case DT_RELSZ:
relsz = dynp->d_un.d_val;
break;
}
}
rellim = (const Elf_Rel *)((caddr_t)rel + relsz);
size = (rellim - 1)->r_offset - rel->r_offset;
mprotect((void*)relocbase, size, PROT_READ|PROT_WRITE|PROT_EXEC);
for (; rel < rellim; rel++) {
where = (Elf_Addr *)(relocbase + rel->r_offset);
*where += (Elf_Addr)relocbase;
}
mprotect((void*)relocbase, size, PROT_READ|PROT_EXEC);
}
/*
* It is possible for the compiler to emit relocations for unaligned data.
* We handle this situation with these inlines.
*/
#define RELOC_ALIGNED_P(x) \
(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)
static __inline Elf_Addr
load_ptr(void *where)
{
Elf_Addr res;
memcpy(&res, where, sizeof(res));
return (res);
}
static __inline void
store_ptr(void *where, Elf_Addr val)
{
memcpy(where, &val, sizeof(val));
}
static int
reloc_nonplt_object(Obj_Entry *obj, const Elf_Rel *rel, SymCache *cache)
{
Elf_Addr *where;
const Elf_Sym *def;
const Obj_Entry *defobj;
Elf_Addr tmp;
unsigned long symnum;
where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
symnum = ELF_R_SYM(rel->r_info);
switch (ELF_R_TYPE(rel->r_info)) {
case R_ARM_NONE:
break;
#if 1 /* XXX should not occur */
case R_ARM_PC24: { /* word32 S - P + A */
Elf32_Sword addend;
/*
* Extract addend and sign-extend if needed.
*/
addend = *where;
if (addend & 0x00800000)
addend |= 0xff000000;
def = find_symdef(symnum, obj, &defobj, false, cache);
if (def == NULL)
return -1;
tmp = (Elf_Addr)obj->relocbase + def->st_value
- (Elf_Addr)where + (addend << 2);
if ((tmp & 0xfe000000) != 0xfe000000 &&
(tmp & 0xfe000000) != 0) {
_rtld_error(
"%s: R_ARM_PC24 relocation @ %p to %s failed "
"(displacement %ld (%#lx) out of range)",
obj->path, where,
obj->strtab + obj->symtab[symnum].st_name,
(long) tmp, (long) tmp);
return -1;
}
tmp >>= 2;
*where = (*where & 0xff000000) | (tmp & 0x00ffffff);
dbg("PC24 %s in %s --> %p @ %p in %s",
obj->strtab + obj->symtab[symnum].st_name,
obj->path, (void *)*where, where, defobj->path);
break;
}
#endif
case R_ARM_ABS32: /* word32 B + S + A */
case R_ARM_GLOB_DAT: /* word32 B + S */
def = find_symdef(symnum, obj, &defobj, false, cache);
if (def == NULL)
return -1;
if (__predict_true(RELOC_ALIGNED_P(where))) {
tmp = *where + (Elf_Addr)defobj->relocbase +
def->st_value;
*where = tmp;
} else {
tmp = load_ptr(where) +
(Elf_Addr)defobj->relocbase +
def->st_value;
store_ptr(where, tmp);
}
dbg("ABS32/GLOB_DAT %s in %s --> %p @ %p in %s",
obj->strtab + obj->symtab[symnum].st_name,
obj->path, (void *)tmp, where, defobj->path);
break;
case R_ARM_RELATIVE: /* word32 B + A */
if (__predict_true(RELOC_ALIGNED_P(where))) {
tmp = *where + (Elf_Addr)obj->relocbase;
*where = tmp;
} else {
tmp = load_ptr(where) +
(Elf_Addr)obj->relocbase;
store_ptr(where, tmp);
}
dbg("RELATIVE in %s --> %p", obj->path,
(void *)tmp);
break;
case R_ARM_COPY:
/*
* These are deferred until all other relocations have
* been done. All we do here is make sure that the
* COPY relocation is not in a shared library. They
* are allowed only in executable files.
*/
if (!obj->mainprog) {
_rtld_error(
"%s: Unexpected R_COPY relocation in shared library",
obj->path);
return -1;
}
dbg("COPY (avoid in main)");
break;
default:
dbg("sym = %lu, type = %lu, offset = %p, "
"contents = %p, symbol = %s",
symnum, (u_long)ELF_R_TYPE(rel->r_info),
(void *)rel->r_offset, (void *)load_ptr(where),
obj->strtab + obj->symtab[symnum].st_name);
_rtld_error("%s: Unsupported relocation type %ld "
"in non-PLT relocations\n",
obj->path, (u_long) ELF_R_TYPE(rel->r_info));
return -1;
}
return 0;
}
/*
* * Process non-PLT relocations
* */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld)
{
const Elf_Rel *rellim;
const Elf_Rel *rel;
SymCache *cache;
int bytes = obj->nchains * sizeof(SymCache);
int r = -1;
/*
* The dynamic loader may be called from a thread, we have
* limited amounts of stack available so we cannot use alloca().
* */
cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);
if (cache == MAP_FAILED)
cache = NULL;
rellim = (const Elf_Rel *)((caddr_t)obj->rel + obj->relsize);
for (rel = obj->rel; rel < rellim; rel++) {
if (reloc_nonplt_object(obj, rel, cache) < 0)
goto done;
}
r = 0;
done:
if (cache) {
munmap(cache, bytes);
}
return (r);
}
/*
* * Process the PLT relocations.
* */
int
reloc_plt(Obj_Entry *obj)
{
const Elf_Rel *rellim;
const Elf_Rel *rel;
rellim = (const Elf_Rel *)((char *)obj->pltrel +
obj->pltrelsize);
for (rel = obj->pltrel; rel < rellim; rel++) {
Elf_Addr *where;
assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);
where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
*where += (Elf_Addr )obj->relocbase;
}
return (0);
}
/*
* * LD_BIND_NOW was set - force relocation for all jump slots
* */
int
reloc_jmpslots(Obj_Entry *obj)
{
const Obj_Entry *defobj;
const Elf_Rel *rellim;
const Elf_Rel *rel;
const Elf_Sym *def;
Elf_Addr *where;
Elf_Addr target;
rellim = (const Elf_Rel *)((char *)obj->pltrel + obj->pltrelsize);
for (rel = obj->pltrel; rel < rellim; rel++) {
assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);
where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
true, NULL);
if (def == NULL) {
dbg("reloc_jmpslots: sym not found");
return (-1);
}
target = (Elf_Addr)(defobj->relocbase + def->st_value);
reloc_jmpslot(where, target, defobj, obj,
(const Elf_Rel *) rel);
}
obj->jmpslots_done = true;
return (0);
}
Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *defobj,
const Obj_Entry *obj, const Elf_Rel *rel)
{
assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);
if (*where != target)
*where = target;
return target;
}
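
The R_ARM_PC24 case above packs a signed 24-bit word offset into the low bits of the branch instruction: the addend is extracted, sign-extended, scaled to bytes, and the final displacement must stay within the roughly ±32 MB reach of an ARM branch. A small standalone sketch of that arithmetic follows; it is illustrative only, not part of the commit, and the instruction word used is made up.

/* Illustrative sketch, not part of this commit. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t insn = 0x00ffffff;	/* hypothetical branch encoding an addend of -1 */
	int32_t addend = insn & 0x00ffffff;

	if (addend & 0x00800000)	/* sign-extend the 24-bit field */
		addend |= 0xff000000;

	/* Word offset -> byte offset; the code above does this with "<< 2". */
	int32_t displacement = addend * 4;

	printf("addend %d -> displacement %d bytes\n",
	    (int)addend, (int)displacement);
	return (0);
}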


@@ -0,0 +1,64 @@
/*-
* Copyright (c) 1999, 2000 John D. Polstra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef RTLD_MACHDEP_H
#define RTLD_MACHDEP_H 1
#include <sys/types.h>
#include <machine/atomic.h>
#define CACHE_LINE_SIZE 32
struct Struct_Obj_Entry;
/* Return the address of the .dynamic section in the dynamic linker. */
#if 0
#define rtld_dynamic(obj) \
((const Elf_Dyn *)((obj)->relocbase + (Elf_Addr)&_DYNAMIC))
#endif
#if 0
#define rtld_dynamic(obj) (&_DYNAMIC)
#endif
#define rtld_dynamic(obj) (const Elf_Dyn *)((obj)->relocbase)
Elf_Addr reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
const struct Struct_Obj_Entry *defobj,
const struct Struct_Obj_Entry *obj,
const Elf_Rel *rel);
#define make_function_pointer(def, defobj) \
((defobj)->relocbase + (def)->st_value)
#define call_initfini_pointer(obj, target) \
(((InitFunc)(target))())
/*
* Lazy binding entry point, called via PLT.
*/
void _rtld_bind_start(void);
#endif


@@ -0,0 +1,105 @@
/* $NetBSD: rtld_start.S,v 1.7 2002/09/12 17:18:38 mycroft Exp $ */
/*-
* Copyright (c) 1998, 2002 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas and by Charles M. Hannum.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
.text
.align 0
.globl .rtld_start
.type .rtld_start,%function
.rtld_start:
mov r6, sp
sub sp, sp, #8 /* make room for obj_main & exit proc */
mov r4, r0 /* save ps_strings */
ldr sl, .L2
ldr r5, .L2+4
ldr r0, .L2+8
.L1:
add sl, pc, sl
ldr r5, [sl, r5]
ldr r0, [sl, r0]
sub r1, sl, r5 /* relocbase */
add r0, r1, r0 /* &_DYNAMIC */
bl _rtld_relocate_nonplt_self
mov r1, sp
add r2, sp, #4
mov r0, r6 /* sp */
bl _rtld /* call the shared loader */
mov r3, r0 /* save entry point */
ldr r1, [sp, #0] /* r2 = cleanup */
ldr r0, [sp, #4] /* r1 = obj_main */
add sp, sp, #8 /* restore stack */
#if 0
mov r0, r4 /* restore ps_strings */
#endif
mov r2, #0
mov pc, r3 /* jump to the entry point */
.L2:
.word _GLOBAL_OFFSET_TABLE_ - (.L1+8)
.word _GLOBAL_OFFSET_TABLE_(GOT)
.word _DYNAMIC(GOT)
.align 0
.globl _rtld_bind_start
.type _rtld_bind_start,%function
/*
* stack[0] = RA
* ip = &GOT[n+3]
* lr = &GOT[2]
*/
_rtld_bind_start:
stmdb sp!,{r0-r4,sl,fp}
sub r1, ip, lr /* r1 = 4 * (n + 1) */
sub r1, r1, #4 /* r1 = 4 * n */
add r1, r1, r1 /* r1 = 8 * n */
ldr r0, [lr, #-4] /* get obj ptr from GOT[1] */
mov r4, ip /* save GOT location */
bl _rtld_bind /* Call the binder */
str r0, [r4] /* save address in GOT */
mov ip, r0 /* save new address */
ldmia sp!,{r0-r4,sl,fp,lr} /* restore the stack */
mov pc, ip /* jump to the new address */
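
The arithmetic in _rtld_bind_start is terse: the PLT leaves ip pointing at GOT[n+3] for slot n and lr pointing at GOT[2], so ip - lr is 4*(n+1) bytes, and since a 32-bit ARM Elf_Rel is 8 bytes the subtract-4-then-double sequence yields the byte offset of the n-th entry in .rel.plt, which is what the code above passes to _rtld_bind in r1. A C restatement of that computation, illustrative only (the helper name is made up):

/* Illustrative sketch, not part of this commit. */
#include <stdint.h>

static uint32_t
plt_reloc_offset(uintptr_t ip, uintptr_t lr)
{
	uint32_t diff = (uint32_t)(ip - lr);	/* 4 * (n + 1) */

	diff -= 4;			/* 4 * n */
	return (diff + diff);		/* 8 * n == n * sizeof(Elf_Rel) */
}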