diff --git a/libexec/rtld-elf/Makefile b/libexec/rtld-elf/Makefile
index d6df617d424d..2f5d574ada60 100644
--- a/libexec/rtld-elf/Makefile
+++ b/libexec/rtld-elf/Makefile
@@ -11,7 +11,11 @@ MAN=	rtld.1
 CSTD?=		gnu99
 CFLAGS+=	-Wall -DFREEBSD_ELF -DIN_RTLD
 CFLAGS+=	-I${.CURDIR}/${MACHINE_ARCH} -I${.CURDIR}
+.if ${MACHINE_ARCH} == "powerpc64"
+LDFLAGS+=	-nostdlib -e _rtld_start
+.else
 LDFLAGS+=	-nostdlib -e .rtld_start
+.endif
 WARNS?=		2
 INSTALLFLAGS=	-C -b
 PRECIOUSPROG=
diff --git a/libexec/rtld-elf/Symbol.map b/libexec/rtld-elf/Symbol.map
index ce1e3e5f2b35..04fe9b84f77f 100644
--- a/libexec/rtld-elf/Symbol.map
+++ b/libexec/rtld-elf/Symbol.map
@@ -15,6 +15,9 @@ FBSD_1.0 {
 	dlinfo;
 	dl_iterate_phdr;
 	r_debug_state;
+#ifdef __powerpc64__
+	.r_debug_state;
+#endif
 	__tls_get_addr;
 };

diff --git a/libexec/rtld-elf/powerpc64/Makefile.inc b/libexec/rtld-elf/powerpc64/Makefile.inc
new file mode 100644
index 000000000000..e8c0da7a1d7e
--- /dev/null
+++ b/libexec/rtld-elf/powerpc64/Makefile.inc
@@ -0,0 +1 @@
+# $FreeBSD$
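Annotation (not part of the patch): the build and symbol-map changes above reflect the 64-bit PowerPC ELFv1 convention in which a function symbol such as `r_debug_state` names a three-word function descriptor in the data segment, while the matching dot-symbol (`.r_debug_state`) names the code entry point itself. That is why the entry point switches from `.rtld_start` to `_rtld_start` and why the dot-spelling is exported alongside the plain one. The sketch below is purely illustrative: it mirrors the `struct funcdesc` defined later in `reloc.c`, and `my_entry` is a hypothetical function, not anything in rtld.

```c
#include <stdio.h>

/*
 * Illustrative only: the shape of an ELFv1 (64-bit PowerPC) function
 * descriptor, matching the "struct funcdesc" used in reloc.c below.
 * The plain symbol names a record like this; the dot-symbol names the
 * code it points at.
 */
struct funcdesc {
	unsigned long addr;	/* entry point of the code (the ".name" symbol) */
	unsigned long toc;	/* TOC (r2) value the callee expects */
	unsigned long env;	/* environment word, unused by plain C */
};

static void
my_entry(void)			/* hypothetical target function */
{
	printf("called through a descriptor\n");
}

int
main(void)
{
	/*
	 * On a real ELFv1 system the toolchain builds the descriptor and
	 * &my_entry already refers to it; here we only demonstrate the
	 * extra level of indirection, relying on the usual flat-address
	 * behaviour of the casts.
	 */
	struct funcdesc desc = { (unsigned long)&my_entry, 0, 0 };
	void (*fn)(void) = (void (*)(void))desc.addr;

	fn();
	return (0);
}
```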
diff --git a/libexec/rtld-elf/powerpc64/reloc.c b/libexec/rtld-elf/powerpc64/reloc.c
new file mode 100644
index 000000000000..b35be1806529
--- /dev/null
+++ b/libexec/rtld-elf/powerpc64/reloc.c
@@ -0,0 +1,493 @@
+/*	$NetBSD: ppc_reloc.c,v 1.10 2001/09/10 06:09:41 mycroft Exp $	*/
+
+/*-
+ * Copyright (C) 1998	Tsubai Masanari
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/mman.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "debug.h"
+#include "rtld.h"
+
+struct funcdesc {
+	Elf_Addr addr;
+	Elf_Addr toc;
+	Elf_Addr env;
+};
+
+/*
+ * Process the R_PPC_COPY relocations
+ */
+int
+do_copy_relocations(Obj_Entry *dstobj)
+{
+	const Elf_Rela *relalim;
+	const Elf_Rela *rela;
+
+	/*
+	 * COPY relocs are invalid outside of the main program
+	 */
+	assert(dstobj->mainprog);
+
+	relalim = (const Elf_Rela *) ((caddr_t) dstobj->rela +
+	    dstobj->relasize);
+	for (rela = dstobj->rela; rela < relalim; rela++) {
+		void *dstaddr;
+		const Elf_Sym *dstsym;
+		const char *name;
+		unsigned long hash;
+		size_t size;
+		const void *srcaddr;
+		const Elf_Sym *srcsym = NULL;
+		Obj_Entry *srcobj;
+		const Ver_Entry *ve;
+
+		if (ELF_R_TYPE(rela->r_info) != R_PPC_COPY) {
+			continue;
+		}
+
+		dstaddr = (void *) (dstobj->relocbase + rela->r_offset);
+		dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
+		name = dstobj->strtab + dstsym->st_name;
+		hash = elf_hash(name);
+		size = dstsym->st_size;
+		ve = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
+
+		for (srcobj = dstobj->next; srcobj != NULL;
+		    srcobj = srcobj->next) {
+			if ((srcsym = symlook_obj(name, hash, srcobj, ve, 0))
+			    != NULL) {
+				break;
+			}
+		}
+
+		if (srcobj == NULL) {
+			_rtld_error("Undefined symbol \"%s\" "
+			    " referenced from COPY"
+			    " relocation in %s", name, dstobj->path);
+			return (-1);
+		}
+
+		srcaddr = (const void *) (srcobj->relocbase+srcsym->st_value);
+		memcpy(dstaddr, srcaddr, size);
+		dbg("copy_reloc: src=%p,dst=%p,size=%zd\n",srcaddr,dstaddr,size);
+	}
+
+	return (0);
+}
+
+
+/*
+ * Perform early relocation of the run-time linker image
+ */
+void
+reloc_non_plt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
+{
+	const Elf_Rela *rela = 0, *relalim;
+	Elf_Addr relasz = 0;
+	Elf_Addr *where;
+
+	/*
+	 * Extract the rela/relasz values from the dynamic section
+	 */
+	for (; dynp->d_tag != DT_NULL; dynp++) {
+		switch (dynp->d_tag) {
+		case DT_RELA:
+			rela = (const Elf_Rela *)(relocbase+dynp->d_un.d_ptr);
+			break;
+		case DT_RELASZ:
+			relasz = dynp->d_un.d_val;
+			break;
+		}
+	}
+
+	/*
+	 * Relocate these values
+	 */
+	relalim = (const Elf_Rela *)((caddr_t)rela + relasz);
+	for (; rela < relalim; rela++) {
+		where = (Elf_Addr *)(relocbase + rela->r_offset);
+		*where = (Elf_Addr)(relocbase + rela->r_addend);
+	}
+}
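Annotation (not part of the patch): at this early point ld-elf.so.1 has no unresolved symbols of its own, so `reloc_non_plt_self()` can treat every entry as a relative relocation and simply store "load base plus addend" at "load base plus offset". A minimal standalone sketch of that arithmetic follows; `Rela`, `image`, and the sample table are invented for illustration and are not rtld code.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for Elf_Rela: where to patch and what to add. */
typedef struct {
	uintptr_t r_offset;	/* offset of the word inside the image */
	uintptr_t r_addend;	/* link-time value, relative to address 0 */
} Rela;

int
main(void)
{
	/* Pretend this buffer is the mapped image; its start is the load base. */
	static uintptr_t image[4];
	uintptr_t relocbase = (uintptr_t)image;

	/* Two fabricated "relative" relocations against that image. */
	Rela rela[] = {
		{ 0 * sizeof(uintptr_t), 0x100 },
		{ 2 * sizeof(uintptr_t), 0x2a8 },
	};

	for (size_t i = 0; i < sizeof(rela) / sizeof(rela[0]); i++) {
		uintptr_t *where = (uintptr_t *)(relocbase + rela[i].r_offset);
		*where = relocbase + rela[i].r_addend;	/* B + A */
	}

	printf("image[0] = %#lx (load base %#lx + 0x100)\n",
	    (unsigned long)image[0], (unsigned long)relocbase);
	return (0);
}
```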
+
+
+/*
+ * Relocate a non-PLT object with addend.
+ */
+static int
+reloc_nonplt_object(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela,
+    SymCache *cache)
+{
+	Elf_Addr        *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
+	const Elf_Sym	*def;
+	const Obj_Entry	*defobj;
+	Elf_Addr         tmp;
+
+	switch (ELF_R_TYPE(rela->r_info)) {
+
+	case R_PPC_NONE:
+		break;
+
+	case R_PPC64_ADDR64:	/* doubleword64 S + A */
+	case R_PPC_GLOB_DAT:
+		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
+		    false, cache);
+		if (def == NULL) {
+			return (-1);
+		}
+
+		tmp = (Elf_Addr)(defobj->relocbase + def->st_value +
+		    rela->r_addend);
+
+		/* Don't issue write if unnecessary; avoid COW page fault */
+		if (*where != tmp) {
+			*where = tmp;
+		}
+		break;
+
+	case R_PPC_RELATIVE:	/* doubleword64 B + A */
+		tmp = (Elf_Addr)(obj->relocbase + rela->r_addend);
+
+		/* As above, don't issue write unnecessarily */
+		if (*where != tmp) {
+			*where = tmp;
+		}
+		break;
+
+	case R_PPC_COPY:
+		/*
+		 * These are deferred until all other relocations
+		 * have been done.  All we do here is make sure
+		 * that the COPY relocation is not in a shared
+		 * library.  They are allowed only in executable
+		 * files.
+		 */
+		if (!obj->mainprog) {
+			_rtld_error("%s: Unexpected R_COPY "
+			    " relocation in shared library",
+			    obj->path);
+			return (-1);
+		}
+		break;
+
+	case R_PPC_JMP_SLOT:
+		/*
+		 * These will be handled by the plt/jmpslot routines
+		 */
+		break;
+
+	case R_PPC64_DTPMOD64:
+		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
+		    false, cache);
+
+		if (def == NULL)
+			return (-1);
+
+		*where = (Elf_Addr) defobj->tlsindex;
+
+		break;
+
+	case R_PPC64_TPREL64:
+		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
+		    false, cache);
+
+		if (def == NULL)
+			return (-1);
+
+		/*
+		 * We lazily allocate offsets for static TLS as we
+		 * see the first relocation that references the
+		 * TLS block. This allows us to support (small
+		 * amounts of) static TLS in dynamically loaded
+		 * modules. If we run out of space, we generate an
+		 * error.
+		 */
+		if (!defobj->tls_done) {
+			if (!allocate_tls_offset((Obj_Entry*) defobj)) {
+				_rtld_error("%s: No space available for static "
+				    "Thread Local Storage", obj->path);
+				return (-1);
+			}
+		}
+
+		*(Elf_Addr **)where = *where * sizeof(Elf_Addr) +
+		    (Elf_Addr *)(def->st_value + rela->r_addend +
+		    defobj->tlsoffset - TLS_TP_OFFSET);
+
+		break;
+
+	case R_PPC64_DTPREL64:
+		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
+		    false, cache);
+
+		if (def == NULL)
+			return (-1);
+
+		*where += (Elf_Addr)(def->st_value + rela->r_addend
+		    - TLS_DTV_OFFSET);
+
+		break;
+
+	default:
+		_rtld_error("%s: Unsupported relocation type %ld"
+		    " in non-PLT relocations\n", obj->path,
+		    ELF_R_TYPE(rela->r_info));
+		return (-1);
+	}
+	return (0);
+}
+
+
+/*
+ * Process non-PLT relocations
+ */
+int
+reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld)
+{
+	const Elf_Rela *relalim;
+	const Elf_Rela *rela;
+	SymCache *cache;
+	int bytes = obj->nchains * sizeof(SymCache);
+	int r = -1;
+
+	/*
+	 * The dynamic loader may be called from a thread, we have
+	 * limited amounts of stack available so we cannot use alloca().
+	 */
+	if (obj != obj_rtld) {
+		cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON,
+		    -1, 0);
+		if (cache == MAP_FAILED)
+			cache = NULL;
+	} else
+		cache = NULL;
+
+	/*
+	 * From the SVR4 PPC ABI:
+	 * "The PowerPC family uses only the Elf32_Rela relocation
+	 *  entries with explicit addends."
+	 */
+	relalim = (const Elf_Rela *)((caddr_t)obj->rela + obj->relasize);
+	for (rela = obj->rela; rela < relalim; rela++) {
+		if (reloc_nonplt_object(obj_rtld, obj, rela, cache) < 0)
+			goto done;
+	}
+	r = 0;
+done:
+	if (cache) {
+		munmap(cache, bytes);
+	}
+	return (r);
+}
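Annotation (not part of the patch): the R_PPC64_TPREL64 case above, together with `__tls_get_addr()` and the `TLS_TP_OFFSET`/`TLS_DTV_OFFSET`/`TLS_TCB_SIZE` constants in `rtld_machdep.h`, implements the PowerPC64 variant-I TLS layout, in which the thread pointer (r13) is biased 0x7000 bytes above the thread's TLS storage so that common offsets fit in a signed 16-bit displacement. A rough standalone sketch of that address arithmetic follows; the buffer, offsets, and layout here are illustrative, not a dump of real rtld state.

```c
#include <stdint.h>
#include <stdio.h>

#define TLS_TP_OFFSET	0x7000	/* bias applied to the thread pointer */
#define TLS_TCB_SIZE	16	/* TCB sits just below the TLS blocks */

int
main(void)
{
	/* Pretend this is the allocated TCB plus static TLS area. */
	static char tls_area[TLS_TCB_SIZE + 128];
	uintptr_t tcb = (uintptr_t)tls_area;

	/*
	 * The thread pointer is biased: tp = end of TCB + 0x7000
	 * (allocate_initial_tls() computes essentially this value).
	 */
	uintptr_t tp = tcb + TLS_TCB_SIZE + TLS_TP_OFFSET;

	/*
	 * A static-TLS variable at offset 8 into the first module's block
	 * is then addressed as tp + tprel, where tprel is the block offset
	 * minus TLS_TP_OFFSET (cf. the R_PPC64_TPREL64 case above).
	 */
	uintptr_t block = tcb + TLS_TCB_SIZE;	/* first module's block */
	intptr_t tprel = (intptr_t)(block + 8) - (intptr_t)tp;

	printf("tprel = %ld (fits in a signed 16-bit field: %s)\n",
	    (long)tprel, (tprel >= -0x8000 && tprel < 0x8000) ? "yes" : "no");
	return (0);
}
```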
+
+
+/*
+ * Initialise a PLT slot to the resolving trampoline
+ */
+static int
+reloc_plt_object(Obj_Entry *obj, const Elf_Rela *rela)
+{
+	Elf_Addr *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
+	Elf_Addr *glink;
+	long reloff;
+
+	reloff = rela - obj->pltrela;
+
+	if (obj->priv == NULL)
+		obj->priv = malloc(obj->pltrelasize);
+	glink = obj->priv + reloff*sizeof(Elf_Addr)*2;
+
+	if ((reloff < 0) || (reloff >= 0x8000)) {
+		return (-1);
+	}
+
+	dbg(" reloc_plt_object: where=%p,reloff=%lx,glink=%p", (void *)where, reloff, glink);
+
+	memcpy(where, _rtld_bind_start, sizeof(struct funcdesc));
+	((struct funcdesc *)(where))->env = (Elf_Addr)glink;
+	*(glink++) = (Elf_Addr)obj;
+	*(glink++) = reloff*sizeof(Elf_Rela);
+
+	return (0);
+}
+
+
+/*
+ * Process the PLT relocations.
+ */
+int
+reloc_plt(Obj_Entry *obj)
+{
+	const Elf_Rela *relalim;
+	const Elf_Rela *rela;
+
+	if (obj->pltrelasize != 0) {
+		relalim = (const Elf_Rela *)((char *)obj->pltrela +
+		    obj->pltrelasize);
+		for (rela = obj->pltrela; rela < relalim; rela++) {
+			assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);
+
+			if (reloc_plt_object(obj, rela) < 0) {
+				return (-1);
+			}
+		}
+	}
+
+	return (0);
+}
+
+
+/*
+ * LD_BIND_NOW was set - force relocation for all jump slots
+ */
+int
+reloc_jmpslots(Obj_Entry *obj)
+{
+	const Obj_Entry *defobj;
+	const Elf_Rela *relalim;
+	const Elf_Rela *rela;
+	const Elf_Sym *def;
+	Elf_Addr *where;
+	Elf_Addr target;
+
+	relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
+	for (rela = obj->pltrela; rela < relalim; rela++) {
+		assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);
+		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
+		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
+		    true, NULL);
+		if (def == NULL) {
+			dbg("reloc_jmpslots: sym not found");
+			return (-1);
+		}
+
+		target = (Elf_Addr)(defobj->relocbase + def->st_value);
+
+#if 0
+		/* PG XXX */
+		dbg("\"%s\" in \"%s\" --> %p in \"%s\"",
+		    defobj->strtab + def->st_name, basename(obj->path),
+		    (void *)target, basename(defobj->path));
+#endif
+
+		reloc_jmpslot(where, target, defobj, obj,
+		    (const Elf_Rel *) rela);
+	}
+
+	obj->jmpslots_done = true;
+
+	return (0);
+}
+
+
+/*
+ * Update the value of a PLT jump slot.
+ */
+Elf_Addr
+reloc_jmpslot(Elf_Addr *wherep, Elf_Addr target, const Obj_Entry *defobj,
+    const Obj_Entry *obj, const Elf_Rel *rel)
+{
+	dbg(" reloc_jmpslot: where=%p, target=%p (%#lx + %#lx)",
+	    (void *)wherep, (void *)target, *(Elf_Addr *)target,
+	    (Elf_Addr)defobj->relocbase);
+
+	/*
+	 * At the PLT entry pointed at by `wherep', construct
+	 * a direct transfer to the now fully resolved function
+	 * address.
+	 */
+
+	memcpy(wherep, (void *)target, sizeof(struct funcdesc));
+	if (((struct funcdesc *)(wherep))->addr < (Elf_Addr)defobj->relocbase) {
+		/*
+		 * XXX: It is possible (e.g. LD_BIND_NOW) that the function
+		 * descriptor we are copying has not yet been relocated.
+		 * If this happens, fix it.
+		 */
+
+		((struct funcdesc *)(wherep))->addr +=
+		    (Elf_Addr)defobj->relocbase;
+		((struct funcdesc *)(wherep))->toc +=
+		    (Elf_Addr)defobj->relocbase;
+	}
+
+	__asm __volatile("dcbst 0,%0; sync" :: "r"(wherep) : "memory");
+
+	return (target);
+}
+
+void
+init_pltgot(Obj_Entry *obj)
+{
+}
+
+void
+allocate_initial_tls(Obj_Entry *list)
+{
+	register Elf_Addr **tp __asm__("r13");
+	Elf_Addr **_tp;
+
+	/*
+	 * Fix the size of the static TLS block by using the maximum
+	 * offset allocated so far and adding a bit for dynamic modules to
+	 * use.
+	 */
+
+	tls_static_space = tls_last_offset + tls_last_size + RTLD_STATIC_TLS_EXTRA;
+
+	_tp = (Elf_Addr **) ((char *)allocate_tls(list, NULL, TLS_TCB_SIZE, 16)
+	    + TLS_TP_OFFSET + TLS_TCB_SIZE);
+
+	/*
+	 * XXX gcc seems to ignore 'tp = _tp;'
+	 */
+
+	__asm __volatile("mr %0,%1" : "=r"(tp) : "r"(_tp));
+}
+
+void*
+__tls_get_addr(tls_index* ti)
+{
+	register Elf_Addr **tp __asm__("r13");
+	char *p;
+
+	p = tls_get_addr_common((Elf_Addr**)((Elf_Addr)tp - TLS_TP_OFFSET
+	    - TLS_TCB_SIZE), ti->ti_module, ti->ti_offset);
+
+	return (p + TLS_DTV_OFFSET);
+}
diff --git a/libexec/rtld-elf/powerpc64/rtld_machdep.h b/libexec/rtld-elf/powerpc64/rtld_machdep.h
new file mode 100644
index 000000000000..3c08ea599455
--- /dev/null
+++ b/libexec/rtld-elf/powerpc64/rtld_machdep.h
@@ -0,0 +1,79 @@
+/*-
+ * Copyright (c) 1999, 2000 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef RTLD_MACHDEP_H
+#define RTLD_MACHDEP_H	1
+
+#include <sys/types.h>
+#include <machine/atomic.h>
+
+struct Struct_Obj_Entry;
+
+/* Return the address of the .dynamic section in the dynamic linker. */
+#define rtld_dynamic(obj)    (&_DYNAMIC)
+
+Elf_Addr reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
+    const struct Struct_Obj_Entry *defobj,
+    const struct Struct_Obj_Entry *obj,
+    const Elf_Rel *rel);
+
+#define make_function_pointer(def, defobj) \
+	((defobj)->relocbase + (def)->st_value)
+
+#define call_initfini_pointer(obj, target) \
+	(((InitFunc)(target))())
+
+/*
+ * Lazy binding entry point, called via PLT.
+ */
+void _rtld_bind_start(void);
+
+/*
+ * TLS
+ */
+
+#define TLS_TP_OFFSET	0x7000
+#define TLS_DTV_OFFSET	0x8000
+#define TLS_TCB_SIZE	16
+
+#define round(size, align) \
+	(((size) + (align) - 1) & ~((align) - 1))
+#define calculate_first_tls_offset(size, align) \
+	round(16, align)
+#define calculate_tls_offset(prev_offset, prev_size, size, align) \
+	round(prev_offset + prev_size, align)
+#define calculate_tls_end(off, size) 	((off) + (size))
+
+typedef struct {
+	unsigned long ti_module;
+	unsigned long ti_offset;
+} tls_index;
+
+extern void *__tls_get_addr(tls_index* ti);
+
+#endif
diff --git a/libexec/rtld-elf/powerpc64/rtld_start.S b/libexec/rtld-elf/powerpc64/rtld_start.S
new file mode 100644
index 000000000000..c44f880f6c83
--- /dev/null
+++ b/libexec/rtld-elf/powerpc64/rtld_start.S
@@ -0,0 +1,161 @@
+/*	$NetBSD: rtld_start.S,v 1.4 2001/09/26 04:06:43 mycroft Exp $	*/
+
+/*-
+ * Copyright (C) 1998	Tsubai Masanari
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/asm.h>
+
+.extern _GLOBAL_OFFSET_TABLE_
+.extern _DYNAMIC
+
+_ENTRY(_rtld_start)
+	stdu	%r1,-96(%r1)	/* 16-byte aligned stack for reg saves +
+				   exit_proc & obj _rtld args +
+				   backchain & lrsave stack frame */
+	std	%r3,48(%r1)	/* argc */
+	std	%r4,56(%r1)	/* argv */
+	std	%r5,64(%r1)	/* envp */
+/*	std	%r6,72(%r1)	*//* obj (always 0) */
+/*	std	%r7,80(%r1)	*//* cleanup (always 0) */
+	std	%r8,88(%r1)	/* ps_strings */
+
+	/*
+	 * Perform initial relocation of ld-elf.so. Not as easy as it
+	 * sounds.
+	 *  - perform small forward branch to put PC into link reg
+	 *  - use link-time constants to determine offset to the
+	 *    _DYNAMIC section and the GOT.  Add these to the PC to
+	 *    convert to absolute addresses.
+	 *  - call reloc_non_plt_self() to fix up ld-elf.so's relocations
+	 */
+
+	bl	1f
+	.llong	_DYNAMIC-.
+1:
+	mflr	%r3		/* PC value at .llong */
+	ld	%r4,0(%r3)	/* offset to _DYNAMIC */
+	add	%r3,%r4,%r3	/* r3 = &_DYNAMIC, absolute value */
+
+	ld	%r4,-0x8000(%r2) /* First TOC entry is TOC base */
+	subf	%r4,%r4,%r2	/* Subtract from real TOC base to get base */
+
+	bl	.reloc_non_plt_self /* reloc_non_plt_self(&_DYNAMIC,base) */
+	nop
+
+	/*
+	 * The _rtld() function likes to see a stack layout containing
+	 * { argc, argv[0], argv[1] ... argv[N], 0, env[0], ... , env[N] }
+	 * Since the PowerPC stack was 16-byte aligned at exec time, the
+	 * original stack layout has to be found by moving back a word
+	 * from the argv pointer.
+	 */
+	ld	%r4,56(%r1)
+	addi	%r3,%r4,-8	/* locate argc ptr, &argv[-1] */
+	addi	%r4,%r1,80	/* &exit_proc on stack */
+	addi	%r5,%r1,72	/* &obj_main on stack */
+
+	bl	._rtld		/* &_start = _rtld(sp, &exit_proc, &obj_main)*/
+	nop
+	ld	%r2,8(%r3)
+	ld	%r11,16(%r3)
+	ld	%r3,0(%r3)
+	mtlr	%r3
+
+	/*
+	 * Restore args, with new obj/exit proc
+	 */
+	ld	%r3,48(%r1)	/* argc */
+	ld	%r4,56(%r1)	/* argv */
+	ld	%r5,64(%r1)	/* envp */
+	ld	%r6,72(%r1)	/* obj */
+	ld	%r7,80(%r1)	/* exit proc */
+	ld	%r8,88(%r1)	/* ps_strings */
+
+	blrl			/* _start(argc, argv, envp, obj, cleanup, ps_strings) */
+
+	li	%r0,1		/* _exit() */
+	sc
+
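Annotation (not part of the patch): the comment above describes the block `_rtld_start` recovers from the initial stack, `{ argc, argv[0..N], NULL, env[0..M], NULL }`, which is why argc is found one word below the argv pointer. The small C program below only demonstrates the adjacency that layout implies; it is an observation about process startup on FreeBSD-like systems, not something the patch defines or that portable code should rely on.

```c
#include <stdio.h>

extern char **environ;

/*
 * Illustrative only: on the initial stack the kernel lays out
 * { argc, argv[0..N], NULL, env[0..M], NULL } contiguously.  From C we
 * can at least observe the consequences: argv is NULL-terminated and
 * the environment vector typically starts right after the terminator.
 */
int
main(int argc, char **argv)
{
	printf("argc = %d\n", argc);
	printf("argv[argc] is %s\n", argv[argc] == NULL ? "NULL" : "non-NULL");

	/* Usually true at process start; not a portability guarantee. */
	printf("&argv[argc + 1] == environ: %s\n",
	    (char **)&argv[argc + 1] == environ ? "yes" : "no");
	return (0);
}
```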
+/*
+ * _rtld_bind_start()
+ *
+ * Call into the MI binder. This routine is reached via the PLT call cell
+ * On entry, %r11 contains a pointer to the (object, relocation) tuple.
+ *
+ * Save all registers, call into the binder to resolve and fixup the external
+ * routine, and then transfer to the external routine on return.
+ */
+	.globl	_rtld_bind
+
+_ENTRY(_rtld_bind_start)
+	mflr	%r0
+	std	%r0,16(%r1)		# save lr
+	mfcr	%r0
+	std	%r0,8(%r1)		# save cr
+
+	stdu	%r1,-48-9*8(%r1)	# stack space for 8 regs + header
+	std	%r3,48+0*8(%r1)		# save r3-r31
+	std	%r4,48+1*8(%r1)
+	std	%r5,48+2*8(%r1)
+	std	%r6,48+3*8(%r1)
+	std	%r7,48+4*8(%r1)
+	std	%r8,48+5*8(%r1)
+	std	%r9,48+6*8(%r1)
+	std	%r10,48+7*8(%r1)
+	std	%r12,48+8*8(%r1)
+
+	ld	%r3,0(%r11)
+	ld	%r4,8(%r11)
+	bl	._rtld_bind		# target addr = _rtld_bind(obj, reloff)
+	nop
+
+	ld	%r2,8(%r3)
+	ld	%r11,16(%r3)
+	ld	%r3,0(%r3)
+	mtctr	%r3			# move absolute target addr into ctr
+
+	ld	%r3,48+0*8(%r1)		# restore r3-r31
+	ld	%r4,48+1*8(%r1)
+	ld	%r5,48+2*8(%r1)
+	ld	%r6,48+3*8(%r1)
+	ld	%r7,48+4*8(%r1)
+	ld	%r8,48+5*8(%r1)
+	ld	%r9,48+6*8(%r1)
+	ld	%r10,48+7*8(%r1)
+	ld	%r12,48+8*8(%r1)
+
+	addi	%r1,%r1,48+9*8		# restore stack
+
+	ld	%r0,8(%r1)		# restore cr
+	mtcr	%r0
+	ld	%r0,16(%r1)		# restore lr
+	mtlr	%r0
+
+	bctr				# jump to target
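Annotation (not part of the patch): taken together, `reloc_plt_object()`, `_rtld_bind_start`, and `reloc_jmpslot()` implement lazy binding as follows. Each unresolved PLT slot initially holds a function descriptor whose code address is `_rtld_bind_start` and whose env word points at a two-word "glink" record identifying the object and the relocation offset; once the binder resolves the symbol, the slot is overwritten with the real descriptor. The sketch below models only that data flow in plain C; `fake_obj`, `fake_resolve`, and all the numeric values are invented for illustration, and the real code additionally flushes the data cache after patching.

```c
#include <stdio.h>

/* Mirrors the ELFv1 function descriptor used by the rtld code above. */
struct funcdesc {
	unsigned long addr;	/* code entry point */
	unsigned long toc;	/* TOC pointer for the callee */
	unsigned long env;	/* rtld points this at the glink record */
};

/* Hypothetical stand-in for an rtld object entry. */
struct fake_obj {
	const char *path;
};

static unsigned long
fake_resolve(struct fake_obj *obj, unsigned long reloff)
{
	/* A real binder would look up the symbol and return its descriptor. */
	printf("binding reloff %#lx for %s\n", reloff, obj->path);
	return (0xdeadbeef);	/* pretend this is the resolved entry */
}

int
main(void)
{
	struct fake_obj obj = { "libfoo.so.1" };
	unsigned long glink[2];		/* { obj, reloff } for one PLT slot */
	struct funcdesc slot;

	/* reloc_plt_object(): point the slot at the binder trampoline. */
	slot.addr = 0x1000;		/* would be _rtld_bind_start's entry */
	slot.toc = 0x2000;		/* would be rtld's own TOC */
	slot.env = (unsigned long)glink;
	glink[0] = (unsigned long)&obj;
	glink[1] = 0 * sizeof(unsigned long) * 3;	/* reloff * sizeof(Elf_Rela) */

	/* _rtld_bind_start + reloc_jmpslot(): resolve, then patch the slot. */
	slot.addr = fake_resolve((struct fake_obj *)glink[0], glink[1]);

	printf("slot now points at %#lx\n", slot.addr);
	return (0);
}
```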