Add the arm64 code to the runtime linker.

It cannot be built yet, as we still need libc_pic for a few things, but
this is expected to be ready soon.

Differential Revision:	https://reviews.freebsd.org/D2136
Reviewed by:	kib
Sponsored by:	The FreeBSD Foundation
Andrew Turner 2015-03-31 09:51:19 +00:00
parent b41853c8fe
commit 047c6e3ae6
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=280903
4 changed files with 515 additions and 1 deletion


@ -0,0 +1,317 @@
/*-
* Copyright (c) 2014-2015 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by Andrew Turner
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <stdlib.h>
#include "debug.h"
#include "rtld.h"
#include "rtld_printf.h"
/*
* It is possible for the compiler to emit relocations for unaligned data.
* We handle this situation with these inlines.
*/
#define RELOC_ALIGNED_P(x) \
(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)
/*
* This is not the correct prototype, but we only need it for
* a function pointer to a simple asm function.
*/
void *_rtld_tlsdesc(void *);
void _exit(int);
void
init_pltgot(Obj_Entry *obj)
{
if (obj->pltgot != NULL) {
obj->pltgot[1] = (Elf_Addr) obj;
obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start;
}
}
int
do_copy_relocations(Obj_Entry *dstobj)
{
const Obj_Entry *srcobj, *defobj;
const Elf_Rela *relalim;
const Elf_Rela *rela;
const Elf_Sym *srcsym;
const Elf_Sym *dstsym;
const void *srcaddr;
const char *name;
void *dstaddr;
SymLook req;
size_t size;
int res;
/*
* COPY relocs are invalid outside of the main program
*/
assert(dstobj->mainprog);
relalim = (const Elf_Rela *)((char *)dstobj->rela +
dstobj->relasize);
for (rela = dstobj->rela; rela < relalim; rela++) {
if (ELF_R_TYPE(rela->r_info) != R_AARCH64_COPY)
continue;
dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
name = dstobj->strtab + dstsym->st_name;
size = dstsym->st_size;
symlook_init(&req, name);
req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
req.flags = SYMLOOK_EARLY;
for (srcobj = dstobj->next; srcobj != NULL;
srcobj = srcobj->next) {
res = symlook_obj(&req, srcobj);
if (res == 0) {
srcsym = req.sym_out;
defobj = req.defobj_out;
break;
}
}
if (srcobj == NULL) {
_rtld_error(
"Undefined symbol \"%s\" referenced from COPY relocation in %s",
name, dstobj->path);
return (-1);
}
srcaddr = (const void *)(defobj->relocbase + srcsym->st_value);
memcpy(dstaddr, srcaddr, size);
}
return (0);
}
/*
* Process the PLT relocations.
*/
int
reloc_plt(Obj_Entry *obj)
{
const Elf_Rela *relalim;
const Elf_Rela *rela;
relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
Elf_Addr *where;
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
switch(ELF_R_TYPE(rela->r_info)) {
case R_AARCH64_JUMP_SLOT:
*where += (Elf_Addr)obj->relocbase;
break;
case R_AARCH64_TLSDESC:
if (ELF_R_SYM(rela->r_info) == 0) {
where[0] = (Elf_Addr)_rtld_tlsdesc;
where[1] = rela->r_addend;
} else {
_rtld_error("Unable to handle "
"R_AARCH64_TLSDESC with a symbol set");
return (-1);
}
break;
default:
_rtld_error("Unknown relocation type %u in PLT",
(unsigned int)ELF_R_TYPE(rela->r_info));
return (-1);
}
}
return (0);
}
/*
* LD_BIND_NOW was set - force relocation for all jump slots
*/
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
const Obj_Entry *defobj;
const Elf_Rela *relalim;
const Elf_Rela *rela;
const Elf_Sym *def;
relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
Elf_Addr *where;
switch(ELF_R_TYPE(rela->r_info)) {
case R_AARCH64_JUMP_SLOT:
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
def = find_symdef(ELF_R_SYM(rela->r_info), obj,
&defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
if (def == NULL) {
dbg("reloc_jmpslots: sym not found");
return (-1);
}
*where = (Elf_Addr)(defobj->relocbase + def->st_value);
break;
case R_AARCH64_TLSDESC:
break;
default:
_rtld_error("Unknown relocation type %x in jmpslot",
(unsigned int)ELF_R_TYPE(rela->r_info));
return (-1);
}
}
return (0);
}
int
reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{
/* XXX not implemented */
return (0);
}
int
reloc_gnu_ifunc(Obj_Entry *obj, int flags,
struct Struct_RtldLockState *lockstate)
{
/* XXX not implemented */
return (0);
}
Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *defobj,
const Obj_Entry *obj, const Elf_Rel *rel)
{
assert(ELF_R_TYPE(rel->r_info) == R_AARCH64_JUMP_SLOT);
if (*where != target)
*where = target;
return target;
}
/*
* Process non-PLT relocations
*/
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
RtldLockState *lockstate)
{
const Obj_Entry *defobj;
const Elf_Rela *relalim;
const Elf_Rela *rela;
const Elf_Sym *def;
SymCache *cache;
Elf_Addr *where;
unsigned long symnum;
if ((flags & SYMLOOK_IFUNC) != 0)
/* XXX not implemented */
return (0);
/*
* The dynamic loader may be called from a thread, we have
* limited amounts of stack available so we cannot use alloca().
*/
if (obj == obj_rtld)
cache = NULL;
else
cache = calloc(obj->dynsymcount, sizeof(SymCache));
/* No need to check for NULL here */
relalim = (const Elf_Rela *)((caddr_t)obj->rela + obj->relasize);
for (rela = obj->rela; rela < relalim; rela++) {
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
symnum = ELF_R_SYM(rela->r_info);
switch (ELF_R_TYPE(rela->r_info)) {
case R_AARCH64_ABS64:
case R_AARCH64_GLOB_DAT:
def = find_symdef(symnum, obj, &defobj, flags, cache,
lockstate);
if (def == NULL)
return (-1);
*where = (Elf_Addr)defobj->relocbase + def->st_value;
break;
case R_AARCH64_COPY:
/*
* These are deferred until all other relocations have
* been done. All we do here is make sure that the
* COPY relocation is not in a shared library. They
* are allowed only in executable files.
*/
if (!obj->mainprog) {
_rtld_error("%s: Unexpected R_AARCH64_COPY "
"relocation in shared library", obj->path);
return (-1);
}
break;
case R_AARCH64_RELATIVE:
*where = (Elf_Addr)(obj->relocbase + rela->r_addend);
break;
default:
rtld_printf("%s: Unhandled relocation %lu\n",
obj->path, ELF_R_TYPE(rela->r_info));
return (-1);
}
}
return (0);
}
void
allocate_initial_tls(Obj_Entry *objs)
{
Elf_Addr **tp;
/*
* Fix the size of the static TLS block by using the maximum
* offset allocated so far and adding a bit for dynamic modules to
* use.
*/
tls_static_space = tls_last_offset + tls_last_size +
RTLD_STATIC_TLS_EXTRA;
tp = (Elf_Addr **) allocate_tls(objs, NULL, TLS_TCB_SIZE, 16);
asm volatile("msr tpidr_el0, %0" : : "r"(tp));
}


@ -0,0 +1,83 @@
/*-
* Copyright (c) 1999, 2000 John D. Polstra.
* Copyright (c) 2014 the FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by Andrew Turner
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef RTLD_MACHDEP_H
#define RTLD_MACHDEP_H 1
#include <sys/types.h>
#include <machine/atomic.h>
struct Struct_Obj_Entry;
/* Return the address of the .dynamic section in the dynamic linker. */
#define rtld_dynamic(obj) \
({ \
Elf_Addr _dynamic_addr; \
asm volatile("adr %0, _DYNAMIC" : "=&r"(_dynamic_addr)); \
(const Elf_Dyn *)_dynamic_addr; \
})
#define RTLD_IS_DYNAMIC() (1)
Elf_Addr reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
const struct Struct_Obj_Entry *defobj,
const struct Struct_Obj_Entry *obj,
const Elf_Rel *rel);
#define make_function_pointer(def, defobj) \
((defobj)->relocbase + (def)->st_value)
#define call_initfini_pointer(obj, target) \
(((InitFunc)(target))())
#define call_init_pointer(obj, target) \
(((InitArrFunc)(target))(main_argc, main_argv, environ))
#define round(size, align) \
(((size) + (align) - 1) & ~((align) - 1))
#define calculate_first_tls_offset(size, align) \
round(size, align)
#define calculate_tls_offset(prev_offset, prev_size, size, align) \
round((prev_offset) + (size), align)
#define calculate_tls_end(off, size) ((off) + (size))
#define TLS_TCB_SIZE 8
typedef struct {
unsigned long ti_module;
unsigned long ti_offset;
} tls_index;
extern void *__tls_get_addr(tls_index *ti);
#define RTLD_DEFAULT_STACK_PF_EXEC PF_X
#define RTLD_DEFAULT_STACK_EXEC PROT_EXEC
#endif
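The TLS offset macros above are pure arithmetic, so a small stand-alone sketch (not part of the committed header; the module sizes and alignments are made up) shows the values they compute:

#include <stdio.h>

#define round(size, align) \
	(((size) + (align) - 1) & ~((align) - 1))
#define calculate_first_tls_offset(size, align) \
	round(size, align)
#define calculate_tls_offset(prev_offset, prev_size, size, align) \
	round((prev_offset) + (size), align)
#define calculate_tls_end(off, size) ((off) + (size))

int
main(void)
{
	/* First module: 24 bytes of TLS data, 16-byte alignment. */
	unsigned long off1 = calculate_first_tls_offset(24, 16);	/* 32 */
	/* Second module: 8 bytes of TLS data, 8-byte alignment. */
	unsigned long off2 = calculate_tls_offset(off1, 24, 8, 8);	/* 40 */

	printf("off1=%lu end1=%lu off2=%lu end2=%lu\n",
	    off1, calculate_tls_end(off1, 24UL),
	    off2, calculate_tls_end(off2, 8UL));
	return (0);
}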


@ -0,0 +1,113 @@
/*-
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by Andrew Turner under
* sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
ENTRY(.rtld_start)
mov x19, x0 /* Put ps_strings in a callee-saved register */
mov x20, sp /* And the stack pointer */
sub x8, x20, #16 /* Make room for obj_main & exit proc */
mov sp, x8 /* Update the stack pointer */
mov x0, x20 /* Pass the stack we were given to _rtld */
mov x1, sp /* exit_proc */
add x2, x1, #8 /* obj_main */
bl _rtld /* Call the loader */
mov x8, x0 /* Backup the entry point */
ldr x2, [sp] /* Load cleanup */
ldr x1, [sp, #8] /* Load obj_main */
mov x0, x19 /* Restore ps_strings */
mov sp, x20 /* Restore the stack pointer */
br x8 /* Jump to the entry point */
END(.rtld_start)
/*
* sp + 0 = &GOT[x + 3]
* sp + 8 = RA
* x16 = &GOT[2]
* x17 = &_rtld_bind_start
*/
ENTRY(_rtld_bind_start)
mov x17, sp
/* Save the arguments */
stp x0, x1, [sp, #-16]!
stp x2, x3, [sp, #-16]!
stp x4, x5, [sp, #-16]!
stp x6, x7, [sp, #-16]!
/* Calculate reloff */
ldr x2, [x17, #0] /* Get the address of the entry */
sub x1, x2, x16 /* Find its offset */
sub x1, x1, #8 /* Adjust for x16 not being at offset 0 */
/* Each rela item has 3 entries, so we need reloff = 3 * index */
lsl x3, x1, #1 /* x3 = 2 * offset */
add x1, x1, x3 /* x1 = x3 + offset = 3 * offset */
/* Load obj */
ldr x0, [x16, #-8]
/* Call into rtld */
bl _rtld_bind
/* Restore the registers saved by the plt code */
ldp xzr, x30, [sp, #(4 * 16)]
/* Backup the address to branch to */
mov x16, x0
/* restore the arguments */
ldp x6, x7, [sp], #16
ldp x4, x5, [sp], #16
ldp x2, x3, [sp], #16
ldp x0, x1, [sp], #16
/* And the part of the stack the plt entry handled */
add sp, sp, #16
/* Call into the correct function */
br x16
END(_rtld_bind_start)
/*
* uint64_t _rtld_tlsdesc(struct tlsdesc *);
*
* struct tlsdesc {
* uint64_t ptr;
* uint64_t data;
* };
*
* Returns the data.
*/
ENTRY(_rtld_tlsdesc)
ldr x0, [x0, #8]
RET
END(_rtld_tlsdesc)
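For illustration only (not part of the committed file): a rough, stand-alone C rendering of the reloff arithmetic in _rtld_bind_start above. bind_slot(), fake_rtld_bind(), and the pretend GOT are invented for the example; the real _rtld_bind() in rtld.c is assumed to take the object and a byte offset into the PLT relocation table.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct Struct_Obj_Entry;

/*
 * Stand-in for the real binder, which looks up the symbol for the
 * Elf_Rela at byte offset reloff in .rela.plt and returns its address.
 */
static uint64_t
fake_rtld_bind(struct Struct_Obj_Entry *obj, uint64_t reloff)
{
	(void)obj;
	printf("reloff = %" PRIu64 " (rela index %" PRIu64 ")\n",
	    reloff, reloff / 24);
	return (0);
}

static uint64_t
bind_slot(uint64_t *got2, uint64_t *slot)
{
	/*
	 * got2 is &GOT[2] (x16); slot is &GOT[x + 3], pushed on the stack
	 * by the PLT stub and reloaded into x2 above.
	 */
	uint64_t off = (uint64_t)slot - (uint64_t)got2 - 8;	/* 8 * index */
	uint64_t reloff = off + 2 * off;	/* 3 * off = index * sizeof(Elf64_Rela) */
	/* GOT[1] holds the Obj_Entry pointer stored by init_pltgot(). */
	struct Struct_Obj_Entry *obj = *(struct Struct_Obj_Entry **)(got2 - 1);

	return (fake_rtld_bind(obj, reloff));
}

int
main(void)
{
	uint64_t got[8] = { 0 };	/* pretend GOT: entries 3..7 are jump slots */

	bind_slot(&got[2], &got[5]);	/* third jump slot: reloff 48, index 2 */
	return (0);
}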


@ -4368,7 +4368,8 @@ tls_get_addr_common(Elf_Addr **dtvp, int index, size_t offset)
return (tls_get_addr_slow(dtvp, index, offset));
}
-#if defined(__arm__) || defined(__mips__) || defined(__powerpc__)
+#if defined(__aarch64__) || defined(__arm__) || defined(__mips__) || \
+    defined(__powerpc__)
/*
* Allocate Static TLS using the Variant I method.