IFUNC symbol type shall be processed for non-PLT relocations,

e.g. when a global variable is initialized with a pointer to ifunc.
Add symbol type check and call resolver for STT_GNU_IFUNC symbol types
when processing non-PLT relocations, but only after non-IFUNC
relocations are done.  The two-phase processing is required since
resolvers may reference other symbols, which must be ready to use when
resolver calls are done.

Restructure reloc_non_plt() on x86 to call find_symdef() and handle
IFUNC in single place.

For non-x86 reloc_non_plt(), check for a call requesting IFUNC
relocation processing and do nothing, to avoid processing relocs twice.

PR:	193048
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
This commit is contained in:
Konstantin Belousov 2014-08-29 09:29:10 +00:00
parent c6fef2d49a
commit 14c3564759
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=270798
9 changed files with 311 additions and 334 deletions

View File

@ -125,213 +125,186 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
const Elf_Rela *relalim;
const Elf_Rela *rela;
SymCache *cache;
int r = -1;
const Elf_Sym *def;
const Obj_Entry *defobj;
Elf_Addr *where, symval;
Elf32_Addr *where32;
int r;
r = -1;
/*
* The dynamic loader may be called from a thread, we have
* limited amounts of stack available so we cannot use alloca().
*/
if (obj != obj_rtld) {
cache = calloc(obj->dynsymcount, sizeof(SymCache));
/* No need to check for NULL here */
cache = calloc(obj->dynsymcount, sizeof(SymCache));
/* No need to check for NULL here */
} else
cache = NULL;
cache = NULL;
relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize);
relalim = (const Elf_Rela *)((caddr_t)obj->rela + obj->relasize);
for (rela = obj->rela; rela < relalim; rela++) {
Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset);
Elf32_Addr *where32 = (Elf32_Addr *)where;
switch (ELF_R_TYPE(rela->r_info)) {
case R_X86_64_NONE:
break;
case R_X86_64_64:
{
const Elf_Sym *def;
const Obj_Entry *defobj;
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL)
goto done;
*where = (Elf_Addr) (defobj->relocbase + def->st_value + rela->r_addend);
}
break;
case R_X86_64_PC32:
/*
* I don't think the dynamic linker should ever see this
* type of relocation. But the binutils-2.6 tools sometimes
* generate it.
* First, resolve symbol for relocations which
* reference symbols.
*/
{
const Elf_Sym *def;
const Obj_Entry *defobj;
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL)
goto done;
*where32 = (Elf32_Addr) (unsigned long) (defobj->relocbase +
def->st_value + rela->r_addend - (Elf_Addr) where);
switch (ELF_R_TYPE(rela->r_info)) {
case R_X86_64_64:
case R_X86_64_PC32:
case R_X86_64_GLOB_DAT:
case R_X86_64_TPOFF64:
case R_X86_64_TPOFF32:
case R_X86_64_DTPMOD64:
case R_X86_64_DTPOFF64:
case R_X86_64_DTPOFF32:
def = find_symdef(ELF_R_SYM(rela->r_info), obj,
&defobj, flags, cache, lockstate);
if (def == NULL)
goto done;
/*
* If symbol is IFUNC, only perform relocation
* when caller allowed it by passing
* SYMLOOK_IFUNC flag. Skip the relocations
* otherwise.
*
* Also error out in case IFUNC relocations
* are specified for TLS, which cannot be
* usefully interpreted.
*/
if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
switch (ELF_R_TYPE(rela->r_info)) {
case R_X86_64_64:
case R_X86_64_PC32:
case R_X86_64_GLOB_DAT:
if ((flags & SYMLOOK_IFUNC) == 0)
continue;
symval = (Elf_Addr)rtld_resolve_ifunc(
defobj, def);
break;
case R_X86_64_TPOFF64:
case R_X86_64_TPOFF32:
case R_X86_64_DTPMOD64:
case R_X86_64_DTPOFF64:
case R_X86_64_DTPOFF32:
_rtld_error("%s: IFUNC for TLS reloc",
obj->path);
goto done;
}
} else {
if ((flags & SYMLOOK_IFUNC) != 0)
continue;
symval = (Elf_Addr)defobj->relocbase +
def->st_value;
}
break;
default:
if ((flags & SYMLOOK_IFUNC) != 0)
continue;
break;
}
break;
/* missing: R_X86_64_GOT32 R_X86_64_PLT32 */
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
where32 = (Elf32_Addr *)where;
case R_X86_64_COPY:
switch (ELF_R_TYPE(rela->r_info)) {
case R_X86_64_NONE:
break;
case R_X86_64_64:
*where = symval + rela->r_addend;
break;
case R_X86_64_PC32:
/*
* I don't think the dynamic linker should
* ever see this type of relocation. But the
* binutils-2.6 tools sometimes generate it.
*/
*where32 = (Elf32_Addr)(unsigned long)(symval +
rela->r_addend - (Elf_Addr)where);
break;
/* missing: R_X86_64_GOT32 R_X86_64_PLT32 */
case R_X86_64_COPY:
/*
* These are deferred until all other relocations have
* been done. All we do here is make sure that the COPY
* relocation is not in a shared library. They are allowed
* only in executable files.
*/
if (!obj->mainprog) {
_rtld_error("%s: Unexpected R_X86_64_COPY "
"relocation in shared library", obj->path);
goto done;
}
break;
case R_X86_64_GLOB_DAT:
*where = symval;
break;
case R_X86_64_TPOFF64:
/*
* We lazily allocate offsets for static TLS
* as we see the first relocation that
* references the TLS block. This allows us to
* support (small amounts of) static TLS in
* dynamically loaded modules. If we run out
* of space, we generate an error.
*/
if (!defobj->tls_done) {
if (!allocate_tls_offset((Obj_Entry*) defobj)) {
_rtld_error("%s: No space available "
"for static Thread Local Storage",
obj->path);
goto done;
}
}
*where = (Elf_Addr)(def->st_value - defobj->tlsoffset +
rela->r_addend);
break;
case R_X86_64_TPOFF32:
/*
* We lazily allocate offsets for static TLS
* as we see the first relocation that
* references the TLS block. This allows us to
* support (small amounts of) static TLS in
* dynamically loaded modules. If we run out
* of space, we generate an error.
*/
if (!defobj->tls_done) {
if (!allocate_tls_offset((Obj_Entry*) defobj)) {
_rtld_error("%s: No space available "
"for static Thread Local Storage",
obj->path);
goto done;
}
}
*where32 = (Elf32_Addr)(def->st_value -
defobj->tlsoffset + rela->r_addend);
break;
case R_X86_64_DTPMOD64:
*where += (Elf_Addr)defobj->tlsindex;
break;
case R_X86_64_DTPOFF64:
*where += (Elf_Addr)(def->st_value + rela->r_addend);
break;
case R_X86_64_DTPOFF32:
*where32 += (Elf32_Addr)(def->st_value +
rela->r_addend);
break;
case R_X86_64_RELATIVE:
*where = (Elf_Addr)(obj->relocbase + rela->r_addend);
break;
/*
* These are deferred until all other relocations have
* been done. All we do here is make sure that the COPY
* relocation is not in a shared library. They are allowed
* only in executable files.
* missing:
* R_X86_64_GOTPCREL, R_X86_64_32, R_X86_64_32S, R_X86_64_16,
* R_X86_64_PC16, R_X86_64_8, R_X86_64_PC8
*/
if (!obj->mainprog) {
_rtld_error("%s: Unexpected R_X86_64_COPY relocation"
" in shared library", obj->path);
goto done;
}
break;
case R_X86_64_GLOB_DAT:
{
const Elf_Sym *def;
const Obj_Entry *defobj;
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL)
default:
_rtld_error("%s: Unsupported relocation type %u"
" in non-PLT relocations\n", obj->path,
(unsigned int)ELF_R_TYPE(rela->r_info));
goto done;
*where = (Elf_Addr) (defobj->relocbase + def->st_value);
}
break;
case R_X86_64_TPOFF64:
{
const Elf_Sym *def;
const Obj_Entry *defobj;
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL)
goto done;
/*
* We lazily allocate offsets for static TLS as we
* see the first relocation that references the
* TLS block. This allows us to support (small
* amounts of) static TLS in dynamically loaded
* modules. If we run out of space, we generate an
* error.
*/
if (!defobj->tls_done) {
if (!allocate_tls_offset((Obj_Entry*) defobj)) {
_rtld_error("%s: No space available for static "
"Thread Local Storage", obj->path);
goto done;
}
}
*where = (Elf_Addr) (def->st_value - defobj->tlsoffset +
rela->r_addend);
}
break;
case R_X86_64_TPOFF32:
{
const Elf_Sym *def;
const Obj_Entry *defobj;
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL)
goto done;
/*
* We lazily allocate offsets for static TLS as we
* see the first relocation that references the
* TLS block. This allows us to support (small
* amounts of) static TLS in dynamically loaded
* modules. If we run out of space, we generate an
* error.
*/
if (!defobj->tls_done) {
if (!allocate_tls_offset((Obj_Entry*) defobj)) {
_rtld_error("%s: No space available for static "
"Thread Local Storage", obj->path);
goto done;
}
}
*where32 = (Elf32_Addr) (def->st_value -
defobj->tlsoffset +
rela->r_addend);
}
break;
case R_X86_64_DTPMOD64:
{
const Elf_Sym *def;
const Obj_Entry *defobj;
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL)
goto done;
*where += (Elf_Addr) defobj->tlsindex;
}
break;
case R_X86_64_DTPOFF64:
{
const Elf_Sym *def;
const Obj_Entry *defobj;
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL)
goto done;
*where += (Elf_Addr) (def->st_value + rela->r_addend);
}
break;
case R_X86_64_DTPOFF32:
{
const Elf_Sym *def;
const Obj_Entry *defobj;
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL)
goto done;
*where32 += (Elf32_Addr) (def->st_value + rela->r_addend);
}
break;
case R_X86_64_RELATIVE:
*where = (Elf_Addr)(obj->relocbase + rela->r_addend);
break;
/* missing: R_X86_64_GOTPCREL, R_X86_64_32, R_X86_64_32S, R_X86_64_16, R_X86_64_PC16, R_X86_64_8, R_X86_64_PC8 */
default:
_rtld_error("%s: Unsupported relocation type %u"
" in non-PLT relocations\n", obj->path,
(unsigned int)ELF_R_TYPE(rela->r_info));
goto done;
}
}
r = 0;
done:
if (cache != NULL)
free(cache);
free(cache);
return (r);
}

View File

@ -324,6 +324,10 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
/* The relocation for the dynamic loader has already been done. */
if (obj == obj_rtld)
return (0);
if ((flags & SYMLOOK_IFUNC) != 0)
/* XXX not implemented */
return (0);
/*
* The dynamic loader may be called from a thread, we have
* limited amounts of stack available so we cannot use alloca().

View File

@ -126,168 +126,142 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
const Elf_Rel *rellim;
const Elf_Rel *rel;
SymCache *cache;
int r = -1;
const Elf_Sym *def;
const Obj_Entry *defobj;
Elf_Addr *where, symval, add;
int r;
r = -1;
/*
* The dynamic loader may be called from a thread, we have
* limited amounts of stack available so we cannot use alloca().
*/
if (obj != obj_rtld) {
cache = calloc(obj->dynsymcount, sizeof(SymCache));
/* No need to check for NULL here */
cache = calloc(obj->dynsymcount, sizeof(SymCache));
/* No need to check for NULL here */
} else
cache = NULL;
cache = NULL;
rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize);
rellim = (const Elf_Rel *)((caddr_t) obj->rel + obj->relsize);
for (rel = obj->rel; rel < rellim; rel++) {
Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rel->r_offset);
switch (ELF_R_TYPE(rel->r_info)) {
case R_386_NONE:
break;
case R_386_32:
{
const Elf_Sym *def;
const Obj_Entry *defobj;
def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL)
goto done;
*where += (Elf_Addr) (defobj->relocbase + def->st_value);
}
break;
case R_386_PC32:
/*
* I don't think the dynamic linker should ever see this
* type of relocation. But the binutils-2.6 tools sometimes
* generate it.
*/
{
const Elf_Sym *def;
const Obj_Entry *defobj;
def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL)
goto done;
*where +=
(Elf_Addr) (defobj->relocbase + def->st_value) -
(Elf_Addr) where;
}
break;
case R_386_COPY:
/*
* These are deferred until all other relocations have
* been done. All we do here is make sure that the COPY
* relocation is not in a shared library. They are allowed
* only in executable files.
*/
if (!obj->mainprog) {
_rtld_error("%s: Unexpected R_386_COPY relocation"
" in shared library", obj->path);
goto done;
}
break;
case R_386_GLOB_DAT:
{
const Elf_Sym *def;
const Obj_Entry *defobj;
def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL)
goto done;
*where = (Elf_Addr) (defobj->relocbase + def->st_value);
}
break;
case R_386_RELATIVE:
*where += (Elf_Addr) obj->relocbase;
break;
case R_386_TLS_TPOFF:
case R_386_TLS_TPOFF32:
{
const Elf_Sym *def;
const Obj_Entry *defobj;
Elf_Addr add;
def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL)
goto done;
/*
* We lazily allocate offsets for static TLS as we
* see the first relocation that references the
* TLS block. This allows us to support (small
* amounts of) static TLS in dynamically loaded
* modules. If we run out of space, we generate an
* error.
*/
if (!defobj->tls_done) {
if (!allocate_tls_offset((Obj_Entry*) defobj)) {
_rtld_error("%s: No space available for static "
"Thread Local Storage", obj->path);
goto done;
switch (ELF_R_TYPE(rel->r_info)) {
case R_386_32:
case R_386_PC32:
case R_386_GLOB_DAT:
case R_386_TLS_TPOFF:
case R_386_TLS_TPOFF32:
case R_386_TLS_DTPMOD32:
case R_386_TLS_DTPOFF32:
def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL)
goto done;
if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
switch (ELF_R_TYPE(rel->r_info)) {
case R_386_32:
case R_386_PC32:
case R_386_GLOB_DAT:
if ((flags & SYMLOOK_IFUNC) == 0)
continue;
symval = (Elf_Addr)rtld_resolve_ifunc(
defobj, def);
break;
case R_386_TLS_TPOFF:
case R_386_TLS_TPOFF32:
case R_386_TLS_DTPMOD32:
case R_386_TLS_DTPOFF32:
_rtld_error("%s: IFUNC for TLS reloc",
obj->path);
goto done;
}
} else {
if ((flags & SYMLOOK_IFUNC) != 0)
continue;
symval = (Elf_Addr)defobj->relocbase +
def->st_value;
}
}
add = (Elf_Addr) (def->st_value - defobj->tlsoffset);
if (ELF_R_TYPE(rel->r_info) == R_386_TLS_TPOFF)
*where += add;
else
*where -= add;
break;
default:
if ((flags & SYMLOOK_IFUNC) != 0)
continue;
break;
}
break;
where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
case R_386_TLS_DTPMOD32:
{
const Elf_Sym *def;
const Obj_Entry *defobj;
def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL)
switch (ELF_R_TYPE(rel->r_info)) {
case R_386_NONE:
break;
case R_386_32:
*where += symval;
break;
case R_386_PC32:
/*
* I don't think the dynamic linker should ever
* see this type of relocation. But the
* binutils-2.6 tools sometimes generate it.
*/
*where += symval - (Elf_Addr)where;
break;
case R_386_COPY:
/*
* These are deferred until all other
* relocations have been done. All we do here
* is make sure that the COPY relocation is
* not in a shared library. They are allowed
* only in executable files.
*/
if (!obj->mainprog) {
_rtld_error("%s: Unexpected R_386_COPY "
"relocation in shared library", obj->path);
goto done;
}
break;
case R_386_GLOB_DAT:
*where = symval;
break;
case R_386_RELATIVE:
*where += (Elf_Addr)obj->relocbase;
break;
case R_386_TLS_TPOFF:
case R_386_TLS_TPOFF32:
/*
* We lazily allocate offsets for static TLS
* as we see the first relocation that
* references the TLS block. This allows us to
* support (small amounts of) static TLS in
* dynamically loaded modules. If we run out
* of space, we generate an error.
*/
if (!defobj->tls_done) {
if (!allocate_tls_offset((Obj_Entry*) defobj)) {
_rtld_error("%s: No space available "
"for static Thread Local Storage",
obj->path);
goto done;
}
}
add = (Elf_Addr)(def->st_value - defobj->tlsoffset);
if (ELF_R_TYPE(rel->r_info) == R_386_TLS_TPOFF)
*where += add;
else
*where -= add;
break;
case R_386_TLS_DTPMOD32:
*where += (Elf_Addr)defobj->tlsindex;
break;
case R_386_TLS_DTPOFF32:
*where += (Elf_Addr) def->st_value;
break;
default:
_rtld_error("%s: Unsupported relocation type %d"
" in non-PLT relocations\n", obj->path,
ELF_R_TYPE(rel->r_info));
goto done;
*where += (Elf_Addr) defobj->tlsindex;
}
break;
case R_386_TLS_DTPOFF32:
{
const Elf_Sym *def;
const Obj_Entry *defobj;
def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL)
goto done;
*where += (Elf_Addr) def->st_value;
}
break;
default:
_rtld_error("%s: Unsupported relocation type %d"
" in non-PLT relocations\n", obj->path,
ELF_R_TYPE(rel->r_info));
goto done;
}
}
r = 0;
done:
if (cache != NULL)
free(cache);
free(cache);
return (r);
}

View File

@ -275,6 +275,10 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
if (obj == obj_rtld)
return (0);
if ((flags & SYMLOOK_IFUNC) != 0)
/* XXX not implemented */
return (0);
#ifdef SUPPORT_OLD_BROKEN_LD
broken = 0;
sym = obj->symtab;

View File

@ -294,6 +294,10 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
SymCache *cache;
int r = -1;
if ((flags & SYMLOOK_IFUNC) != 0)
/* XXX not implemented */
return (0);
/*
* The dynamic loader may be called from a thread, we have
* limited amounts of stack available so we cannot use alloca().

View File

@ -290,6 +290,10 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
int bytes = obj->dynsymcount * sizeof(SymCache);
int r = -1;
if ((flags & SYMLOOK_IFUNC) != 0)
/* XXX not implemented */
return (0);
/*
* The dynamic loader may be called from a thread, we have
* limited amounts of stack available so we cannot use alloca().

View File

@ -2546,7 +2546,7 @@ relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
}
}
/* Process the non-PLT relocations. */
/* Process the non-PLT non-IFUNC relocations. */
if (reloc_non_plt(obj, rtldobj, flags, lockstate))
return (-1);
@ -2559,7 +2559,6 @@ relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
}
}
/* Set the special PLT or GOT entries. */
init_pltgot(obj);
@ -2571,6 +2570,15 @@ relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
if (reloc_jmpslots(obj, flags, lockstate) == -1)
return (-1);
/*
* Process the non-PLT IFUNC relocations. The relocations are
* processed in two phases, because IFUNC resolvers may
* reference other symbols, which must be readily processed
* before resolvers are called.
*/
if (reloc_non_plt(obj, rtldobj, flags | SYMLOOK_IFUNC, lockstate))
return (-1);
if (obj->relro_size > 0) {
if (mprotect(obj->relro_page, obj->relro_size,
PROT_READ) == -1) {

View File

@ -293,6 +293,8 @@ typedef struct Struct_Obj_Entry {
#define SYMLOOK_DLSYM 0x02 /* Return newest versioned symbol. Used by
dlsym. */
#define SYMLOOK_EARLY 0x04 /* Symlook is done during initialization. */
#define SYMLOOK_IFUNC 0x08 /* Allow IFUNC processing in
reloc_non_plt(). */
/* Flags for load_object(). */
#define RTLD_LO_NOLOAD 0x01 /* dlopen() specified RTLD_NOLOAD. */

View File

@ -300,6 +300,10 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
SymCache *cache;
int r = -1;
if ((flags & SYMLOOK_IFUNC) != 0)
/* XXX not implemented */
return (0);
/*
* The dynamic loader may be called from a thread, we have
* limited amounts of stack available so we cannot use alloca().