vtable support for x86-64 (resubmission)
Bo Thorsen
bo@sonofthor.dk
Tue Apr 10 03:26:00 GMT 2001
This patch hasn't been accepted or rejected yet. I don't have write permission,
so please install it for me.
Bo.
---------- Forwarded message ----------
Date: Fri, 6 Apr 2001 13:44:54 +0100 (BST)
From: Bo Thorsen <bo@sonofthor.dk>
To: binutils@sources.redhat.com
Subject: vtable support for x86-64
This is the patch I said I didn't want to do :-( I would have preferred to
implement vtable support properly, but I need it right now and can't wait
until I get some binutils education. I still want to try a proper
implementation, but for now the hack used by all the other architectures will
have to do.
I don't have write access, so someone else has to commit it if it's OK.
Bo.
2001-04-06 Bo Thorsen <bo@suse.de>
* elf64-x86-64.c: Add C++ vtable hack.
Small whitespace and comment changes.
* include/elf/x86-64.h: Add R_X86_64_GNU_VTINHERIT and
R_X86_64_GNU_VTENTRY reloc numbers.
Index: include/elf/x86-64.h
===================================================================
RCS file: /cvs/src/src/include/elf/x86-64.h,v
retrieving revision 1.1
diff -u -r1.1 x86-64.h
--- x86-64.h 2000/11/30 19:05:18 1.1
+++ x86-64.h 2001/04/06 12:42:54
@@ -41,6 +41,8 @@
RELOC_NUMBER (R_X86_64_PC16, 13) /* 16 bit sign extended pc relative*/
RELOC_NUMBER (R_X86_64_8, 14) /* Direct 8 bit sign extended */
RELOC_NUMBER (R_X86_64_PC8, 15) /* 8 bit sign extended pc relative*/
+ RELOC_NUMBER (R_X86_64_GNU_VTINHERIT, 250) /* GNU C++ hack */
+ RELOC_NUMBER (R_X86_64_GNU_VTENTRY, 251) /* GNU C++ hack */
END_RELOC_NUMBERS (R_X86_64_max)
#endif
Index: bfd/elf64-x86-64.c
===================================================================
RCS file: /cvs/src/src/bfd/elf64-x86-64.c,v
retrieving revision 1.11
diff -u -r1.11 elf64-x86-64.c
--- elf64-x86-64.c 2001/03/07 13:49:11 1.11
+++ elf64-x86-64.c 2001/04/06 12:42:55
@@ -37,29 +37,41 @@
static reloc_howto_type x86_64_elf_howto_table[] =
{
HOWTO(R_X86_64_NONE, 0, 0, 0, false, 0, complain_overflow_dont,
- bfd_elf_generic_reloc, "R_X86_64_NONE", false, 0x00000000, 0x00000000, false),
+ bfd_elf_generic_reloc, "R_X86_64_NONE", false, 0x00000000, 0x00000000,
+ false),
HOWTO(R_X86_64_64, 0, 4, 64, false, 0, complain_overflow_bitfield,
- bfd_elf_generic_reloc, "R_X86_64_64", false, MINUS_ONE, MINUS_ONE, false),
+ bfd_elf_generic_reloc, "R_X86_64_64", false, MINUS_ONE, MINUS_ONE,
+ false),
HOWTO(R_X86_64_PC32, 0, 4, 32, true, 0, complain_overflow_signed,
- bfd_elf_generic_reloc, "R_X86_64_PC32", false, 0xffffffff, 0xffffffff, true),
+ bfd_elf_generic_reloc, "R_X86_64_PC32", false, 0xffffffff, 0xffffffff,
+ true),
HOWTO(R_X86_64_GOT32, 0, 4, 32, false, 0, complain_overflow_signed,
- bfd_elf_generic_reloc, "R_X86_64_GOT32", false, 0xffffffff, 0xffffffff, false),
+ bfd_elf_generic_reloc, "R_X86_64_GOT32", false, 0xffffffff, 0xffffffff,
+ false),
HOWTO(R_X86_64_PLT32, 0, 4, 32, true, 0, complain_overflow_signed,
- bfd_elf_generic_reloc, "R_X86_64_PLT32", false, 0xffffffff, 0xffffffff, true),
+ bfd_elf_generic_reloc, "R_X86_64_PLT32", false, 0xffffffff, 0xffffffff,
+ true),
HOWTO(R_X86_64_COPY, 0, 4, 32, false, 0, complain_overflow_bitfield,
- bfd_elf_generic_reloc, "R_X86_64_COPY", false, 0xffffffff, 0xffffffff, false),
+ bfd_elf_generic_reloc, "R_X86_64_COPY", false, 0xffffffff, 0xffffffff,
+ false),
HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, false, 0, complain_overflow_bitfield,
- bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", false, MINUS_ONE, MINUS_ONE, false),
+ bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", false, MINUS_ONE,
+ MINUS_ONE, false),
HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, false, 0, complain_overflow_bitfield,
- bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", false, MINUS_ONE, MINUS_ONE, false),
+ bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", false, MINUS_ONE,
+ MINUS_ONE, false),
HOWTO(R_X86_64_RELATIVE, 0, 4, 64, false, 0, complain_overflow_bitfield,
- bfd_elf_generic_reloc, "R_X86_64_RELATIVE", false, MINUS_ONE, MINUS_ONE, false),
+ bfd_elf_generic_reloc, "R_X86_64_RELATIVE", false, MINUS_ONE,
+ MINUS_ONE, false),
HOWTO(R_X86_64_GOTPCREL, 0, 4, 32, true,0 , complain_overflow_signed,
- bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", false, 0xffffffff, 0xffffffff, true),
+ bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", false, 0xffffffff,
+ 0xffffffff, true),
HOWTO(R_X86_64_32, 0, 4, 32, false, 0, complain_overflow_unsigned,
- bfd_elf_generic_reloc, "R_X86_64_32", false, 0xffffffff, 0xffffffff, false),
+ bfd_elf_generic_reloc, "R_X86_64_32", false, 0xffffffff, 0xffffffff,
+ false),
HOWTO(R_X86_64_32S, 0, 4, 32, false, 0, complain_overflow_signed,
- bfd_elf_generic_reloc, "R_X86_64_32S", false, 0xffffffff, 0xffffffff, false),
+ bfd_elf_generic_reloc, "R_X86_64_32S", false, 0xffffffff, 0xffffffff,
+ false),
HOWTO(R_X86_64_16, 0, 1, 16, false, 0, complain_overflow_bitfield,
bfd_elf_generic_reloc, "R_X86_64_16", false, 0xffff, 0xffff, false),
HOWTO(R_X86_64_PC16,0, 1, 16, true, 0, complain_overflow_bitfield,
@@ -67,7 +79,16 @@
HOWTO(R_X86_64_8, 0, 0, 8, false, 0, complain_overflow_signed,
bfd_elf_generic_reloc, "R_X86_64_8", false, 0xff, 0xff, false),
HOWTO(R_X86_64_PC8, 0, 0, 8, true, 0, complain_overflow_signed,
- bfd_elf_generic_reloc, "R_X86_64_PC8", false, 0xff, 0xff, true)
+ bfd_elf_generic_reloc, "R_X86_64_PC8", false, 0xff, 0xff, true),
+
+/* GNU extension to record C++ vtable hierarchy. */
+ HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, false, 0, complain_overflow_dont,
+ NULL, "R_X86_64_GNU_VTINHERIT", false, 0, 0, false),
+
+/* GNU extension to record C++ vtable member usage. */
+ HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, false, 0, complain_overflow_dont,
+ _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", false, 0, 0,
+ false)
};
/* Map BFD relocs to the x86_64 elf relocs. */
@@ -95,6 +116,8 @@
{ BFD_RELOC_16_PCREL, R_X86_64_PC16, },
{ BFD_RELOC_8, R_X86_64_8, },
{ BFD_RELOC_8_PCREL, R_X86_64_PC8, },
+ { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
+ { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
};
static reloc_howto_type *elf64_x86_64_reloc_type_lookup
@@ -145,11 +168,20 @@
arelent *cache_ptr;
Elf64_Internal_Rela *dst;
{
- unsigned r_type;
+ unsigned r_type, i;
r_type = ELF64_R_TYPE (dst->r_info);
- BFD_ASSERT (r_type < (unsigned int) R_X86_64_max);
- cache_ptr->howto = &x86_64_elf_howto_table[r_type];
+ if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT)
+ {
+ BFD_ASSERT (r_type <= (unsigned int) R_X86_64_PC8);
+ i = r_type;
+ }
+ else
+ {
+ BFD_ASSERT (r_type < (unsigned int) R_X86_64_max);
+ i = r_type - ((unsigned int) R_X86_64_GNU_VTINHERIT - R_X86_64_PC8 - 1);
+ }
+ cache_ptr->howto = &x86_64_elf_howto_table[i];
BFD_ASSERT (r_type == cache_ptr->howto->type);
}
@@ -184,7 +216,7 @@
{
0xff, 0xa3, /* jmp *name@GOTPC(%rip) */
0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
- 0x68, /* pushq immediate */
+ 0x68, /* pushq immediate */
0, 0, 0, 0, /* replaced with index into relocation table. */
0xe9, /* jmp relative */
0, 0, 0, 0 /* replaced with offset to start of .plt0. */
@@ -540,8 +572,7 @@
that this function is only called if we are using an
elf64_x86_64 linker hash table, which means that h is
really a pointer to an elf64_x86_64_link_hash_entry. */
- if (h != NULL
- && ELF64_R_TYPE (rel->r_info) == R_X86_64_PC32)
+ if (h != NULL && ELF64_R_TYPE (rel->r_info) == R_X86_64_PC32)
{
struct elf64_x86_64_link_hash_entry *eh;
struct elf64_x86_64_pcrel_relocs_copied *p;
@@ -568,6 +599,20 @@
}
}
break;
+
+ /* This relocation describes the C++ object vtable hierarchy.
+ Reconstruct it for later use during GC. */
+ case R_X86_64_GNU_VTINHERIT:
+ if (!_bfd_elf64_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
+ return false;
+ break;
+
+ /* This relocation describes which C++ vtable entries are actually
+ used. Record for later use during GC. */
+ case R_X86_64_GNU_VTENTRY:
+ if (!_bfd_elf64_gc_record_vtentry (abfd, sec, h, rel->r_offset))
+ return false;
+ break;
}
}
@@ -587,17 +632,25 @@
{
if (h != NULL)
{
- switch (h->root.type)
+ switch (ELF64_R_TYPE (rel->r_info))
{
- case bfd_link_hash_defined:
- case bfd_link_hash_defweak:
- return h->root.u.def.section;
-
- case bfd_link_hash_common:
- return h->root.u.c.p->section;
+ case R_X86_64_GNU_VTINHERIT:
+ case R_X86_64_GNU_VTENTRY:
+ break;
default:
- break;
+ switch (h->root.type)
+ {
+ case bfd_link_hash_defined:
+ case bfd_link_hash_defweak:
+ return h->root.u.def.section;
+
+ case bfd_link_hash_common:
+ return h->root.u.c.p->section;
+
+ default:
+ break;
+ }
}
}
else
@@ -1096,7 +1149,7 @@
static boolean
elf64_x86_64_relocate_section (output_bfd, info, input_bfd, input_section,
- contents, relocs, local_syms, local_sections)
+ contents, relocs, local_syms, local_sections)
bfd *output_bfd;
struct bfd_link_info *info;
bfd *input_bfd;
@@ -1143,6 +1196,9 @@
unsigned int indx;
r_type = ELF64_R_TYPE (rela->r_info);
+ if (r_type == (int) R_X86_64_GNU_VTINHERIT
+ || r_type == (int) R_X86_64_GNU_VTENTRY)
+ continue;
if ((indx = (unsigned) r_type) >= R_X86_64_max)
{
@@ -1654,7 +1710,7 @@
/* Get the offset into the .got table of the entry that
corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
- bytes. The first three are reserved. */
+ bytes. The first three are reserved for the dynamic linker. */
got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
/* Fill in the entry in the procedure linkage table. */
@@ -1671,6 +1727,7 @@
splt->contents + h->plt.offset + 12);
/* Fill in the entry in the global offset table. */
+ /* FIXME: Comment on the number "6" below. */
bfd_put_64 (output_bfd, (splt->output_section->vma + splt->output_offset
+ h->plt.offset + 6),
sgot->contents + got_offset);
--
[I'm in the middle of relocating to England, so I will only have internet
connection about once every week for the next month or so.]
Bo Thorsen | 28 Merton Road
Free software developer | Slough, SL1 1QW
SuSE Labs | England
More information about the Binutils
mailing list