+/*
+ * Define a set of types which can be used for both ELF32 and ELF64
+ */
+
+#ifdef ELF_64BIT
+/* 64-bit flavour: map the generic Elf_* names onto the Elf64_* types
+ * and accessor macros from <elf.h>. */
+#define ELFCLASS ELFCLASS64
+#define Elf_Addr Elf64_Addr
+#define Elf_Word Elf64_Word
+#define Elf_Sword Elf64_Sword
+#define Elf_Ehdr Elf64_Ehdr
+#define Elf_Phdr Elf64_Phdr
+#define Elf_Shdr Elf64_Shdr
+#define Elf_Sym Elf64_Sym
+#define Elf_Rel Elf64_Rel
+#define Elf_Rela Elf64_Rela
+#define ELF_ST_TYPE ELF64_ST_TYPE
+#define ELF_ST_BIND ELF64_ST_BIND
+#define ELF_R_TYPE ELF64_R_TYPE
+#define ELF_R_SYM ELF64_R_SYM
+#else
+/* 32-bit flavour: same generic names, Elf32_* definitions. */
+#define ELFCLASS ELFCLASS32
+#define Elf_Addr Elf32_Addr
+#define Elf_Word Elf32_Word
+#define Elf_Sword Elf32_Sword
+#define Elf_Ehdr Elf32_Ehdr
+#define Elf_Phdr Elf32_Phdr
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Rel Elf32_Rel
+#define Elf_Rela Elf32_Rela
+#define ELF_ST_TYPE ELF32_ST_TYPE
+#define ELF_ST_BIND ELF32_ST_BIND
+#define ELF_R_TYPE ELF32_R_TYPE
+#define ELF_R_SYM ELF32_R_SYM
+#endif
+
+
+/*
+ * Functions to allocate entries in dynamic sections. Currently we simply
+ * preallocate a large number, and we don't check if an entry for the given
+ * target already exists (a linear search is too slow). Ideally these
+ * entries would be associated with symbols.
+ */
+
+/* These sizes are sufficient to load HSbase + HShaskell98 + a few modules.
+ * Note: GOT_SIZE and FUNCTION_TABLE_SIZE are entry counts, not byte
+ * counts; PLT_SIZE is also an entry count (see PLTSize() for bytes). */
+#define GOT_SIZE 0x20000
+#define FUNCTION_TABLE_SIZE 0x10000
+#define PLT_SIZE 0x08000
+
+#ifdef ELF_NEED_GOT
+/* Statically preallocated global offset table; entries are handed out
+ * sequentially and never reclaimed (see the note above). gotIndex is
+ * the next free slot (zero-initialised by static storage duration). */
+static Elf_Addr got[GOT_SIZE];
+static unsigned int gotIndex;
+/* gp value shared by all loaded code: the base of our synthetic GOT. */
+static Elf_Addr gp_val = (Elf_Addr)got;
+
+/*
+ * Reserve the next free GOT slot, store 'target' in it, and return the
+ * address of the slot (i.e. the GOT entry's own address, suitable for a
+ * GOT-relative relocation). Aborts via barf() when the table is full.
+ * Duplicate targets each get their own slot -- no deduplication.
+ */
+static Elf_Addr
+allocateGOTEntry(Elf_Addr target)
+{
+ Elf_Addr *entry;
+
+ if (gotIndex >= GOT_SIZE)
+ barf("Global offset table overflow");
+
+ entry = &got[gotIndex++];
+ *entry = target;
+ return (Elf_Addr)entry;
+}
+#endif
+
+#ifdef ELF_FUNCTION_DESC
+/* On ABIs with function descriptors (e.g. IA-64), a function "pointer"
+ * is really a pointer to an (entry address, gp) pair. */
+typedef struct {
+ Elf_Addr ip; /* entry-point address of the function's code */
+ Elf_Addr gp; /* gp (global pointer) the function expects on entry */
+} FunctionDesc;
+
+/* Preallocated pool of descriptors; functionTableIndex is the next free
+ * slot. Like the GOT, entries are never reused or deduplicated. */
+static FunctionDesc functionTable[FUNCTION_TABLE_SIZE];
+static unsigned int functionTableIndex;
+
+/*
+ * Build a new function descriptor for code at 'target' using our own
+ * gp_val, and return its address. Aborts via barf() when the pool is
+ * exhausted.
+ *
+ * NOTE(review): gp_val is declared under #ifdef ELF_NEED_GOT, so this
+ * code presumably requires ELF_NEED_GOT whenever ELF_FUNCTION_DESC is
+ * defined -- confirm against the platform configuration.
+ */
+static Elf_Addr
+allocateFunctionDesc(Elf_Addr target)
+{
+ FunctionDesc *entry;
+
+ if (functionTableIndex >= FUNCTION_TABLE_SIZE)
+ barf("Function table overflow");
+
+ entry = &functionTable[functionTableIndex++];
+ entry->ip = target;
+ entry->gp = (Elf_Addr)gp_val;
+ return (Elf_Addr)entry;
+}
+
+/*
+ * Clone an existing descriptor (pointed to by 'target') into our local
+ * pool, preserving the original's gp rather than substituting gp_val,
+ * and return the copy's address. Used when the original descriptor is
+ * not reachable via a short gp-relative offset (see PLT_RELOC below).
+ */
+static Elf_Addr
+copyFunctionDesc(Elf_Addr target)
+{
+ FunctionDesc *olddesc = (FunctionDesc *)target;
+ FunctionDesc *newdesc;
+
+ newdesc = (FunctionDesc *)allocateFunctionDesc(olddesc->ip);
+ newdesc->gp = olddesc->gp;
+ return (Elf_Addr)newdesc;
+}
+#endif
+
+#ifdef ELF_NEED_PLT
+#ifdef ia64_TARGET_ARCH
+static void ia64_reloc_gprel22(Elf_Addr target, Elf_Addr value);
+static void ia64_reloc_pcrel21(Elf_Addr target, Elf_Addr value, ObjectCode *oc);
+
+/* Template for one IA-64 PLT stub: loads the callee's function
+ * descriptor at a gp-relative offset (patched in by PLT_RELOC), sets up
+ * the callee's gp and branches to its entry point. */
+static unsigned char plt_code[] =
+{
+ /* taken from binutils bfd/elfxx-ia64.c */
+ 0x0b, 0x78, 0x00, 0x02, 0x00, 0x24, /* [MMI] addl r15=0,r1;; */
+ 0x00, 0x41, 0x3c, 0x30, 0x28, 0xc0, /* ld8 r16=[r15],8 */
+ 0x01, 0x08, 0x00, 0x84, /* mov r14=r1;; */
+ 0x11, 0x08, 0x00, 0x1e, 0x18, 0x10, /* [MIB] ld8 r1=[r15] */
+ 0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /* mov b6=r16 */
+ 0x60, 0x00, 0x80, 0x00 /* br.few b6;; */
+};
+
+/* If we can't get to the function descriptor via gp, take a local copy of it.
+ * The gprel22 relocation only reaches +/-0x1fffff bytes from gp, so a
+ * descriptor outside that window is cloned into our local pool (which
+ * lives next to the GOT and is therefore in range). */
+#define PLT_RELOC(code, target) { \
+ Elf64_Sxword rel_value = target - gp_val; \
+ if ((rel_value > 0x1fffff) || (rel_value < -0x1fffff)) \
+ ia64_reloc_gprel22((Elf_Addr)code, copyFunctionDesc(target)); \
+ else \
+ ia64_reloc_gprel22((Elf_Addr)code, target); \
+ }
+#endif
+
+/* One PLT slot: a copy of the stub template, patched per target.
+ * NOTE(review): this struct is sized by plt_code, which is only defined
+ * for ia64 above, although the enclosing #ifdef ELF_NEED_PLT region is
+ * nominally generic -- presumably only ia64 defines ELF_NEED_PLT today;
+ * confirm before enabling it on another target. */
+typedef struct {
+ unsigned char code[sizeof(plt_code)];
+} PLTEntry;
+
+/*
+ * Carve the next stub out of the object's PLT area (oc->plt), fill it
+ * with the template, relocate it to reach 'target', and return the
+ * stub's address. oc->pltIndex counts entries, not bytes; aborts via
+ * barf() when all PLT_SIZE slots in this object's area are used.
+ */
+static Elf_Addr
+allocatePLTEntry(Elf_Addr target, ObjectCode *oc)
+{
+ PLTEntry *plt = (PLTEntry *)oc->plt;
+ PLTEntry *entry;
+
+ if (oc->pltIndex >= PLT_SIZE)
+ barf("Procedure table overflow");
+
+ entry = &plt[oc->pltIndex++];
+ memcpy(entry->code, plt_code, sizeof(entry->code));
+ PLT_RELOC(entry->code, target);
+ return (Elf_Addr)entry;
+}
+
+/* Total size in bytes to reserve for an object's PLT area
+ * (PLT_SIZE entries of sizeof(PLTEntry) bytes each). */
+static unsigned int
+PLTSize(void)
+{
+ return (PLT_SIZE * sizeof(PLTEntry));
+}
+#endif
+
+
+/*
+ * Generic ELF functions
+ */
+