1 /* This is the Linux kernel elf-loading code, ported into user space */
15 /* this flag is ineffective under linux too, should be deleted */
17 #define MAP_DENYWRITE 0
20 /* should probably go in elf.h */
/* AT_PLATFORM for x86: returned string is "i386" with the family digit
   patched in from cpuid (e.g. family 6 -> "i686").
   NOTE(review): no clamping of 'family' to a single digit is visible in
   this fragment -- confirm the elided part range-checks it. */
27 #define ELF_PLATFORM get_elf_platform()
29 static const char *get_elf_platform(void)
31 static char elf_platform[] = "i386";
32 int family = (global_env->cpuid_version >> 8) & 0xff;
/* Overwrite the '3' in the static "i386" buffer with the CPU family. */
36 elf_platform[1] = '0' + family;
/* AT_HWCAP for x86: pass the emulated CPU's cpuid feature bits through. */
40 #define ELF_HWCAP get_elf_hwcap()
42 static uint32_t get_elf_hwcap(void)
44 return global_env->cpuid_features;
48 #define ELF_START_MMAP 0x2aaaaab000ULL
49 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
51 #define ELF_CLASS ELFCLASS64
52 #define ELF_DATA ELFDATA2LSB
53 #define ELF_ARCH EM_X86_64
/* Initial x86-64 user register state: only the stack pointer and the
   entry point are seeded here. */
55 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
58 regs->rsp = infop->start_stack;
59 regs->rip = infop->entry;
64 #define ELF_START_MMAP 0x80000000
67 * This is used to ensure we don't load something for the wrong architecture.
69 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
72 * These are used to set parameters in the core dumps.
74 #define ELF_CLASS ELFCLASS32
75 #define ELF_DATA ELFDATA2LSB
76 #define ELF_ARCH EM_386
/* Initial i386 user register state: stack pointer and entry point. */
78 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
80 regs->esp = infop->start_stack;
81 regs->eip = infop->entry;
83 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
84 starts %edx contains a pointer to a function which might be
85 registered using `atexit'. This provides a means for the
86 dynamic linker to call DT_FINI functions for shared libraries
87 that have been loaded before the code runs.
89 A value of 0 tells us we have no such handler. */
94 #define USE_ELF_CORE_DUMP
95 #define ELF_EXEC_PAGESIZE 4096
101 #define ELF_START_MMAP 0x80000000
103 #define elf_check_arch(x) ( (x) == EM_ARM )
105 #define ELF_CLASS ELFCLASS32
106 #ifdef TARGET_WORDS_BIGENDIAN
107 #define ELF_DATA ELFDATA2MSB
109 #define ELF_DATA ELFDATA2LSB
111 #define ELF_ARCH EM_ARM
/* Initial ARM user state: user-mode CPSR, entry PC (with Thumb-bit
   handling), SP, and argv/envp pointers read from the new stack. */
113 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
115 target_long stack = infop->start_stack;
116 memset(regs, 0, sizeof(*regs));
117 regs->ARM_cpsr = 0x10;
/* A set low bit in the entry address marks a Thumb entry point. */
118 if (infop->entry & 1)
119 regs->ARM_cpsr |= CPSR_T;
120 regs->ARM_pc = infop->entry & 0xfffffffe;
121 regs->ARM_sp = infop->start_stack;
122 regs->ARM_r2 = tgetl(stack + 8); /* envp */
123 regs->ARM_r1 = tgetl(stack + 4); /* argv */
124 /* XXX: it seems that r0 is zeroed after ! */
126 /* For uClinux PIC binaries. */
127 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
128 regs->ARM_r10 = infop->start_data;
131 #define USE_ELF_CORE_DUMP
132 #define ELF_EXEC_PAGESIZE 4096
/* ARM hwcap feature bits, matching the kernel's asm/hwcap.h values. */
136 ARM_HWCAP_ARM_SWP = 1 << 0,
137 ARM_HWCAP_ARM_HALF = 1 << 1,
138 ARM_HWCAP_ARM_THUMB = 1 << 2,
139 ARM_HWCAP_ARM_26BIT = 1 << 3,
140 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
141 ARM_HWCAP_ARM_FPA = 1 << 5,
142 ARM_HWCAP_ARM_VFP = 1 << 6,
143 ARM_HWCAP_ARM_EDSP = 1 << 7,
/* Feature bits advertised to the guest via the AT_HWCAP aux entry. */
146 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
147 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
148 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
153 #ifdef TARGET_SPARC64
155 #define ELF_START_MMAP 0x80000000
157 #define elf_check_arch(x) ( (x) == EM_SPARCV9 )
159 #define ELF_CLASS ELFCLASS64
160 #define ELF_DATA ELFDATA2MSB
/* SPARC V9 ABI: %sp is biased 2047 bytes below the actual frame. */
161 #define ELF_ARCH EM_SPARCV9
163 #define STACK_BIAS 2047
165 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
168 regs->pc = infop->entry;
169 regs->npc = regs->pc + 4;
/* u_regs[14] is %sp: leave a 16-register window save area, biased. */
171 regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
175 #define ELF_START_MMAP 0x80000000
177 #define elf_check_arch(x) ( (x) == EM_SPARC )
179 #define ELF_CLASS ELFCLASS32
180 #define ELF_DATA ELFDATA2MSB
181 #define ELF_ARCH EM_SPARC
/* 32-bit SPARC: seed the pc/npc delay-slot pair and leave a
   16-word register window save area below %sp (u_regs[14]). */
183 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
186 regs->pc = infop->entry;
187 regs->npc = regs->pc + 4;
189 regs->u_regs[14] = infop->start_stack - 16 * 4;
/* PowerPC (32- and 64-bit) ELF parameters, including the extra PPC-only
   auxiliary vector entries glibc expects (cache block sizes and the
   AT_IGNOREPPC alignment padding emitted by ARCH_DLINFO below). */
197 #define ELF_START_MMAP 0x80000000
201 #define elf_check_arch(x) ( (x) == EM_PPC64 )
203 #define ELF_CLASS ELFCLASS64
207 #define elf_check_arch(x) ( (x) == EM_PPC )
209 #define ELF_CLASS ELFCLASS32
213 #ifdef TARGET_WORDS_BIGENDIAN
214 #define ELF_DATA ELFDATA2MSB
216 #define ELF_DATA ELFDATA2LSB
218 #define ELF_ARCH EM_PPC
221 * We need to put in some extra aux table entries to tell glibc what
222 * the cache block size is, so it can use the dcbz instruction safely.
224 #define AT_DCACHEBSIZE 19
225 #define AT_ICACHEBSIZE 20
226 #define AT_UCACHEBSIZE 21
227 /* A special ignored type value for PPC, for glibc compatibility. */
228 #define AT_IGNOREPPC 22
230 * The requirements here are:
231 * - keep the final alignment of sp (sp & 0xf)
232 * - make sure the 32-bit value at the first 16 byte aligned position of
233 * AUXV is greater than 16 for glibc compatibility.
234 * AT_IGNOREPPC is used for that.
235 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
236 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
238 #define DLINFO_ARCH_ITEMS 5
239 #define ARCH_DLINFO \
241 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
242 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
243 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
245 * Now handle glibc compatibility. \
247 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
248 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
251 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
253 target_ulong pos = infop->start_stack;
/* entry/toc serve the 64-bit function-descriptor case below. */
256 target_ulong entry, toc;
259 _regs->msr = 1 << MSR_PR; /* Set user mode */
260 _regs->gpr[1] = infop->start_stack;
/* 64-bit PPC: e_entry points at a function descriptor -- first
   doubleword is the real entry address, second is the TOC pointer
   (presumably guarded by a TARGET_PPC64 #if in the full file). */
262 entry = ldq_raw(infop->entry) + infop->load_addr;
263 toc = ldq_raw(infop->entry + 8) + infop->load_addr;
265 infop->entry = entry;
267 _regs->nip = infop->entry;
268 /* Note that this isn't exactly what the regular kernel does
269 * but this is what the ABI wants and is needed to allow
270 * execution of PPC BSD programs.
272 _regs->gpr[3] = tgetl(pos);
273 pos += sizeof(target_ulong);
275 for (tmp = 1; tmp != 0; pos += sizeof(target_ulong))
280 #define USE_ELF_CORE_DUMP
281 #define ELF_EXEC_PAGESIZE 4096
287 #define ELF_START_MMAP 0x80000000
289 #define elf_check_arch(x) ( (x) == EM_MIPS )
292 #define ELF_CLASS ELFCLASS64
294 #define ELF_CLASS ELFCLASS32
296 #ifdef TARGET_WORDS_BIGENDIAN
297 #define ELF_DATA ELFDATA2MSB
299 #define ELF_DATA ELFDATA2LSB
301 #define ELF_ARCH EM_MIPS
/* MIPS: start in user mode (CP0 status), EPC at the entry point,
   and $29 (the stack pointer) at the top of the new stack. */
303 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
305 regs->cp0_status = CP0St_UM;
306 regs->cp0_epc = infop->entry;
307 regs->regs[29] = infop->start_stack;
310 #define USE_ELF_CORE_DUMP
311 #define ELF_EXEC_PAGESIZE 4096
313 #endif /* TARGET_MIPS */
317 #define ELF_START_MMAP 0x80000000
319 #define elf_check_arch(x) ( (x) == EM_SH )
321 #define ELF_CLASS ELFCLASS32
322 #define ELF_DATA ELFDATA2LSB
323 #define ELF_ARCH EM_SH
/* SH4: r15 serves as the stack pointer. */
325 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
327 /* Check other registers XXXXX */
328 regs->pc = infop->entry;
329 regs->regs[15] = infop->start_stack;
332 #define USE_ELF_CORE_DUMP
333 #define ELF_EXEC_PAGESIZE 4096
339 #define ELF_START_MMAP 0x80000000
341 #define elf_check_arch(x) ( (x) == EM_CRIS )
343 #define ELF_CLASS ELFCLASS32
344 #define ELF_DATA ELFDATA2LSB
345 #define ELF_ARCH EM_CRIS
/* CRIS: erp receives the program entry point. */
347 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
349 regs->erp = infop->entry;
352 #define USE_ELF_CORE_DUMP
353 #define ELF_EXEC_PAGESIZE 8192
359 #define ELF_START_MMAP 0x80000000
361 #define elf_check_arch(x) ( (x) == EM_68K )
363 #define ELF_CLASS ELFCLASS32
364 #define ELF_DATA ELFDATA2MSB
365 #define ELF_ARCH EM_68K
367 /* ??? Does this need to do anything?
368 #define ELF_PLAT_INIT(_r) */
/* m68k: user stack pointer and entry PC. */
370 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
372 regs->usp = infop->start_stack;
374 regs->pc = infop->entry;
377 #define USE_ELF_CORE_DUMP
378 #define ELF_EXEC_PAGESIZE 8192
384 #define ELF_START_MMAP (0x30000000000ULL)
386 #define elf_check_arch(x) ( (x) == ELF_ARCH )
388 #define ELF_CLASS ELFCLASS64
389 #define ELF_DATA ELFDATA2MSB
390 #define ELF_ARCH EM_ALPHA
/* Alpha: PC, user SP, and the per-thread 'unique' value. */
392 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
394 regs->pc = infop->entry;
396 regs->usp = infop->start_stack;
396 regs->usp = infop->start_stack;
397 regs->unique = infop->start_data; /* ? */
/* NOTE(review): leftover debug output on every exec -- consider removing
   or guarding this printf behind a debug flag. */
398 printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
399 regs->unique, infop->start_data);
402 #define USE_ELF_CORE_DUMP
403 #define ELF_EXEC_PAGESIZE 8192
405 #endif /* TARGET_ALPHA */
/* Default when no architecture above supplied an AT_PLATFORM string. */
408 #define ELF_PLATFORM (NULL)
/* a.out executable header, used to recognize a.out ELF interpreters. */
419 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
420 unsigned int a_text; /* length of text, in bytes */
421 unsigned int a_data; /* length of data, in bytes */
422 unsigned int a_bss; /* length of uninitialized data area, in bytes */
423 unsigned int a_syms; /* length of symbol table data in file, in bytes */
424 unsigned int a_entry; /* start address */
425 unsigned int a_trsize; /* length of relocation info for text, in bytes */
426 unsigned int a_drsize; /* length of relocation info for data, in bytes */
/* Low 16 bits of a_info hold the a.out magic (OMAGIC/ZMAGIC/QMAGIC). */
430 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
436 /* max code+data+bss space allocated to elf interpreter */
437 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
439 /* max code+data+bss+brk space allocated to ET_DYN executables */
440 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
442 /* from personality.h */
444 /* Flags for bug emulation. These occupy the top three bytes. */
445 #define STICKY_TIMEOUTS 0x4000000
446 #define WHOLE_SECONDS 0x2000000
448 /* Personality types. These go in the low byte. Avoid using the top bit,
449 * it will conflict with error returns.
451 #define PER_MASK (0x00ff)
452 #define PER_LINUX (0x0000)
453 #define PER_SVR4 (0x0001 | STICKY_TIMEOUTS)
454 #define PER_SVR3 (0x0002 | STICKY_TIMEOUTS)
455 #define PER_SCOSVR3 (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
456 #define PER_WYSEV386 (0x0004 | STICKY_TIMEOUTS)
457 #define PER_ISCR4 (0x0005 | STICKY_TIMEOUTS)
458 #define PER_BSD (0x0006)
459 #define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
461 /* Necessary parameters */
/* Page rounding helpers in terms of the target's ELF page size. */
462 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
463 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
464 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
/* Interpreter classification used by load_elf_binary(). */
466 #define INTERPRETER_NONE 0
467 #define INTERPRETER_AOUT 1
468 #define INTERPRETER_ELF 2
/* Number of NEW_AUX_ENT pairs create_elf_tables() always emits. */
470 #define DLINFO_ITEMS 12
/* Kernel-source-compat shim; presumably a plain memcpy (body not shown
   in this fragment -- confirm). */
472 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
477 extern unsigned long x86_stack_size;
/* Forward declaration; the stub definition is at the end of the file. */
479 static int load_aout_interp(void * exptr, int interp_fd);
482 static void bswap_ehdr(struct elfhdr *ehdr)
484 bswap16s(&ehdr->e_type); /* Object file type */
485 bswap16s(&ehdr->e_machine); /* Architecture */
486 bswap32s(&ehdr->e_version); /* Object file version */
487 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
488 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
489 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
490 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
491 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
492 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
493 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
494 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
495 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
496 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
/* Byte-swap an ELF program header in place. */
499 static void bswap_phdr(struct elf_phdr *phdr)
501 bswap32s(&phdr->p_type); /* Segment type */
502 bswaptls(&phdr->p_offset); /* Segment file offset */
503 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
504 bswaptls(&phdr->p_paddr); /* Segment physical address */
505 bswaptls(&phdr->p_filesz); /* Segment size in file */
506 bswaptls(&phdr->p_memsz); /* Segment size in memory */
507 bswap32s(&phdr->p_flags); /* Segment flags */
508 bswaptls(&phdr->p_align); /* Segment alignment */
/* Byte-swap an ELF section header in place. */
511 static void bswap_shdr(struct elf_shdr *shdr)
513 bswap32s(&shdr->sh_name);
514 bswap32s(&shdr->sh_type);
515 bswaptls(&shdr->sh_flags);
516 bswaptls(&shdr->sh_addr);
517 bswaptls(&shdr->sh_offset);
518 bswaptls(&shdr->sh_size);
519 bswap32s(&shdr->sh_link);
520 bswap32s(&shdr->sh_info);
521 bswaptls(&shdr->sh_addralign);
522 bswaptls(&shdr->sh_entsize);
/* Byte-swap an ELF symbol table entry in place. */
525 static void bswap_sym(struct elf_sym *sym)
527 bswap32s(&sym->st_name);
528 bswaptls(&sym->st_value);
529 bswaptls(&sym->st_size);
530 bswap16s(&sym->st_shndx);
535 * 'copy_elf_strings()' copies argument/envelope strings from user
536 * memory to free pages in kernel mem. These are in a format ready
537 * to be put directly into the top of new user memory.
540 static target_ulong copy_elf_strings(int argc,char ** argv, void **page,
543 char *tmp, *tmp1, *pag = NULL;
547 return 0; /* bullet-proofing */
552 fprintf(stderr, "VFS: argc is wrong");
558 if (p < len) { /* this shouldn't happen - 128kB */
/* Strings are copied downward, page by page, from the top of the arg area. */
564 offset = p % TARGET_PAGE_SIZE;
565 pag = (char *)page[p/TARGET_PAGE_SIZE];
/* Allocate a fresh zeroed page on first touch.
   NOTE(review): malloc result is used unchecked here -- OOM would crash. */
567 pag = (char *)malloc(TARGET_PAGE_SIZE);
568 memset(pag, 0, TARGET_PAGE_SIZE);
569 page[p/TARGET_PAGE_SIZE] = pag;
/* Single-byte copy at a page boundary or for the last byte of a string. */
574 if (len == 0 || offset == 0) {
575 *(pag + offset) = *tmp;
/* Bulk path: copy as many bytes as fit before the page boundary. */
578 int bytes_to_copy = (len > offset) ? offset : len;
579 tmp -= bytes_to_copy;
581 offset -= bytes_to_copy;
582 len -= bytes_to_copy;
583 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
/* Map the target stack region (with a guard page on top) and copy the
   argument/environment pages collected in bprm into it. Returns the
   adjusted stack pointer p relocated into the new mapping. */
590 target_ulong setup_arg_pages(target_ulong p, struct linux_binprm * bprm,
591 struct image_info * info)
593 target_ulong stack_base, size, error;
596 /* Create enough stack to hold everything. If we don't use
597 * it for args, we'll use it for something else...
599 size = x86_stack_size;
600 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
601 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
/* One extra host page is requested so it can become the guard page. */
602 error = target_mmap(0,
603 size + qemu_host_page_size,
604 PROT_READ | PROT_WRITE,
605 MAP_PRIVATE | MAP_ANONYMOUS,
611 /* we reserve one extra page at the top of the stack as guard */
612 target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
614 stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
/* Copy each populated arg page into the target and free the host copy. */
617 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
621 memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
624 stack_base += TARGET_PAGE_SIZE;
/* Map anonymous, zeroed pages covering [start, end) for the bss/brk. */
629 static void set_brk(target_ulong start, target_ulong end)
631 /* page-align the start and end addresses... */
632 start = HOST_PAGE_ALIGN(start);
633 end = HOST_PAGE_ALIGN(end);
636 if(target_mmap(start, end - start,
637 PROT_READ | PROT_WRITE | PROT_EXEC,
638 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
639 perror("cannot mmap brk");
645 /* We need to explicitly zero any fractional pages after the data
646 section (i.e. bss). This would contain the junk from the file that
647 should not be in memory. */
648 static void padzero(target_ulong elf_bss, target_ulong last_bss)
652 if (elf_bss >= last_bss)
655 /* XXX: this is really a hack : if the real host page size is
656 smaller than the target page size, some pages after the end
657 of the file may not be mapped. A better fix would be to
658 patch target_mmap(), but it is more complicated as the file
659 size must be known */
660 if (qemu_real_host_page_size < qemu_host_page_size) {
661 target_ulong end_addr, end_addr1;
662 end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
663 ~(qemu_real_host_page_size - 1);
664 end_addr = HOST_PAGE_ALIGN(elf_bss);
/* Anonymous-map any host pages between the file end and the target
   page boundary so they exist and read as zero. */
665 if (end_addr1 < end_addr) {
666 mmap((void *)g2h(end_addr1), end_addr - end_addr1,
667 PROT_READ|PROT_WRITE|PROT_EXEC,
668 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
/* Number of trailing bytes in the last partial page needing zeroing. */
672 nbyte = elf_bss & (qemu_host_page_size-1);
674 nbyte = qemu_host_page_size - nbyte;
/* Build the initial target stack image above sp: platform string,
   auxiliary vector (AT_* entries), then envp/argv/argc via
   loader_build_argptr(). Returns the final stack pointer. */
683 static target_ulong create_elf_tables(target_ulong p, int argc, int envc,
684 struct elfhdr * exec,
685 target_ulong load_addr,
686 target_ulong load_bias,
687 target_ulong interp_load_addr, int ibcs,
688 struct image_info *info)
692 target_ulong u_platform;
693 const char *k_platform;
694 const int n = sizeof(elf_addr_t);
/* Copy the AT_PLATFORM string (if any) onto the target stack, padded
   to the aux-entry word size. */
698 k_platform = ELF_PLATFORM;
700 size_t len = strlen(k_platform) + 1;
701 sp -= (len + n - 1) & ~(n - 1);
703 memcpy_to_target(sp, k_platform, len);
706 * Force 16 byte _final_ alignment here for generality.
708 sp = sp &~ (target_ulong)15;
/* 'size' counts the words that will be pushed, to pre-adjust alignment. */
709 size = (DLINFO_ITEMS + 1) * 2;
712 #ifdef DLINFO_ARCH_ITEMS
713 size += DLINFO_ARCH_ITEMS * 2;
715 size += envc + argc + 2;
716 size += (!ibcs ? 3 : 1); /* argc itself */
719 sp -= 16 - (size & 15);
721 /* This is correct because Linux defines
722 * elf_addr_t as Elf32_Off / Elf64_Off
724 #if ELF_CLASS == ELFCLASS32
725 #define NEW_AUX_ENT(id, val) do { \
726 sp -= n; tput32(sp, val); \
727 sp -= n; tput32(sp, id); \
730 #define NEW_AUX_ENT(id, val) do { \
731 sp -= n; tput64(sp, val); \
732 sp -= n; tput64(sp, id); \
/* The vector is built downward, so AT_NULL is pushed first and ends
   up as the terminating entry. */
735 NEW_AUX_ENT (AT_NULL, 0);
737 /* There must be exactly DLINFO_ITEMS entries here. */
738 NEW_AUX_ENT(AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
739 NEW_AUX_ENT(AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
740 NEW_AUX_ENT(AT_PHNUM, (target_ulong)(exec->e_phnum));
741 NEW_AUX_ENT(AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
742 NEW_AUX_ENT(AT_BASE, (target_ulong)(interp_load_addr));
743 NEW_AUX_ENT(AT_FLAGS, (target_ulong)0);
744 NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
745 NEW_AUX_ENT(AT_UID, (target_ulong) getuid());
746 NEW_AUX_ENT(AT_EUID, (target_ulong) geteuid());
747 NEW_AUX_ENT(AT_GID, (target_ulong) getgid());
748 NEW_AUX_ENT(AT_EGID, (target_ulong) getegid());
749 NEW_AUX_ENT(AT_HWCAP, (target_ulong) ELF_HWCAP);
751 NEW_AUX_ENT(AT_PLATFORM, u_platform);
754 * ARCH_DLINFO must come last so platform specific code can enforce
755 * special alignment requirements on the AUXV if necessary (eg. PPC).
761 sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
/* Load the ELF interpreter (dynamic linker) from interpreter_fd.
   Returns its relocated entry point, or ~0 on any failure; on success
   *interp_load_addr receives the base it was loaded at. */
766 static target_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
768 target_ulong *interp_load_addr)
770 struct elf_phdr *elf_phdata = NULL;
771 struct elf_phdr *eppnt;
772 target_ulong load_addr = 0;
773 int load_addr_set = 0;
775 target_ulong last_bss, elf_bss;
/* Fix the header's endianness before any field is interpreted. */
784 bswap_ehdr(interp_elf_ex);
786 /* First of all, some simple consistency checks */
787 if ((interp_elf_ex->e_type != ET_EXEC &&
788 interp_elf_ex->e_type != ET_DYN) ||
789 !elf_check_arch(interp_elf_ex->e_machine)) {
790 return ~((target_ulong)0UL);
794 /* Now read in all of the header information */
796 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
797 return ~(target_ulong)0UL;
799 elf_phdata = (struct elf_phdr *)
800 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
803 return ~((target_ulong)0UL);
806 * If the size of this structure has changed, then punt, since
807 * we will be doing the wrong thing.
809 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
811 return ~((target_ulong)0UL);
814 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
816 retval = read(interpreter_fd,
818 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
821 perror("load_elf_interp");
/* Byte-swap every program header before use. */
828 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
/* For ET_DYN interpreters, reserve a region first so the PT_LOAD
   segments can be placed at a host-chosen base (load_addr). */
833 if (interp_elf_ex->e_type == ET_DYN) {
834 /* in order to avoid hardcoding the interpreter load
835 address in qemu, we allocate a big enough memory zone */
836 error = target_mmap(0, INTERP_MAP_SIZE,
837 PROT_NONE, MAP_PRIVATE | MAP_ANON,
848 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
849 if (eppnt->p_type == PT_LOAD) {
850 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
852 target_ulong vaddr = 0;
855 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
856 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
857 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
858 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
859 elf_type |= MAP_FIXED;
860 vaddr = eppnt->p_vaddr;
862 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
863 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
867 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
871 close(interpreter_fd);
873 return ~((target_ulong)0UL);
876 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
882 * Find the end of the file mapping for this phdr, and keep
883 * track of the largest address we see for this.
885 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
886 if (k > elf_bss) elf_bss = k;
889 * Do the same thing for the memory mapping - between
890 * elf_bss and last_bss is the bss section.
892 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
893 if (k > last_bss) last_bss = k;
896 /* Now use mmap to map the library into memory. */
898 close(interpreter_fd);
901 * Now fill out the bss section. First pad the last page up
902 * to the page boundary, and then perform a mmap to make sure
903 * that there are zeromapped pages up to and including the last
906 padzero(elf_bss, last_bss);
907 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
909 /* Map the last of the bss segment */
910 if (last_bss > elf_bss) {
911 target_mmap(elf_bss, last_bss-elf_bss,
912 PROT_READ|PROT_WRITE|PROT_EXEC,
913 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
/* Report where the interpreter landed and return its relocated entry. */
917 *interp_load_addr = load_addr;
918 return ((target_ulong) interp_elf_ex->e_entry) + load_addr;
921 /* Best attempt to load symbols from this ELF object. */
922 static void load_symbols(struct elfhdr *hdr, int fd)
925 struct elf_shdr sechdr, symtab, strtab;
928 #if (ELF_CLASS == ELFCLASS64)
929 // Disas uses 32 bit symbols
930 struct elf32_sym *syms32 = NULL;
/* Scan the section headers for SHT_SYMTAB and its linked string table. */
934 lseek(fd, hdr->e_shoff, SEEK_SET);
935 for (i = 0; i < hdr->e_shnum; i++) {
936 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
941 if (sechdr.sh_type == SHT_SYMTAB) {
943 lseek(fd, hdr->e_shoff
944 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
945 if (read(fd, &strtab, sizeof(strtab))
954 return; /* Shouldn't happen... */
957 /* Now know where the strtab and symtab are. Snarf them. */
958 s = malloc(sizeof(*s));
959 s->disas_symtab = malloc(symtab.sh_size);
960 #if (ELF_CLASS == ELFCLASS64)
961 syms32 = malloc(symtab.sh_size / sizeof(struct elf_sym)
962 * sizeof(struct elf32_sym))
964 s->disas_strtab = strings = malloc(strtab.sh_size);
965 if (!s->disas_symtab || !s->disas_strtab)
968 lseek(fd, symtab.sh_offset, SEEK_SET);
969 if (read(fd, s->disas_symtab, symtab.sh_size) != symtab.sh_size)
/* Byte-swap each symbol and, on 64-bit targets, narrow it to the
   32-bit layout the disassembler consumes (values truncated). */
972 for (i = 0; i < symtab.sh_size / sizeof(struct elf_sym); i++) {
974 bswap_sym(s->disas_symtab + sizeof(struct elf_sym)*i);
976 #if (ELF_CLASS == ELFCLASS64)
977 sym = s->disas_symtab + sizeof(struct elf_sym)*i;
978 syms32[i].st_name = sym->st_name;
979 syms32[i].st_info = sym->st_info;
980 syms32[i].st_other = sym->st_other;
981 syms32[i].st_shndx = sym->st_shndx;
982 syms32[i].st_value = sym->st_value & 0xffffffff;
983 syms32[i].st_size = sym->st_size & 0xffffffff;
987 #if (ELF_CLASS == ELFCLASS64)
988 free(s->disas_symtab);
989 s->disas_symtab = syms32;
991 lseek(fd, strtab.sh_offset, SEEK_SET);
992 if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
994 s->disas_num_syms = symtab.sh_size / sizeof(struct elf_sym);
/* Top-level ELF loader: validates the header, captures PT_INTERP,
   maps all PT_LOAD segments, loads the interpreter if present, builds
   the initial stack, and fills in 'info' for the caller. */
999 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1000 struct image_info * info)
1002 struct elfhdr elf_ex;
1003 struct elfhdr interp_elf_ex;
1004 struct exec interp_ex;
1005 int interpreter_fd = -1; /* avoid warning */
1006 target_ulong load_addr, load_bias;
1007 int load_addr_set = 0;
1008 unsigned int interpreter_type = INTERPRETER_NONE;
1009 unsigned char ibcs2_interpreter;
1011 target_ulong mapped_addr;
1012 struct elf_phdr * elf_ppnt;
1013 struct elf_phdr *elf_phdata;
1014 target_ulong elf_bss, k, elf_brk;
1016 char * elf_interpreter;
1017 target_ulong elf_entry, interp_load_addr = 0;
1019 target_ulong start_code, end_code, start_data, end_data;
1020 target_ulong reloc_func_desc = 0;
1021 target_ulong elf_stack;
1022 char passed_fileno[6];
1024 ibcs2_interpreter = 0;
/* bprm->buf already holds the first bytes of the file (exec header). */
1028 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
1030 bswap_ehdr(&elf_ex);
1033 /* First of all, some simple consistency checks */
1034 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
1035 (! elf_check_arch(elf_ex.e_machine))) {
/* Stage filename, environment, and arguments into the arg pages
   (copied top-down, so pushed in reverse of final stack order). */
1039 bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1040 bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
1041 bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
1046 /* Now read in all of the header information */
1047 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
1048 if (elf_phdata == NULL) {
1052 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
1054 retval = read(bprm->fd, (char *) elf_phdata,
1055 elf_ex.e_phentsize * elf_ex.e_phnum);
1059 perror("load_elf_binary");
/* Byte-swap every program header before any field is consumed. */
1066 elf_ppnt = elf_phdata;
1067 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
1068 bswap_phdr(elf_ppnt);
1071 elf_ppnt = elf_phdata;
1077 elf_stack = ~((target_ulong)0UL);
1078 elf_interpreter = NULL;
1079 start_code = ~((target_ulong)0UL);
/* Scan program headers for a PT_INTERP segment naming the dynamic
   linker; read its path and open it. */
1084 for(i=0;i < elf_ex.e_phnum; i++) {
1085 if (elf_ppnt->p_type == PT_INTERP) {
1086 if ( elf_interpreter != NULL )
1089 free(elf_interpreter);
1094 /* This is the program interpreter used for
1095 * shared libraries - for now assume that this
1096 * is an a.out format binary
1099 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
1101 if (elf_interpreter == NULL) {
1107 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
1109 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
1112 perror("load_elf_binary2");
1116 /* If the program interpreter is one of these two,
1117 then assume an iBCS2 image. Otherwise assume
1118 a native linux image. */
1120 /* JRP - Need to add X86 lib dir stuff here... */
1122 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1123 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1124 ibcs2_interpreter = 1;
1128 printf("Using ELF interpreter %s\n", elf_interpreter);
1131 retval = open(path(elf_interpreter), O_RDONLY);
1133 interpreter_fd = retval;
1136 perror(elf_interpreter);
1138 /* retval = -errno; */
/* Read the interpreter's first 128 bytes and view them as both an
   a.out and an ELF header; the format is decided below. */
1143 retval = lseek(interpreter_fd, 0, SEEK_SET);
1145 retval = read(interpreter_fd,bprm->buf,128);
1149 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1150 interp_elf_ex=*((struct elfhdr *) bprm->buf); /* elf exec-header */
1153 perror("load_elf_binary3");
1156 free(elf_interpreter);
1164 /* Some simple consistency checks for the interpreter */
1165 if (elf_interpreter){
1166 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1168 /* Now figure out which format our binary is */
1169 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1170 (N_MAGIC(interp_ex) != QMAGIC)) {
1171 interpreter_type = INTERPRETER_ELF;
1174 if (interp_elf_ex.e_ident[0] != 0x7f ||
1175 strncmp(&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
1176 interpreter_type &= ~INTERPRETER_ELF;
1179 if (!interpreter_type) {
1180 free(elf_interpreter);
1187 /* OK, we are done with that, now set up the arg stuff,
1188 and then start this sucker up */
/* a.out interpreters receive the open binary fd as a string argument. */
1193 if (interpreter_type == INTERPRETER_AOUT) {
1194 snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1195 passed_p = passed_fileno;
1197 if (elf_interpreter) {
1198 bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
1203 if (elf_interpreter) {
1204 free(elf_interpreter);
1212 /* OK, This is the point of no return */
1215 info->start_mmap = (target_ulong)ELF_START_MMAP;
1217 elf_entry = (target_ulong) elf_ex.e_entry;
1219 /* Do this so that we can load the interpreter, if need be. We will
1220 change some of these later */
1222 bprm->p = setup_arg_pages(bprm->p, bprm, info);
1223 info->start_stack = bprm->p;
1225 /* Now we do a little grungy work by mmaping the ELF image into
1226 * the correct location in memory. At this point, we assume that
1227 * the image should be loaded at fixed address, not at a variable
1231 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1236 if (elf_ppnt->p_type != PT_LOAD)
1239 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1240 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1241 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1242 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1243 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1244 elf_flags |= MAP_FIXED;
1245 } else if (elf_ex.e_type == ET_DYN) {
1246 /* Try and get dynamic programs out of the way of the default mmap
1247 base, as well as whatever program they might try to exec. This
1248 is because the brk will follow the loader, and is not movable. */
1249 /* NOTE: for qemu, we do a big mmap to get enough space
1250 without hardcoding any address */
1251 error = target_mmap(0, ET_DYN_MAP_SIZE,
1252 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1258 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
/* Map this PT_LOAD segment at its (biased) virtual address. */
1261 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1262 (elf_ppnt->p_filesz +
1263 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1265 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1267 (elf_ppnt->p_offset -
1268 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1274 #ifdef LOW_ELF_STACK
1275 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1276 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
/* Record the load base from the first mapped segment; for ET_DYN
   fold the actual mmap result into load_bias/load_addr. */
1279 if (!load_addr_set) {
1281 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1282 if (elf_ex.e_type == ET_DYN) {
1283 load_bias += error -
1284 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1285 load_addr += load_bias;
1286 reloc_func_desc = load_bias;
1289 k = elf_ppnt->p_vaddr;
1294 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1297 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1301 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1302 if (k > elf_brk) elf_brk = k;
/* Translate all collected extents into biased (actual) addresses. */
1305 elf_entry += load_bias;
1306 elf_bss += load_bias;
1307 elf_brk += load_bias;
1308 start_code += load_bias;
1309 end_code += load_bias;
1310 start_data += load_bias;
1311 end_data += load_bias;
/* Hand control setup to the interpreter loader (a.out or ELF); the
   process will start at the interpreter's entry point instead. */
1313 if (elf_interpreter) {
1314 if (interpreter_type & 1) {
1315 elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
1317 else if (interpreter_type & 2) {
1318 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
1321 reloc_func_desc = interp_load_addr;
1323 close(interpreter_fd);
1324 free(elf_interpreter);
1326 if (elf_entry == ~((target_ulong)0UL)) {
1327 printf("Unable to load interpreter\n");
/* Snarf the symbol table for the built-in disassembler while the
   binary's fd is still open. */
1337 load_symbols(&elf_ex, bprm->fd);
1339 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
1340 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
1342 #ifdef LOW_ELF_STACK
1343 info->start_stack = bprm->p = elf_stack - 4;
1345 bprm->p = create_elf_tables(bprm->p,
1349 load_addr, load_bias,
1351 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1353 info->load_addr = reloc_func_desc;
1354 info->start_brk = info->brk = elf_brk;
1355 info->end_code = end_code;
1356 info->start_code = start_code;
1357 info->start_data = start_data;
1358 info->end_data = end_data;
1359 info->start_stack = bprm->p;
1361 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1363 set_brk(elf_bss, elf_brk);
1365 padzero(elf_bss, elf_brk);
1368 printf("(start_brk) %x\n" , info->start_brk);
1369 printf("(end_code) %x\n" , info->end_code);
1370 printf("(start_code) %x\n" , info->start_code);
1371 printf("(end_data) %x\n" , info->end_data);
1372 printf("(start_stack) %x\n" , info->start_stack);
1373 printf("(brk) %x\n" , info->brk);
1376 if ( info->personality == PER_SVR4 )
1378 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1379 and some applications "depend" upon this behavior.
1380 Since we do not have the power to recompile these, we
1381 emulate the SVr4 behavior. Sigh. */
1382 mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1383 MAP_FIXED | MAP_PRIVATE, -1, 0);
1386 info->entry = elf_entry;
/* Stub: a.out interpreters are recognized but not supported. */
1391 static int load_aout_interp(void * exptr, int interp_fd)
1393 printf("a.out interpreter not yet supported\n");
/* Public wrapper around the per-architecture init_thread() selected
   by the #ifdef blocks above. */
1397 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
1399 init_thread(regs, infop);