1 /* This is the Linux kernel elf-loading code, ported into user space */
15 /* this flag is ineffective under Linux too; it should be deleted */
17 #define MAP_DENYWRITE 0
20 /* should probably go in elf.h */
27 #define ELF_PLATFORM get_elf_platform()
/* Return the AT_PLATFORM string for the emulated x86 CPU ("i386".."i686"),
 * derived from the CPUID family byte of the emulated CPU state.
 * NOTE(review): lines are elided in this view; presumably the family value
 * is clamped and elf_platform is returned -- confirm against full source. */
29 static const char *get_elf_platform(void)
31 static char elf_platform[] = "i386";
32 int family = (global_env->cpuid_version >> 8) & 0xff;
/* Patch the digit in place, e.g. family 6 -> "i686". */
36 elf_platform[1] = '0' + family;
40 #define ELF_HWCAP get_elf_hwcap()
/* Return the AT_HWCAP value: for x86 this is simply the CPUID feature
 * bits of the emulated CPU. */
42 static uint32_t get_elf_hwcap(void)
44 return global_env->cpuid_features;
48 #define ELF_START_MMAP 0x2aaaaab000ULL
49 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
51 #define ELF_CLASS ELFCLASS64
52 #define ELF_DATA ELFDATA2LSB
53 #define ELF_ARCH EM_X86_64
/* x86_64: set up the initial user register state -- stack pointer to the
 * prepared stack, instruction pointer to the ELF entry point. */
55 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
58     regs->rsp = infop->start_stack;
59     regs->rip = infop->entry;
64 #define ELF_START_MMAP 0x80000000
67 * This is used to ensure we don't load something for the wrong architecture.
69 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
72 * These are used to set parameters in the core dumps.
74 #define ELF_CLASS ELFCLASS32
75 #define ELF_DATA ELFDATA2LSB
76 #define ELF_ARCH EM_386
/* i386: set up the initial user register state for the new image. */
78 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
80     regs->esp = infop->start_stack;
81     regs->eip = infop->entry;
83     /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
84        starts %edx contains a pointer to a function which might be
85        registered using `atexit'. This provides a means for the
86        dynamic linker to call DT_FINI functions for shared libraries
87        that have been loaded before the code runs.
89        A value of 0 tells us we have no such handler. */
94 #define USE_ELF_CORE_DUMP
95 #define ELF_EXEC_PAGESIZE 4096
101 #define ELF_START_MMAP 0x80000000
103 #define elf_check_arch(x) ( (x) == EM_ARM )
105 #define ELF_CLASS ELFCLASS32
106 #ifdef TARGET_WORDS_BIGENDIAN
107 #define ELF_DATA ELFDATA2MSB
109 #define ELF_DATA ELFDATA2LSB
111 #define ELF_ARCH EM_ARM
/* ARM: set up the initial user register state.  CPSR is set to user mode,
 * with the Thumb bit taken from bit 0 of the entry address (odd entry =>
 * Thumb code), as the ARM EABI specifies. */
113 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
115     target_long stack = infop->start_stack;
116     memset(regs, 0, sizeof(*regs));
117     regs->ARM_cpsr = 0x10;
118     if (infop->entry & 1)
119       regs->ARM_cpsr |= CPSR_T;
/* Clear the Thumb bit from the actual PC value. */
120     regs->ARM_pc = infop->entry & 0xfffffffe;
121     regs->ARM_sp = infop->start_stack;
/* Initial stack layout is [argc][argv...][envp...]; r1/r2 preload the
 * argv and envp pointers for the benefit of uClinux-style startup code. */
122     regs->ARM_r2 = tgetl(stack + 8); /* envp */
123     regs->ARM_r1 = tgetl(stack + 4); /* argv */
124     /* XXX: it seems that r0 is zeroed after ! */
126     /* For uClinux PIC binaries.  */
127     /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
128     regs->ARM_r10 = infop->start_data;
131 #define USE_ELF_CORE_DUMP
132 #define ELF_EXEC_PAGESIZE 4096
136 ARM_HWCAP_ARM_SWP = 1 << 0,
137 ARM_HWCAP_ARM_HALF = 1 << 1,
138 ARM_HWCAP_ARM_THUMB = 1 << 2,
139 ARM_HWCAP_ARM_26BIT = 1 << 3,
140 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
141 ARM_HWCAP_ARM_FPA = 1 << 5,
142 ARM_HWCAP_ARM_VFP = 1 << 6,
143 ARM_HWCAP_ARM_EDSP = 1 << 7,
146 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
147 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
148 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
153 #ifdef TARGET_SPARC64
155 #define ELF_START_MMAP 0x80000000
157 #define elf_check_arch(x) ( (x) == EM_SPARCV9 )
159 #define ELF_CLASS ELFCLASS64
160 #define ELF_DATA ELFDATA2MSB
161 #define ELF_ARCH EM_SPARCV9
163 #define STACK_BIAS 2047
/* SPARC64: set PC/nPC to the entry point and point the stack register
 * (%o6 = u_regs[14]) below a 16-register save area, biased by the V9
 * STACK_BIAS (2047) as the 64-bit SPARC ABI requires. */
165 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
168     regs->pc = infop->entry;
169     regs->npc = regs->pc + 4;
171     regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
175 #define ELF_START_MMAP 0x80000000
177 #define elf_check_arch(x) ( (x) == EM_SPARC )
179 #define ELF_CLASS ELFCLASS32
180 #define ELF_DATA ELFDATA2MSB
181 #define ELF_ARCH EM_SPARC
/* SPARC (32-bit): like the V9 variant but with 4-byte registers and no
 * stack bias; leave room for the 16-register window save area. */
183 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
186     regs->pc = infop->entry;
187     regs->npc = regs->pc + 4;
189     regs->u_regs[14] = infop->start_stack - 16 * 4;
197 #define ELF_START_MMAP 0x80000000
201 #define elf_check_arch(x) ( (x) == EM_PPC64 )
203 #define ELF_CLASS ELFCLASS64
207 #define elf_check_arch(x) ( (x) == EM_PPC )
209 #define ELF_CLASS ELFCLASS32
213 #ifdef TARGET_WORDS_BIGENDIAN
214 #define ELF_DATA ELFDATA2MSB
216 #define ELF_DATA ELFDATA2LSB
218 #define ELF_ARCH EM_PPC
221 * We need to put in some extra aux table entries to tell glibc what
222 * the cache block size is, so it can use the dcbz instruction safely.
224 #define AT_DCACHEBSIZE 19
225 #define AT_ICACHEBSIZE 20
226 #define AT_UCACHEBSIZE 21
227 /* A special ignored type value for PPC, for glibc compatibility. */
228 #define AT_IGNOREPPC 22
230 * The requirements here are:
231 * - keep the final alignment of sp (sp & 0xf)
232 * - make sure the 32-bit value at the first 16 byte aligned position of
233 * AUXV is greater than 16 for glibc compatibility.
234 * AT_IGNOREPPC is used for that.
235 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
236 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
238 #define DLINFO_ARCH_ITEMS 5
239 #define ARCH_DLINFO \
241 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
242 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
243 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
245 * Now handle glibc compatibility. \
247 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
248 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
/* PPC: set up initial user registers.  On PPC64 the ELF entry points at a
 * function descriptor (entry address + TOC pointer), which is dereferenced
 * here; r3 is preloaded with argc per the BSD-compatible convention. */
251 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
253     target_ulong pos = infop->start_stack;
256     target_ulong entry, toc;
259     _regs->msr = 1 << MSR_PR; /* Set user mode */
260     _regs->gpr[1] = infop->start_stack;
/* PPC64: resolve the function descriptor at the nominal entry point. */
262     entry = ldq_raw(infop->entry) + infop->load_addr;
263     toc = ldq_raw(infop->entry + 8) + infop->load_addr;
265     infop->entry = entry;
267     _regs->nip = infop->entry;
268     /* Note that this isn't exactly what the regular kernel does
269      * but this is what the ABI wants and is needed to allow
270      * execution of PPC BSD programs.
272     _regs->gpr[3] = tgetl(pos);
273     pos += sizeof(target_ulong);
/* Skip over the argv array (terminated by a NULL pointer). */
275     for (tmp = 1; tmp != 0; pos += sizeof(target_ulong))
280 #define USE_ELF_CORE_DUMP
281 #define ELF_EXEC_PAGESIZE 4096
287 #define ELF_START_MMAP 0x80000000
289 #define elf_check_arch(x) ( (x) == EM_MIPS )
292 #define ELF_CLASS ELFCLASS64
294 #define ELF_CLASS ELFCLASS32
296 #ifdef TARGET_WORDS_BIGENDIAN
297 #define ELF_DATA ELFDATA2MSB
299 #define ELF_DATA ELFDATA2LSB
301 #define ELF_ARCH EM_MIPS
/* MIPS: start in user mode (CP0 status UM bit), EPC at the entry point,
 * $29 (sp) at the prepared stack. */
303 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
305     regs->cp0_status = CP0St_UM;
306     regs->cp0_epc = infop->entry;
307     regs->regs[29] = infop->start_stack;
310 #define USE_ELF_CORE_DUMP
311 #define ELF_EXEC_PAGESIZE 4096
313 #endif /* TARGET_MIPS */
317 #define ELF_START_MMAP 0x80000000
319 #define elf_check_arch(x) ( (x) == EM_SH )
321 #define ELF_CLASS ELFCLASS32
322 #define ELF_DATA ELFDATA2LSB
323 #define ELF_ARCH EM_SH
/* SH4: PC at the entry point, r15 (stack pointer) at the prepared stack. */
325 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
327   /* Check other registers XXXXX */
328   regs->pc = infop->entry;
329   regs->regs[15] = infop->start_stack;
332 #define USE_ELF_CORE_DUMP
333 #define ELF_EXEC_PAGESIZE 4096
339 #define ELF_START_MMAP 0x80000000
341 #define elf_check_arch(x) ( (x) == EM_68K )
343 #define ELF_CLASS ELFCLASS32
344 #define ELF_DATA ELFDATA2MSB
345 #define ELF_ARCH EM_68K
347 /* ??? Does this need to do anything?
348 #define ELF_PLAT_INIT(_r) */
/* m68k: user stack pointer and PC from the image info. */
350 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
352     regs->usp = infop->start_stack;
354     regs->pc = infop->entry;
357 #define USE_ELF_CORE_DUMP
358 #define ELF_EXEC_PAGESIZE 8192
364 #define ELF_START_MMAP (0x30000000000ULL)
366 #define elf_check_arch(x) ( (x) == ELF_ARCH )
368 #define ELF_CLASS ELFCLASS64
369 #define ELF_DATA ELFDATA2MSB
370 #define ELF_ARCH EM_ALPHA
/* Alpha: PC/SP from the image; the per-thread "unique" value is seeded
 * from start_data (questionable -- see the '?' below). */
372 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
374     regs->pc = infop->entry;
376     regs->usp = infop->start_stack;
377     regs->unique = infop->start_data; /* ? */
/* NOTE(review): leftover debug output -- consider removing or gating
 * behind a debug #define. */
378     printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
379            regs->unique, infop->start_data);
382 #define USE_ELF_CORE_DUMP
383 #define ELF_EXEC_PAGESIZE 8192
385 #endif /* TARGET_ALPHA */
388 #define ELF_PLATFORM (NULL)
399 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
400 unsigned int a_text; /* length of text, in bytes */
401 unsigned int a_data; /* length of data, in bytes */
402 unsigned int a_bss; /* length of uninitialized data area, in bytes */
403 unsigned int a_syms; /* length of symbol table data in file, in bytes */
404 unsigned int a_entry; /* start address */
405 unsigned int a_trsize; /* length of relocation info for text, in bytes */
406 unsigned int a_drsize; /* length of relocation info for data, in bytes */
410 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
416 /* max code+data+bss space allocated to elf interpreter */
417 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
419 /* max code+data+bss+brk space allocated to ET_DYN executables */
420 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
422 /* from personality.h */
424 /* Flags for bug emulation. These occupy the top three bytes. */
425 #define STICKY_TIMEOUTS 0x4000000
426 #define WHOLE_SECONDS 0x2000000
428 /* Personality types. These go in the low byte. Avoid using the top bit,
429 * it will conflict with error returns.
431 #define PER_MASK (0x00ff)
432 #define PER_LINUX (0x0000)
433 #define PER_SVR4 (0x0001 | STICKY_TIMEOUTS)
434 #define PER_SVR3 (0x0002 | STICKY_TIMEOUTS)
435 #define PER_SCOSVR3 (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
436 #define PER_WYSEV386 (0x0004 | STICKY_TIMEOUTS)
437 #define PER_ISCR4 (0x0005 | STICKY_TIMEOUTS)
438 #define PER_BSD (0x0006)
439 #define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
441 /* Necessary parameters */
442 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
443 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
444 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
446 #define INTERPRETER_NONE 0
447 #define INTERPRETER_AOUT 1
448 #define INTERPRETER_ELF 2
450 #define DLINFO_ITEMS 12
/* Kernel-style name kept for symmetry with the original fs/binfmt_elf.c
 * code; in user-space emulation there is no fs segment.  NOTE(review):
 * body elided in this view -- presumably a plain memcpy; confirm. */
452 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
457 extern unsigned long x86_stack_size;
459 static int load_aout_interp(void * exptr, int interp_fd);
/* Byte-swap all multi-byte fields of an ELF file header in place
 * (used when target and host endianness differ). */
462 static void bswap_ehdr(struct elfhdr *ehdr)
464     bswap16s(&ehdr->e_type);			/* Object file type */
465     bswap16s(&ehdr->e_machine);		/* Architecture */
466     bswap32s(&ehdr->e_version);		/* Object file version */
467     bswaptls(&ehdr->e_entry);		/* Entry point virtual address */
468     bswaptls(&ehdr->e_phoff);		/* Program header table file offset */
469     bswaptls(&ehdr->e_shoff);		/* Section header table file offset */
470     bswap32s(&ehdr->e_flags);		/* Processor-specific flags */
471     bswap16s(&ehdr->e_ehsize);		/* ELF header size in bytes */
472     bswap16s(&ehdr->e_phentsize);		/* Program header table entry size */
473     bswap16s(&ehdr->e_phnum);		/* Program header table entry count */
474     bswap16s(&ehdr->e_shentsize);		/* Section header table entry size */
475     bswap16s(&ehdr->e_shnum);		/* Section header table entry count */
476     bswap16s(&ehdr->e_shstrndx);		/* Section header string table index */
/* Byte-swap all fields of an ELF program header in place. */
479 static void bswap_phdr(struct elf_phdr *phdr)
481     bswap32s(&phdr->p_type);			/* Segment type */
482     bswaptls(&phdr->p_offset);		/* Segment file offset */
483     bswaptls(&phdr->p_vaddr);		/* Segment virtual address */
484     bswaptls(&phdr->p_paddr);		/* Segment physical address */
485     bswaptls(&phdr->p_filesz);		/* Segment size in file */
486     bswaptls(&phdr->p_memsz);		/* Segment size in memory */
487     bswap32s(&phdr->p_flags);		/* Segment flags */
488     bswaptls(&phdr->p_align);		/* Segment alignment */
/* Byte-swap all fields of an ELF section header in place. */
491 static void bswap_shdr(struct elf_shdr *shdr)
493     bswap32s(&shdr->sh_name);
494     bswap32s(&shdr->sh_type);
495     bswaptls(&shdr->sh_flags);
496     bswaptls(&shdr->sh_addr);
497     bswaptls(&shdr->sh_offset);
498     bswaptls(&shdr->sh_size);
499     bswap32s(&shdr->sh_link);
500     bswap32s(&shdr->sh_info);
501     bswaptls(&shdr->sh_addralign);
502     bswaptls(&shdr->sh_entsize);
/* Byte-swap the multi-byte fields of an ELF symbol table entry in place
 * (st_info/st_other are single bytes and need no swapping). */
505 static void bswap_sym(struct elf_sym *sym)
507     bswap32s(&sym->st_name);
508     bswaptls(&sym->st_value);
509     bswaptls(&sym->st_size);
510     bswap16s(&sym->st_shndx);
515  * 'copy_elf_strings()' copies argument/environment strings from user
516  * memory to free pages in kernel mem. These are in a format ready
517  * to be put directly into the top of new user memory.
/* Strings are copied top-down: p is the current (descending) offset into
 * the MAX_ARG_PAGES page array; returns the updated offset, or 0 on error. */
520 static target_ulong copy_elf_strings(int argc,char ** argv, void **page,
523     char *tmp, *tmp1, *pag = NULL;
527 	    return 0;	/* bullet-proofing */
532 	    fprintf(stderr, "VFS: argc is wrong");
538 	    if (p < len) {	/* this shouldn't happen - 128kB */
/* Locate (or lazily allocate and zero) the page this string lands in. */
544 	        offset = p % TARGET_PAGE_SIZE;
545                 pag = (char *)page[p/TARGET_PAGE_SIZE];
547                     pag = (char *)malloc(TARGET_PAGE_SIZE);
548                     memset(pag, 0, TARGET_PAGE_SIZE);
549                     page[p/TARGET_PAGE_SIZE] = pag;
/* Single-byte copy when at a page boundary or one byte remains... */
554 	        if (len == 0 || offset == 0) {
555 	            *(pag + offset) = *tmp;
/* ...otherwise copy as much of the string as fits in this page. */
558 	            int bytes_to_copy = (len > offset) ? offset : len;
559 	            tmp -= bytes_to_copy;
561 	            offset -= bytes_to_copy;
562 	            len -= bytes_to_copy;
563 	            memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
/* Map the target stack and copy the argument pages (filled by
 * copy_elf_strings) to its top.  Returns the adjusted stack offset p
 * translated into a target address. */
570 target_ulong setup_arg_pages(target_ulong p, struct linux_binprm * bprm,
571                              struct image_info * info)
573     target_ulong stack_base, size, error;
576     /* Create enough stack to hold everything.  If we don't use
577      * it for args, we'll use it for something else...
/* Stack size is configurable but never smaller than the argument area. */
579     size = x86_stack_size;
580     if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
581         size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
582     error = target_mmap(0,
583                         size + qemu_host_page_size,
584                         PROT_READ | PROT_WRITE,
585                         MAP_PRIVATE | MAP_ANONYMOUS,
591     /* we reserve one extra page at the top of the stack as guard */
592     target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
594     stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
597     for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
/* Copy each temporary host page into target memory, then free it. */
601             memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
604         stack_base += TARGET_PAGE_SIZE;
/* Map anonymous zeroed pages covering [start, end) for the bss/brk area.
 * Errors are reported and treated as fatal (the process cannot run
 * without its bss). */
609 static void set_brk(target_ulong start, target_ulong end)
611 	/* page-align the start and end addresses... */
612         start = HOST_PAGE_ALIGN(start);
613         end = HOST_PAGE_ALIGN(end);
616 	if(target_mmap(start, end - start,
617                        PROT_READ | PROT_WRITE | PROT_EXEC,
618                        MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
619 	    perror("cannot mmap brk");
625 /* We need to explicitly zero any fractional pages after the data
626    section (i.e. bss).  This would contain the junk from the file that
627    should not be in memory. */
628 static void padzero(target_ulong elf_bss, target_ulong last_bss)
632     if (elf_bss >= last_bss)
635     /* XXX: this is really a hack : if the real host page size is
636        smaller than the target page size, some pages after the end
637        of the file may not be mapped. A better fix would be to
638        patch target_mmap(), but it is more complicated as the file
639        size must be known */
640     if (qemu_real_host_page_size < qemu_host_page_size) {
641         target_ulong end_addr, end_addr1;
/* Round elf_bss up to a real host page, and up to a target page; any
 * gap between the two may be unmapped and must be filled anonymously. */
642         end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
643             ~(qemu_real_host_page_size - 1);
644         end_addr = HOST_PAGE_ALIGN(elf_bss);
645         if (end_addr1 < end_addr) {
646             mmap((void *)g2h(end_addr1), end_addr - end_addr1,
647                  PROT_READ|PROT_WRITE|PROT_EXEC,
648                  MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
/* Zero the tail of the last partially-used page. */
652     nbyte = elf_bss & (qemu_host_page_size-1);
654         nbyte = qemu_host_page_size - nbyte;
/* Build the initial process stack image above the strings already copied
 * by copy_elf_strings: platform string, 16-byte alignment padding, the
 * ELF auxiliary vector (auxv), then envp/argv pointer arrays and argc.
 * Returns the final stack pointer value. */
663 static target_ulong create_elf_tables(target_ulong p, int argc, int envc,
664                                       struct elfhdr * exec,
665                                       target_ulong load_addr,
666                                       target_ulong load_bias,
667                                       target_ulong interp_load_addr, int ibcs,
668                                       struct image_info *info)
672         target_ulong u_platform;
673         const char *k_platform;
674         const int n = sizeof(elf_addr_t);
/* Copy the platform string (if any) onto the stack, n-byte aligned. */
678         k_platform = ELF_PLATFORM;
680             size_t len = strlen(k_platform) + 1;
681             sp -= (len + n - 1) & ~(n - 1);
683             memcpy_to_target(sp, k_platform, len);
686 	 * Force 16 byte _final_ alignment here for generality.
688         sp = sp &~ (target_ulong)15;
/* Count the auxv/envp/argv slots so alignment can be pre-computed. */
689         size = (DLINFO_ITEMS + 1) * 2;
692 #ifdef DLINFO_ARCH_ITEMS
693         size += DLINFO_ARCH_ITEMS * 2;
695         size += envc + argc + 2;
696 	size += (!ibcs ? 3 : 1);	/* argc itself */
699             sp -= 16 - (size & 15);
701         /* This is correct because Linux defines
702          * elf_addr_t as Elf32_Off / Elf64_Off
704 #if ELF_CLASS == ELFCLASS32
705 #define NEW_AUX_ENT(id, val) do { \
706                 sp -= n; tput32(sp, val); \
707                 sp -= n; tput32(sp, id); \
710 #define NEW_AUX_ENT(id, val) do { \
711                 sp -= n; tput64(sp, val); \
712                 sp -= n; tput64(sp, id); \
/* auxv is built downward, so the terminator is pushed first. */
715         NEW_AUX_ENT (AT_NULL, 0);
717         /* There must be exactly DLINFO_ITEMS entries here.  */
718         NEW_AUX_ENT(AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
719         NEW_AUX_ENT(AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
720         NEW_AUX_ENT(AT_PHNUM, (target_ulong)(exec->e_phnum));
721         NEW_AUX_ENT(AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
722         NEW_AUX_ENT(AT_BASE, (target_ulong)(interp_load_addr));
723         NEW_AUX_ENT(AT_FLAGS, (target_ulong)0);
724         NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
725         NEW_AUX_ENT(AT_UID, (target_ulong) getuid());
726         NEW_AUX_ENT(AT_EUID, (target_ulong) geteuid());
727         NEW_AUX_ENT(AT_GID, (target_ulong) getgid());
728         NEW_AUX_ENT(AT_EGID, (target_ulong) getegid());
729         NEW_AUX_ENT(AT_HWCAP, (target_ulong) ELF_HWCAP);
731             NEW_AUX_ENT(AT_PLATFORM, u_platform);
734          * ARCH_DLINFO must come last so platform specific code can enforce
735          * special alignment requirements on the AUXV if necessary (eg. PPC).
741         sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
/* Map the ELF interpreter (dynamic linker) into target memory.
 * Returns the interpreter's entry point (adjusted by its load address)
 * and stores the load address through *interp_load_addr; returns
 * ~(target_ulong)0 on any failure. */
746 static target_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
748                                     target_ulong *interp_load_addr)
750 	struct elf_phdr *elf_phdata  =  NULL;
751 	struct elf_phdr *eppnt;
752 	target_ulong load_addr = 0;
753 	int load_addr_set = 0;
755 	target_ulong last_bss, elf_bss;
764 	bswap_ehdr(interp_elf_ex);
766 	/* First of all, some simple consistency checks */
767 	if ((interp_elf_ex->e_type != ET_EXEC &&
768              interp_elf_ex->e_type != ET_DYN) ||
769 	   !elf_check_arch(interp_elf_ex->e_machine)) {
770 		return ~((target_ulong)0UL);
774 	/* Now read in all of the header information */
/* Sanity bound: the whole program header table must fit in one page. */
776 	if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
777 	    return ~(target_ulong)0UL;
779 	elf_phdata =  (struct elf_phdr *)
780 		malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
783 	  return ~((target_ulong)0UL);
786 	 * If the size of this structure has changed, then punt, since
787 	 * we will be doing the wrong thing.
789 	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
791 	    return ~((target_ulong)0UL);
794 	retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
796 	    retval = read(interpreter_fd,
798 			   sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
801 		perror("load_elf_interp");
808         for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
813         if (interp_elf_ex->e_type == ET_DYN) {
814             /* in order to avoid hardcoding the interpreter load
815                address in qemu, we allocate a big enough memory zone */
816             error = target_mmap(0, INTERP_MAP_SIZE,
817                                 PROT_NONE, MAP_PRIVATE | MAP_ANON,
/* Map each PT_LOAD segment with permissions from its p_flags. */
828 	for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
829 	  if (eppnt->p_type == PT_LOAD) {
830 	    int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
832 	    target_ulong vaddr = 0;
835 	    if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
836 	    if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
837 	    if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
838 	    if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
839 	    	elf_type |= MAP_FIXED;
840 	    	vaddr = eppnt->p_vaddr;
842 	    error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
843 		 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
847 		 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
851 	      close(interpreter_fd);
853 	      return ~((target_ulong)0UL);
/* First successful ET_DYN mapping fixes the interpreter's base address. */
856 	    if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
862 	     * Find the end of the file mapping for this phdr, and keep
863 	     * track of the largest address we see for this.
865 	    k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
866 	    if (k > elf_bss) elf_bss = k;
869 	     * Do the same thing for the memory mapping - between
870 	     * elf_bss and last_bss is the bss section.
872 	    k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
873 	    if (k > last_bss) last_bss = k;
876 	/* Now use mmap to map the library into memory. */
878 	close(interpreter_fd);
881 	 * Now fill out the bss section.  First pad the last page up
882 	 * to the page boundary, and then perform a mmap to make sure
883 	 * that there are zeromapped pages up to and including the last
886 	padzero(elf_bss, last_bss);
887 	elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
889 	/* Map the last of the bss segment */
890 	if (last_bss > elf_bss) {
891             target_mmap(elf_bss, last_bss-elf_bss,
892                         PROT_READ|PROT_WRITE|PROT_EXEC,
893                         MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
897 	*interp_load_addr = load_addr;
898 	return ((target_ulong) interp_elf_ex->e_entry) + load_addr;
901 /* Best attempt to load symbols from this ELF object. */
/* Reads the first SHT_SYMTAB section and its string table into the
 * disassembler's symbol-table list; failures are silently ignored
 * (symbols are optional).  On 64-bit targets the symbols are converted
 * to 32-bit form because the disassembler works with elf32_sym. */
902 static void load_symbols(struct elfhdr *hdr, int fd)
905     struct elf_shdr sechdr, symtab, strtab;
908 #if (ELF_CLASS == ELFCLASS64)
909     // Disas uses 32 bit symbols
910     struct elf32_sym *syms32 = NULL;
/* Scan the section headers for a symbol table. */
914     lseek(fd, hdr->e_shoff, SEEK_SET);
915     for (i = 0; i < hdr->e_shnum; i++) {
916 	if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
921 	if (sechdr.sh_type == SHT_SYMTAB) {
/* sh_link of a SYMTAB section names its string-table section. */
923 	    lseek(fd, hdr->e_shoff
924 		  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
925 	    if (read(fd, &strtab, sizeof(strtab))
934     return; /* Shouldn't happen... */
937     /* Now know where the strtab and symtab are.  Snarf them. */
/* NOTE(review): malloc results for s and syms32 are not checked here --
 * only disas_symtab/disas_strtab are validated below. */
938     s = malloc(sizeof(*s));
939     s->disas_symtab = malloc(symtab.sh_size);
940 #if (ELF_CLASS == ELFCLASS64)
941     syms32 = malloc(symtab.sh_size / sizeof(struct elf_sym)
942                     * sizeof(struct elf32_sym));
944     s->disas_strtab = strings = malloc(strtab.sh_size);
945     if (!s->disas_symtab || !s->disas_strtab)
948     lseek(fd, symtab.sh_offset, SEEK_SET);
949     if (read(fd, s->disas_symtab, symtab.sh_size) != symtab.sh_size)
952     for (i = 0; i < symtab.sh_size / sizeof(struct elf_sym); i++) {
954 	bswap_sym(s->disas_symtab + sizeof(struct elf_sym)*i);
956 #if (ELF_CLASS == ELFCLASS64)
/* Narrow each 64-bit symbol to the 32-bit layout the disassembler uses. */
957         sym = s->disas_symtab + sizeof(struct elf_sym)*i;
958         syms32[i].st_name  = sym->st_name;
959         syms32[i].st_info  = sym->st_info;
960         syms32[i].st_other = sym->st_other;
961         syms32[i].st_shndx = sym->st_shndx;
962         syms32[i].st_value = sym->st_value & 0xffffffff;
963         syms32[i].st_size  = sym->st_size & 0xffffffff;
967 #if (ELF_CLASS == ELFCLASS64)
968     free(s->disas_symtab);
969     s->disas_symtab = syms32;
971     lseek(fd, strtab.sh_offset, SEEK_SET);
972     if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
974     s->disas_num_syms = symtab.sh_size / sizeof(struct elf_sym);
/* Main ELF loader: validate the header, copy argv/envp strings, map the
 * PT_LOAD segments (and the interpreter if PT_INTERP is present), build
 * the stack/auxv, and fill in *info for the caller.  Mirrors the kernel's
 * fs/binfmt_elf.c load_elf_binary(). */
979 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
980                     struct image_info * info)
982     struct elfhdr elf_ex;
983     struct elfhdr interp_elf_ex;
984     struct exec interp_ex;
985     int interpreter_fd = -1; /* avoid warning */
986     target_ulong load_addr, load_bias;
987     int load_addr_set = 0;
988     unsigned int interpreter_type = INTERPRETER_NONE;
989     unsigned char ibcs2_interpreter;
991     target_ulong mapped_addr;
992     struct elf_phdr * elf_ppnt;
993     struct elf_phdr *elf_phdata;
994     target_ulong elf_bss, k, elf_brk;
996     char * elf_interpreter;
997     target_ulong elf_entry, interp_load_addr = 0;
999     target_ulong start_code, end_code, start_data, end_data;
1000     target_ulong reloc_func_desc = 0;
1001     target_ulong elf_stack;
1002     char passed_fileno[6];
1004     ibcs2_interpreter = 0;
1008     elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */
1010     bswap_ehdr(&elf_ex);
1013     /* First of all, some simple consistency checks */
1014     if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
1015        (! elf_check_arch(elf_ex.e_machine))) {
/* Filename, environment, then arguments -- copied top-down into the
 * temporary argument pages. */
1019     bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1020     bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
1021     bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
1026     /* Now read in all of the header information */
1027     elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
1028     if (elf_phdata == NULL) {
1032     retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
1034         retval = read(bprm->fd, (char *) elf_phdata,
1035                    elf_ex.e_phentsize * elf_ex.e_phnum);
1039         perror("load_elf_binary");
1046     elf_ppnt = elf_phdata;
1047     for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
1048         bswap_phdr(elf_ppnt);
1051     elf_ppnt = elf_phdata;
1057     elf_stack = ~((target_ulong)0UL);
1058     elf_interpreter = NULL;
1059     start_code = ~((target_ulong)0UL);
/* First pass over the program headers: find and read PT_INTERP. */
1064     for(i=0;i < elf_ex.e_phnum; i++) {
1065         if (elf_ppnt->p_type == PT_INTERP) {
1066             if ( elf_interpreter != NULL )
1069                 free(elf_interpreter);
1074             /* This is the program interpreter used for
1075              * shared libraries - for now assume that this
1076              * is an a.out format binary
1079             elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
1081             if (elf_interpreter == NULL) {
1087             retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
1089                 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
1092                 perror("load_elf_binary2");
1096             /* If the program interpreter is one of these two,
1097                then assume an iBCS2 image. Otherwise assume
1098                a native linux image. */
1100             /* JRP - Need to add X86 lib dir stuff here... */
1102             if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1103                 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1104               ibcs2_interpreter = 1;
1108             printf("Using ELF interpreter %s\n", elf_interpreter);
/* path() redirects to the sysroot prefix if one is configured. */
1111             retval = open(path(elf_interpreter), O_RDONLY);
1113                 interpreter_fd = retval;
1116                 perror(elf_interpreter);
1118                 /* retval = -errno; */
/* Read the interpreter's header so its format can be determined below. */
1123             retval = lseek(interpreter_fd, 0, SEEK_SET);
1125                 retval = read(interpreter_fd,bprm->buf,128);
1129                 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1130                 interp_elf_ex=*((struct elfhdr *) bprm->buf); /* elf exec-header */
1133                 perror("load_elf_binary3");
1136                 free(elf_interpreter);
1144     /* Some simple consistency checks for the interpreter */
1145     if (elf_interpreter){
1146         interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1148         /* Now figure out which format our binary is */
1149         if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1150             (N_MAGIC(interp_ex) != QMAGIC)) {
1151             interpreter_type = INTERPRETER_ELF;
1154         if (interp_elf_ex.e_ident[0] != 0x7f ||
1155             strncmp(&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
1156             interpreter_type &= ~INTERPRETER_ELF;
1159         if (!interpreter_type) {
1160             free(elf_interpreter);
1167     /* OK, we are done with that, now set up the arg stuff,
1168        and then start this sucker up */
/* a.out interpreters receive the open fd of the binary as a string arg. */
1173         if (interpreter_type == INTERPRETER_AOUT) {
1174             snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1175             passed_p = passed_fileno;
1177             if (elf_interpreter) {
1178                 bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
1183         if (elf_interpreter) {
1184             free(elf_interpreter);
1192     /* OK, This is the point of no return */
1195     info->start_mmap = (target_ulong)ELF_START_MMAP;
1197     elf_entry = (target_ulong) elf_ex.e_entry;
1199     /* Do this so that we can load the interpreter, if need be.  We will
1200        change some of these later */
1202     bprm->p = setup_arg_pages(bprm->p, bprm, info);
1203     info->start_stack = bprm->p;
1205     /* Now we do a little grungy work by mmaping the ELF image into
1206      * the correct location in memory.  At this point, we assume that
1207      * the image should be loaded at fixed address, not at a variable
/* Second pass: map every PT_LOAD segment and track code/data/bss bounds. */
1211     for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1216         if (elf_ppnt->p_type != PT_LOAD)
1219         if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1220         if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1221         if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1222         elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1223         if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1224             elf_flags |= MAP_FIXED;
1225         } else if (elf_ex.e_type == ET_DYN) {
1226             /* Try and get dynamic programs out of the way of the default mmap
1227                base, as well as whatever program they might try to exec.  This
1228                is because the brk will follow the loader, and is not movable.  */
1229             /* NOTE: for qemu, we do a big mmap to get enough space
1230                without hardcoding any address */
1231             error = target_mmap(0, ET_DYN_MAP_SIZE,
1232                                 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1238             load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
1241         error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1242                             (elf_ppnt->p_filesz +
1243                              TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1245                             (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1247                             (elf_ppnt->p_offset -
1248                              TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1254 #ifdef LOW_ELF_STACK
1255         if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1256             elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
/* The first mapped segment fixes load_addr/load_bias for the image. */
1259         if (!load_addr_set) {
1261             load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1262             if (elf_ex.e_type == ET_DYN) {
1263                 load_bias += error -
1264                     TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1265                 load_addr += load_bias;
1266                 reloc_func_desc = load_bias;
1269         k = elf_ppnt->p_vaddr;
1274         k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1277         if ((elf_ppnt->p_flags & PF_X) && end_code <  k)
1281         k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1282         if (k > elf_brk) elf_brk = k;
/* Relocate all recorded addresses by the chosen load bias. */
1285     elf_entry += load_bias;
1286     elf_bss += load_bias;
1287     elf_brk += load_bias;
1288     start_code += load_bias;
1289     end_code += load_bias;
1290     start_data += load_bias;
1291     end_data += load_bias;
1293     if (elf_interpreter) {
1294         if (interpreter_type & 1) {
1295             elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
1297         else if (interpreter_type & 2) {
1298             elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
/* Control transfers to the interpreter; relocation base follows it. */
1301             reloc_func_desc = interp_load_addr;
1303         close(interpreter_fd);
1304         free(elf_interpreter);
1306         if (elf_entry == ~((target_ulong)0UL)) {
1307             printf("Unable to load interpreter\n");
1317     load_symbols(&elf_ex, bprm->fd);
1319     if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
1320     info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
1322 #ifdef LOW_ELF_STACK
1323     info->start_stack = bprm->p = elf_stack - 4;
1325     bprm->p = create_elf_tables(bprm->p,
1329                                 load_addr, load_bias,
1331                                 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1333     info->load_addr = reloc_func_desc;
1334     info->start_brk = info->brk = elf_brk;
1335     info->end_code = end_code;
1336     info->start_code = start_code;
1337     info->start_data = start_data;
1338     info->end_data = end_data;
1339     info->start_stack = bprm->p;
1341     /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1343     set_brk(elf_bss, elf_brk);
1345     padzero(elf_bss, elf_brk);
1348     printf("(start_brk) %x\n" , info->start_brk);
1349     printf("(end_code) %x\n" , info->end_code);
1350     printf("(start_code) %x\n" , info->start_code);
1351     printf("(end_data) %x\n" , info->end_data);
1352     printf("(start_stack) %x\n" , info->start_stack);
1353     printf("(brk) %x\n" , info->brk);
1356     if ( info->personality == PER_SVR4 )
1358             /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
1359                and some applications "depend" upon this behavior.
1360                Since we do not have the power to recompile these, we
1361                emulate the SVr4 behavior.  Sigh.  */
1362             mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1363                                       MAP_FIXED | MAP_PRIVATE, -1, 0);
1366     info->entry = elf_entry;
/* Stub: a.out-format interpreters are not implemented; prints a message
 * (the error return is in the elided lines of this view). */
1371 static int load_aout_interp(void * exptr, int interp_fd)
1373     printf("a.out interpreter not yet supported\n");
/* Public wrapper around the per-architecture init_thread() selected by
 * the #ifdef blocks above. */
1377 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
1379     init_thread(regs, infop);