1 /* This is the Linux kernel elf-loading code, ported into user space */
16 /* this flag is ineffective under linux too, should be deleted */
18 #define MAP_DENYWRITE 0
21 /* should probably go in elf.h */
28 #define ELF_PLATFORM get_elf_platform()
30 static const char *get_elf_platform(void)
/* Build the AT_PLATFORM string for the emulated x86 CPU.  The static
   buffer is patched in place: "i386" -> "i486"/"i586"/"i686" depending
   on the CPU family reported by cpuid. */
32 static char elf_platform[] = "i386";
/* Family field of the cpuid version word lives at bits 8..11
   (masked a byte wide here). */
33 int family = (global_env->cpuid_version >> 8) & 0xff;
/* Overwrite the family digit; the elided lines presumably clamp
   family to a sane 3..6 range — TODO confirm against full source. */
37 elf_platform[1] = '0' + family;
41 #define ELF_HWCAP get_elf_hwcap()
43 static uint32_t get_elf_hwcap(void)
/* AT_HWCAP value for x86: expose the emulated CPU's cpuid feature bits
   directly to the guest. */
45 return global_env->cpuid_features;
48 #define ELF_START_MMAP 0x80000000
51 * This is used to ensure we don't load something for the wrong architecture.
53 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
56 * These are used to set parameters in the core dumps.
58 #define ELF_CLASS ELFCLASS32
59 #define ELF_DATA ELFDATA2LSB
60 #define ELF_ARCH EM_386
62 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
63 starts %edx contains a pointer to a function which might be
64 registered using `atexit'. This provides a means for the
65 dynamic linker to call DT_FINI functions for shared libraries
66 that have been loaded before the code runs.
68 A value of 0 tells we have no such handler. */
69 #define ELF_PLAT_INIT(_r) _r->edx = 0
71 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
/* Initial i386 user register state: stack pointer and entry point. */
73 regs->esp = infop->start_stack;
74 regs->eip = infop->entry;
77 #define USE_ELF_CORE_DUMP
78 #define ELF_EXEC_PAGESIZE 4096
84 #define ELF_START_MMAP 0x80000000
86 #define elf_check_arch(x) ( (x) == EM_ARM )
88 #define ELF_CLASS ELFCLASS32
89 #ifdef TARGET_WORDS_BIGENDIAN
90 #define ELF_DATA ELFDATA2MSB
92 #define ELF_DATA ELFDATA2LSB
94 #define ELF_ARCH EM_ARM
96 #define ELF_PLAT_INIT(_r) _r->ARM_r0 = 0
98 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
/* Initial ARM user register state, reading argv/envp pointers straight
   off the freshly built target stack (argc@sp, argv@sp+4, envp@sp+8). */
100 target_long stack = infop->start_stack;
101 memset(regs, 0, sizeof(*regs));
/* CPSR 0x10 = ARM user mode. */
102 regs->ARM_cpsr = 0x10;
/* An odd entry address marks a Thumb entry point. */
103 if (infop->entry & 1)
104 regs->ARM_cpsr |= CPSR_T;
105 regs->ARM_pc = infop->entry & 0xfffffffe;
106 regs->ARM_sp = infop->start_stack;
107 regs->ARM_r2 = tgetl(stack + 8); /* envp */
108 regs->ARM_r1 = tgetl(stack + 4); /* argv */
109 /* XXX: it seems that r0 is zeroed after ! */
110 // regs->ARM_r0 = tgetl(stack); /* argc */
113 #define USE_ELF_CORE_DUMP
114 #define ELF_EXEC_PAGESIZE 4096
118 ARM_HWCAP_ARM_SWP = 1 << 0,
119 ARM_HWCAP_ARM_HALF = 1 << 1,
120 ARM_HWCAP_ARM_THUMB = 1 << 2,
121 ARM_HWCAP_ARM_26BIT = 1 << 3,
122 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
123 ARM_HWCAP_ARM_FPA = 1 << 5,
124 ARM_HWCAP_ARM_VFP = 1 << 6,
125 ARM_HWCAP_ARM_EDSP = 1 << 7,
128 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
129 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
130 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
135 #ifdef TARGET_SPARC64
137 #define ELF_START_MMAP 0x80000000
139 #define elf_check_arch(x) ( (x) == EM_SPARC )
141 #define ELF_CLASS ELFCLASS64
142 #define ELF_DATA ELFDATA2MSB
143 #define ELF_ARCH EM_SPARC
146 #define ELF_PLAT_INIT(_r)
148 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
/* Initial SPARC64 user state: PC/NPC delay-slot pair, and %sp
   (u_regs[14]) biased below start_stack to leave room for a
   16-word register save area. */
151 regs->pc = infop->entry;
152 regs->npc = regs->pc + 4;
154 regs->u_regs[14] = infop->start_stack - 16 * 4;
158 #define ELF_START_MMAP 0x80000000
160 #define elf_check_arch(x) ( (x) == EM_SPARC )
162 #define ELF_CLASS ELFCLASS32
163 #define ELF_DATA ELFDATA2MSB
164 #define ELF_ARCH EM_SPARC
167 #define ELF_PLAT_INIT(_r)
169 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
/* Initial 32-bit SPARC user state — mirrors the SPARC64 variant:
   PC/NPC delay-slot pair, %sp below the 16-word save area. */
172 regs->pc = infop->entry;
173 regs->npc = regs->pc + 4;
175 regs->u_regs[14] = infop->start_stack - 16 * 4;
183 #define ELF_START_MMAP 0x80000000
185 #define elf_check_arch(x) ( (x) == EM_PPC )
187 #define ELF_CLASS ELFCLASS32
188 #ifdef TARGET_WORDS_BIGENDIAN
189 #define ELF_DATA ELFDATA2MSB
191 #define ELF_DATA ELFDATA2LSB
193 #define ELF_ARCH EM_PPC
195 /* Note that this isn't exactly what the regular kernel does
196 * but this is what the ABI wants and is needed to allow
197 * execution of PPC BSD programs.
199 #define ELF_PLAT_INIT(_r) \
201 target_ulong *pos = (target_ulong *)bprm->p, tmp = 1; \
202 _r->gpr[3] = bprm->argc; \
203 _r->gpr[4] = (unsigned long)++pos; \
204 for (; tmp != 0; pos++) \
206 _r->gpr[5] = (unsigned long)pos; \
210 * We need to put in some extra aux table entries to tell glibc what
211 * the cache block size is, so it can use the dcbz instruction safely.
213 #define AT_DCACHEBSIZE 19
214 #define AT_ICACHEBSIZE 20
215 #define AT_UCACHEBSIZE 21
216 /* A special ignored type value for PPC, for glibc compatibility. */
217 #define AT_IGNOREPPC 22
219 * The requirements here are:
220 * - keep the final alignment of sp (sp & 0xf)
221 * - make sure the 32-bit value at the first 16 byte aligned position of
222 * AUXV is greater than 16 for glibc compatibility.
223 * AT_IGNOREPPC is used for that.
224 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
225 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
227 #define DLINFO_ARCH_ITEMS 5
228 #define ARCH_DLINFO \
230 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
231 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
232 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
234 * Now handle glibc compatibility. \
236 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
237 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
240 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
/* Initial PowerPC user state. */
242 _regs->msr = 1 << MSR_PR; /* Set user mode */
243 _regs->gpr[1] = infop->start_stack; /* r1 is the ABI stack pointer */
244 _regs->nip = infop->entry;
247 #define USE_ELF_CORE_DUMP
248 #define ELF_EXEC_PAGESIZE 4096
254 #define ELF_START_MMAP 0x80000000
256 #define elf_check_arch(x) ( (x) == EM_MIPS )
258 #define ELF_CLASS ELFCLASS32
259 #ifdef TARGET_WORDS_BIGENDIAN
260 #define ELF_DATA ELFDATA2MSB
262 #define ELF_DATA ELFDATA2LSB
264 #define ELF_ARCH EM_MIPS
266 #define ELF_PLAT_INIT(_r)
268 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
/* Initial MIPS user state: user-mode status, entry PC, and $29 (sp). */
270 regs->cp0_status = CP0St_UM;
271 regs->cp0_epc = infop->entry;
272 regs->regs[29] = infop->start_stack;
275 #endif /* TARGET_MIPS */
278 #define ELF_PLATFORM (NULL)
288 * MAX_ARG_PAGES defines the number of pages allocated for arguments
289 * and envelope for the new program. 32 should suffice, this gives
290 * a maximum env+arg of 128kB w/4KB pages!
292 #define MAX_ARG_PAGES 32
295 * This structure is used to hold the arguments that are
296 * used when loading binaries.
298 struct linux_binprm {
/* Argument/environment staging pages (target-page sized host buffers,
   allocated lazily by copy_strings, indexed by target page number). */
300 void *page[MAX_ARG_PAGES];
306 char * filename; /* Name of binary */
307 unsigned long loader, exec; /* offsets into the arg area; relocated in setup_arg_pages */
308 int dont_iput; /* binfmt handler has put inode */
313 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
314 unsigned int a_text; /* length of text, in bytes */
315 unsigned int a_data; /* length of data, in bytes */
316 unsigned int a_bss; /* length of uninitialized data area, in bytes */
317 unsigned int a_syms; /* length of symbol table data in file, in bytes */
318 unsigned int a_entry; /* start address */
319 unsigned int a_trsize; /* length of relocation info for text, in bytes */
320 unsigned int a_drsize; /* length of relocation info for data, in bytes */
324 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
330 /* max code+data+bss space allocated to elf interpreter */
331 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
333 /* max code+data+bss+brk space allocated to ET_DYN executables */
334 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
336 /* from personality.h */
338 /* Flags for bug emulation. These occupy the top three bytes. */
339 #define STICKY_TIMEOUTS 0x4000000
340 #define WHOLE_SECONDS 0x2000000
342 /* Personality types. These go in the low byte. Avoid using the top bit,
343 * it will conflict with error returns.
345 #define PER_MASK (0x00ff)
346 #define PER_LINUX (0x0000)
347 #define PER_SVR4 (0x0001 | STICKY_TIMEOUTS)
348 #define PER_SVR3 (0x0002 | STICKY_TIMEOUTS)
349 #define PER_SCOSVR3 (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
350 #define PER_WYSEV386 (0x0004 | STICKY_TIMEOUTS)
351 #define PER_ISCR4 (0x0005 | STICKY_TIMEOUTS)
352 #define PER_BSD (0x0006)
353 #define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
355 /* Necessary parameters */
358 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
359 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
360 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
362 #define INTERPRETER_NONE 0
363 #define INTERPRETER_AOUT 1
364 #define INTERPRETER_ELF 2
366 #define DLINFO_ITEMS 12
/* Kernel-heritage name kept from the original loader; in user space
   there is no fs segment, so this is presumably a plain memcpy —
   body elided from this view, TODO confirm. */
368 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
373 extern unsigned long x86_stack_size;
375 static int load_aout_interp(void * exptr, int interp_fd);
378 static void bswap_ehdr(struct elfhdr *ehdr)
/* Byte-swap an ELF file header in place, for when host and target
   endianness differ.  bswaptls swaps target-long-sized fields. */
380 bswap16s(&ehdr->e_type); /* Object file type */
381 bswap16s(&ehdr->e_machine); /* Architecture */
382 bswap32s(&ehdr->e_version); /* Object file version */
383 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
384 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
385 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
386 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
387 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
388 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
389 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
390 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
391 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
392 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
395 static void bswap_phdr(struct elf_phdr *phdr)
/* Byte-swap an ELF program header in place (cross-endian loading). */
397 bswap32s(&phdr->p_type); /* Segment type */
398 bswaptls(&phdr->p_offset); /* Segment file offset */
399 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
400 bswaptls(&phdr->p_paddr); /* Segment physical address */
401 bswaptls(&phdr->p_filesz); /* Segment size in file */
402 bswaptls(&phdr->p_memsz); /* Segment size in memory */
403 bswap32s(&phdr->p_flags); /* Segment flags */
404 bswaptls(&phdr->p_align); /* Segment alignment */
407 static void bswap_shdr(struct elf_shdr *shdr)
/* Byte-swap an ELF section header in place (cross-endian loading). */
409 bswap32s(&shdr->sh_name);
410 bswap32s(&shdr->sh_type);
411 bswaptls(&shdr->sh_flags);
412 bswaptls(&shdr->sh_addr);
413 bswaptls(&shdr->sh_offset);
414 bswaptls(&shdr->sh_size);
415 bswap32s(&shdr->sh_link);
416 bswap32s(&shdr->sh_info);
417 bswaptls(&shdr->sh_addralign);
418 bswaptls(&shdr->sh_entsize);
421 static void bswap_sym(Elf32_Sym *sym)
/* Byte-swap an ELF symbol table entry in place (used by load_symbols). */
423 bswap32s(&sym->st_name);
424 bswap32s(&sym->st_value);
425 bswap32s(&sym->st_size);
426 bswap16s(&sym->st_shndx);
431 * 'copy_string()' copies argument/envelope strings from user
432 * memory to free pages in kernel mem. These are in a format ready
433 * to be put directly into the top of new user memory.
436 static unsigned long copy_strings(int argc,char ** argv, void **page,
/* Copy argc strings from argv into the bprm page array, growing
   downward from offset p; callers assign the returned (lower) value
   back to bprm->p.  Pages are malloc'ed host buffers indexed by
   target page number, allocated on first touch. */
439 char *tmp, *tmp1, *pag = NULL;
443 return 0; /* bullet-proofing */
448 fprintf(stderr, "VFS: argc is wrong");
/* Not enough room left in the arg area for this string. */
454 if (p < len) { /* this shouldn't happen - 128kB */
460 offset = p % TARGET_PAGE_SIZE;
461 pag = (char *)page[p/TARGET_PAGE_SIZE];
/* First touch of this target page: allocate its host buffer. */
463 pag = (char *)malloc(TARGET_PAGE_SIZE);
464 page[p/TARGET_PAGE_SIZE] = pag;
469 if (len == 0 || offset == 0) {
470 *(pag + offset) = *tmp;
/* Bulk path: copy as much of the string as fits in this page. */
473 int bytes_to_copy = (len > offset) ? offset : len;
474 tmp -= bytes_to_copy;
476 offset -= bytes_to_copy;
477 len -= bytes_to_copy;
478 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
485 static int in_group_p(gid_t g)
487 /* return TRUE if we're in the specified group, FALSE otherwise */
490 gid_t grouplist[NGROUPS];
/* Fetch the host process's supplementary groups and scan for g. */
492 ngroup = getgroups(NGROUPS, grouplist);
493 for(i = 0; i < ngroup; i++) {
494 if(grouplist[i] == g) {
501 static int count(char ** vec)
/* Number of entries in a NULL-terminated pointer vector (argc/envc). */
505 for(i = 0; *vec; i++) {
512 static int prepare_binprm(struct linux_binprm *bprm)
/* Validate the opened binary (regular file, some execute bit set),
   compute effective uid/gid honouring the file's setuid/setgid bits,
   and read the first 128 bytes into bprm->buf for format sniffing. */
516 int retval, id_change;
518 if(fstat(bprm->fd, &st) < 0) {
523 if(!S_ISREG(mode)) { /* Must be regular file */
526 if(!(mode & 0111)) { /* Must have at least one execute bit set */
/* Default credentials: the caller's effective ids. */
530 bprm->e_uid = geteuid();
531 bprm->e_gid = getegid();
/* Setuid binary: adopt the file owner's uid. */
536 bprm->e_uid = st.st_uid;
537 if(bprm->e_uid != geteuid()) {
544 * If setgid is set but no group execute bit then this
545 * is a candidate for mandatory locking, not a setgid
548 if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
549 bprm->e_gid = st.st_gid;
550 if (!in_group_p(bprm->e_gid)) {
/* Sniff buffer: zero-fill so short files compare predictably. */
555 memset(bprm->buf, 0, sizeof(bprm->buf));
556 retval = lseek(bprm->fd, 0L, SEEK_SET);
558 retval = read(bprm->fd, bprm->buf, 128);
561 perror("prepare_binprm");
563 /* return(-errno); */
570 static inline void memcpy_to_target(target_ulong dest, const void *src,
/* Copy len bytes of host memory into the target address space, using
   lock_user/unlock_user to obtain a writable host view of dest. */
575 host_ptr = lock_user(dest, len, 0);
576 memcpy(host_ptr, src, len);
577 unlock_user(host_ptr, dest, 1);
580 unsigned long setup_arg_pages(target_ulong p, struct linux_binprm * bprm,
/* Allocate the target stack, install a guard page above it, and copy
   the staged argument/environment pages to its top.  Returns the
   relocated value of p (top-of-args pointer in target space). */
581 struct image_info * info)
583 target_ulong stack_base, size, error;
586 /* Create enough stack to hold everything. If we don't use
587 * it for args, we'll use it for something else...
589 size = x86_stack_size;
/* Never allocate less than the full argument area. */
590 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
591 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
592 error = target_mmap(0,
593 size + qemu_host_page_size,
594 PROT_READ | PROT_WRITE,
595 MAP_PRIVATE | MAP_ANONYMOUS,
601 /* we reserve one extra page at the top of the stack as guard */
602 target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
/* The argument pages occupy the very top of the new stack. */
604 stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
/* Relocate the loader/exec arg-area offsets to absolute addresses. */
608 bprm->loader += stack_base;
610 bprm->exec += stack_base;
/* Copy each staged page into target memory (host buffers are freed
   in the elided lines of this loop). */
612 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
616 memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
619 stack_base += TARGET_PAGE_SIZE;
624 static void set_brk(unsigned long start, unsigned long end)
626 /* page-align the start and end addresses... */
627 start = HOST_PAGE_ALIGN(start);
628 end = HOST_PAGE_ALIGN(end);
/* Map anonymous zero-filled pages covering [start, end) — this is how
   the bss and initial brk area get their backing memory. */
631 if(target_mmap(start, end - start,
632 PROT_READ | PROT_WRITE | PROT_EXEC,
633 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
634 perror("cannot mmap brk");
640 /* We need to explicitly zero any fractional pages after the data
641 section (i.e. bss). This would contain the junk from the file that
642 should not be in memory. */
643 static void padzero(unsigned long elf_bss)
647 /* XXX: this is really a hack : if the real host page size is
648 smaller than the target page size, some pages after the end
649 of the file may not be mapped. A better fix would be to
650 patch target_mmap(), but it is more complicated as the file
651 size must be known */
652 if (qemu_real_host_page_size < qemu_host_page_size) {
653 unsigned long end_addr, end_addr1;
/* Round elf_bss up to a real host page boundary... */
654 end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
655 ~(qemu_real_host_page_size - 1);
/* ...and up to a target page boundary; map anonymous zero pages
   over any gap between the two. */
656 end_addr = HOST_PAGE_ALIGN(elf_bss);
657 if (end_addr1 < end_addr) {
658 mmap((void *)end_addr1, end_addr - end_addr1,
659 PROT_READ|PROT_WRITE|PROT_EXEC,
660 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
/* Zero the tail of the last partially used page (the junk mapped in
   from the file past the end of the data segment). */
664 nbyte = elf_bss & (qemu_host_page_size-1);
666 nbyte = qemu_host_page_size - nbyte;
675 static unsigned long create_elf_tables(target_ulong p, int argc, int envc,
/* Build the initial process stack image: argc, argv[], envp[], and the
   ELF auxiliary vector (auxv), laid out per the SVR4/ELF ABI.  Returns
   the final stack pointer.  Also records arg/env boundaries in *info. */
676 struct elfhdr * exec,
677 unsigned long load_addr,
678 unsigned long load_bias,
679 unsigned long interp_load_addr, int ibcs,
680 struct image_info *info)
682 target_ulong argv, envp;
685 target_ulong u_platform;
686 const char *k_platform;
687 const int n = sizeof(target_ulong);
/* Push the AT_PLATFORM string (if any), padded to word alignment. */
691 k_platform = ELF_PLATFORM;
693 size_t len = strlen(k_platform) + 1;
694 sp -= (len + n - 1) & ~(n - 1);
696 memcpy_to_target(sp, k_platform, len);
699 * Force 16 byte _final_ alignment here for generality.
701 sp = sp &~ (target_ulong)15;
/* Count all words that will be pushed (aux entries are id/value pairs)
   so the final sp alignment can be computed up front. */
702 size = (DLINFO_ITEMS + 1) * 2;
705 #ifdef DLINFO_ARCH_ITEMS
706 size += DLINFO_ARCH_ITEMS * 2;
708 size += envc + argc + 2;
709 size += (!ibcs ? 3 : 1); /* argc itself */
712 sp -= 16 - (size & 15);
/* Push one auxv entry: value then id, since the stack grows down. */
714 #define NEW_AUX_ENT(id, val) do { \
715 sp -= n; tputl(sp, val); \
716 sp -= n; tputl(sp, id); \
718 NEW_AUX_ENT (AT_NULL, 0);
720 /* There must be exactly DLINFO_ITEMS entries here. */
721 NEW_AUX_ENT(AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
722 NEW_AUX_ENT(AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
723 NEW_AUX_ENT(AT_PHNUM, (target_ulong)(exec->e_phnum));
724 NEW_AUX_ENT(AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
725 NEW_AUX_ENT(AT_BASE, (target_ulong)(interp_load_addr));
726 NEW_AUX_ENT(AT_FLAGS, (target_ulong)0);
727 NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
728 NEW_AUX_ENT(AT_UID, (target_ulong) getuid());
729 NEW_AUX_ENT(AT_EUID, (target_ulong) geteuid());
730 NEW_AUX_ENT(AT_GID, (target_ulong) getgid());
731 NEW_AUX_ENT(AT_EGID, (target_ulong) getegid());
732 NEW_AUX_ENT(AT_HWCAP, (target_ulong) ELF_HWCAP);
734 NEW_AUX_ENT(AT_PLATFORM, u_platform);
737 * ARCH_DLINFO must come last so platform specific code can enforce
738 * special alignment requirements on the AUXV if necessary (eg. PPC).
/* Reserve the NULL-terminated argv[] and envp[] pointer arrays, then
   push envp, argv and argc words. */
744 sp -= (envc + 1) * n;
746 sp -= (argc + 1) * n;
749 sp -= n; tputl(sp, envp);
750 sp -= n; tputl(sp, argv);
752 sp -= n; tputl(sp, argc);
/* Fill argv[] with pointers into the string area, walking p forward. */
755 tputl(argv, p); argv += n;
756 p += target_strlen(p) + 1;
759 info->arg_end = info->env_start = p;
/* Same for envp[]. */
761 tputl(envp, p); envp += n;
762 p += target_strlen(p) + 1;
770 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
/* Map the ELF interpreter (dynamic linker) into target memory.
   Returns the interpreter's entry point (adjusted by its load address)
   and stores the load address through interp_load_addr; error paths
   (elided here) return ~0UL. */
772 unsigned long *interp_load_addr)
774 struct elf_phdr *elf_phdata = NULL;
775 struct elf_phdr *eppnt;
776 unsigned long load_addr = 0;
777 int load_addr_set = 0;
779 unsigned long last_bss, elf_bss;
/* Cross-endian: swap the interpreter's ELF header in place first. */
788 bswap_ehdr(interp_elf_ex);
790 /* First of all, some simple consistency checks */
791 if ((interp_elf_ex->e_type != ET_EXEC &&
792 interp_elf_ex->e_type != ET_DYN) ||
793 !elf_check_arch(interp_elf_ex->e_machine)) {
798 /* Now read in all of the header information */
/* Sanity bound on the program header table size. */
800 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
803 elf_phdata = (struct elf_phdr *)
804 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
810 * If the size of this structure has changed, then punt, since
811 * we will be doing the wrong thing.
813 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
818 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
820 retval = read(interpreter_fd,
822 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
825 perror("load_elf_interp");
/* Byte-swap every program header if needed. */
832 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
837 if (interp_elf_ex->e_type == ET_DYN) {
838 /* in order to avoid hardcoding the interpreter load
839 address in qemu, we allocate a big enough memory zone */
840 error = target_mmap(0, INTERP_MAP_SIZE,
841 PROT_NONE, MAP_PRIVATE | MAP_ANON,
/* Map each PT_LOAD segment of the interpreter. */
852 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
853 if (eppnt->p_type == PT_LOAD) {
854 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
856 unsigned long vaddr = 0;
/* Translate ELF segment flags into mmap protections. */
859 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
860 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
861 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
862 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
863 elf_type |= MAP_FIXED;
864 vaddr = eppnt->p_vaddr;
/* Map from the page-aligned file offset/address pair. */
866 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
867 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
871 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
875 close(interpreter_fd);
/* First PT_LOAD of an ET_DYN interpreter fixes its load address. */
880 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
886 * Find the end of the file mapping for this phdr, and keep
887 * track of the largest address we see for this.
889 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
890 if (k > elf_bss) elf_bss = k;
893 * Do the same thing for the memory mapping - between
894 * elf_bss and last_bss is the bss section.
896 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
897 if (k > last_bss) last_bss = k;
900 /* Now use mmap to map the library into memory. */
902 close(interpreter_fd);
905 * Now fill out the bss section. First pad the last page up
906 * to the page boundary, and then perform a mmap to make sure
907 * that there are zeromapped pages up to and including the last
911 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
913 /* Map the last of the bss segment */
914 if (last_bss > elf_bss) {
915 target_mmap(elf_bss, last_bss-elf_bss,
916 PROT_READ|PROT_WRITE|PROT_EXEC,
917 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
921 *interp_load_addr = load_addr;
922 return ((unsigned long) interp_elf_ex->e_entry) + load_addr;
925 /* Best attempt to load symbols from this ELF object. */
926 static void load_symbols(struct elfhdr *hdr, int fd)
929 struct elf_shdr sechdr, symtab, strtab;
933 lseek(fd, hdr->e_shoff, SEEK_SET);
934 for (i = 0; i < hdr->e_shnum; i++) {
935 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
940 if (sechdr.sh_type == SHT_SYMTAB) {
942 lseek(fd, hdr->e_shoff
943 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
944 if (read(fd, &strtab, sizeof(strtab))
953 return; /* Shouldn't happen... */
956 /* Now know where the strtab and symtab are. Snarf them. */
957 s = malloc(sizeof(*s));
958 s->disas_symtab = malloc(symtab.sh_size);
959 s->disas_strtab = strings = malloc(strtab.sh_size);
960 if (!s->disas_symtab || !s->disas_strtab)
963 lseek(fd, symtab.sh_offset, SEEK_SET);
964 if (read(fd, s->disas_symtab, symtab.sh_size) != symtab.sh_size)
968 for (i = 0; i < symtab.sh_size / sizeof(struct elf_sym); i++)
969 bswap_sym(s->disas_symtab + sizeof(struct elf_sym)*i);
972 lseek(fd, strtab.sh_offset, SEEK_SET);
973 if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
975 s->disas_num_syms = symtab.sh_size / sizeof(struct elf_sym);
980 static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
/* The main ELF loader: validates the binary, reads its program
   headers, locates and loads the interpreter (ELF or a.out) if one is
   requested via PT_INTERP, maps all PT_LOAD segments, sets up the bss
   and the initial stack (argv/envp/auxv), and fills in *info with the
   resulting memory layout.  Mirrors the kernel's load_elf_binary. */
981 struct image_info * info)
983 struct elfhdr elf_ex;
984 struct elfhdr interp_elf_ex;
985 struct exec interp_ex;
986 int interpreter_fd = -1; /* avoid warning */
987 unsigned long load_addr, load_bias;
988 int load_addr_set = 0;
989 unsigned int interpreter_type = INTERPRETER_NONE;
990 unsigned char ibcs2_interpreter;
992 unsigned long mapped_addr;
993 struct elf_phdr * elf_ppnt;
994 struct elf_phdr *elf_phdata;
995 unsigned long elf_bss, k, elf_brk;
997 char * elf_interpreter;
998 unsigned long elf_entry, interp_load_addr = 0;
1000 unsigned long start_code, end_code, end_data;
1001 unsigned long elf_stack;
1002 char passed_fileno[6];
1004 ibcs2_interpreter = 0;
1008 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
1010 bswap_ehdr(&elf_ex);
/* Check the ELF magic: 0x7f 'E' 'L' 'F'. */
1013 if (elf_ex.e_ident[0] != 0x7f ||
1014 strncmp(&elf_ex.e_ident[1], "ELF",3) != 0) {
1018 /* First of all, some simple consistency checks */
1019 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
1020 (! elf_check_arch(elf_ex.e_machine))) {
1024 /* Now read in all of the header information */
1025 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
1026 if (elf_phdata == NULL) {
1030 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
1032 retval = read(bprm->fd, (char *) elf_phdata,
1033 elf_ex.e_phentsize * elf_ex.e_phnum);
1037 perror("load_elf_binary");
/* Byte-swap every program header if host/target endianness differ. */
1044 elf_ppnt = elf_phdata;
1045 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
1046 bswap_phdr(elf_ppnt);
1049 elf_ppnt = elf_phdata;
1056 elf_interpreter = NULL;
/* First pass over the program headers: find PT_INTERP and read the
   interpreter path from the file. */
1061 for(i=0;i < elf_ex.e_phnum; i++) {
1062 if (elf_ppnt->p_type == PT_INTERP) {
1063 if ( elf_interpreter != NULL )
1066 free(elf_interpreter);
1071 /* This is the program interpreter used for
1072 * shared libraries - for now assume that this
1073 * is an a.out format binary
1076 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
1078 if (elf_interpreter == NULL) {
1084 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
1086 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
1089 perror("load_elf_binary2");
1093 /* If the program interpreter is one of these two,
1094 then assume an iBCS2 image. Otherwise assume
1095 a native linux image. */
1097 /* JRP - Need to add X86 lib dir stuff here... */
1099 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1100 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1101 ibcs2_interpreter = 1;
1105 printf("Using ELF interpreter %s\n", elf_interpreter);
/* path() redirects the lookup into the QEMU sysroot prefix. */
1108 retval = open(path(elf_interpreter), O_RDONLY);
1110 interpreter_fd = retval;
1113 perror(elf_interpreter);
1115 /* retval = -errno; */
/* Read the interpreter's own header and decode it both ways; the
   format check below decides which interpretation is valid. */
1120 retval = lseek(interpreter_fd, 0, SEEK_SET);
1122 retval = read(interpreter_fd,bprm->buf,128);
1126 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1127 interp_elf_ex=*((struct elfhdr *) bprm->buf); /* elf exec-header */
1130 perror("load_elf_binary3");
1133 free(elf_interpreter);
1141 /* Some simple consistency checks for the interpreter */
1142 if (elf_interpreter){
1143 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1145 /* Now figure out which format our binary is */
1146 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1147 (N_MAGIC(interp_ex) != QMAGIC)) {
1148 interpreter_type = INTERPRETER_ELF;
1151 if (interp_elf_ex.e_ident[0] != 0x7f ||
1152 strncmp(&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
1153 interpreter_type &= ~INTERPRETER_ELF;
1156 if (!interpreter_type) {
1157 free(elf_interpreter);
1164 /* OK, we are done with that, now set up the arg stuff,
1165 and then start this sucker up */
1167 if (!bprm->sh_bang) {
/* a.out interpreters receive the open fd of the binary as a string
   argument (the kernel's historical "fd passing" convention). */
1170 if (interpreter_type == INTERPRETER_AOUT) {
1171 snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1172 passed_p = passed_fileno;
1174 if (elf_interpreter) {
1175 bprm->p = copy_strings(1,&passed_p,bprm->page,bprm->p);
1180 if (elf_interpreter) {
1181 free(elf_interpreter);
1189 /* OK, This is the point of no return */
1192 info->start_mmap = (unsigned long)ELF_START_MMAP;
1194 elf_entry = (unsigned long) elf_ex.e_entry;
1196 /* Do this so that we can load the interpreter, if need be. We will
1197 change some of these later */
1199 bprm->p = setup_arg_pages(bprm->p, bprm, info);
1200 info->start_stack = bprm->p;
1202 /* Now we do a little grungy work by mmaping the ELF image into
1203 * the correct location in memory. At this point, we assume that
1204 * the image should be loaded at fixed address, not at a variable
/* Map every PT_LOAD segment of the main binary. */
1208 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1211 unsigned long error;
1213 if (elf_ppnt->p_type != PT_LOAD)
/* Translate ELF segment flags into mmap protections. */
1216 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1217 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1218 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1219 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1220 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1221 elf_flags |= MAP_FIXED;
1222 } else if (elf_ex.e_type == ET_DYN) {
1223 /* Try and get dynamic programs out of the way of the default mmap
1224 base, as well as whatever program they might try to exec. This
1225 is because the brk will follow the loader, and is not movable. */
1226 /* NOTE: for qemu, we do a big mmap to get enough space
1227 without hardcoding any address */
1228 error = target_mmap(0, ET_DYN_MAP_SIZE,
1229 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1235 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
1238 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1239 (elf_ppnt->p_filesz +
1240 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1242 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1244 (elf_ppnt->p_offset -
1245 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1251 #ifdef LOW_ELF_STACK
1252 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1253 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
/* First mapped segment establishes load_addr/load_bias. */
1256 if (!load_addr_set) {
1258 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1259 if (elf_ex.e_type == ET_DYN) {
1260 load_bias += error -
1261 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1262 load_addr += load_bias;
/* Track code/data/bss extents across all segments. */
1265 k = elf_ppnt->p_vaddr;
1268 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1271 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1275 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1276 if (k > elf_brk) elf_brk = k;
/* Convert all tracked addresses to their loaded (biased) values. */
1279 elf_entry += load_bias;
1280 elf_bss += load_bias;
1281 elf_brk += load_bias;
1282 start_code += load_bias;
1283 end_code += load_bias;
1284 // start_data += load_bias;
1285 end_data += load_bias;
/* If there is an interpreter, its entry point replaces the binary's. */
1287 if (elf_interpreter) {
1288 if (interpreter_type & 1) {
1289 elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
1291 else if (interpreter_type & 2) {
1292 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
1296 close(interpreter_fd);
1297 free(elf_interpreter);
1299 if (elf_entry == ~0UL) {
1300 printf("Unable to load interpreter\n");
1310 load_symbols(&elf_ex, bprm->fd);
1312 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
1313 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
1315 #ifdef LOW_ELF_STACK
1316 info->start_stack = bprm->p = elf_stack - 4;
/* Lay out argc/argv/envp/auxv on the stack. */
1318 bprm->p = create_elf_tables(bprm->p,
1322 load_addr, load_bias,
1324 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1326 if (interpreter_type == INTERPRETER_AOUT)
1327 info->arg_start += strlen(passed_fileno) + 1;
1328 info->start_brk = info->brk = elf_brk;
1329 info->end_code = end_code;
1330 info->start_code = start_code;
1331 info->end_data = end_data;
1332 info->start_stack = bprm->p;
1334 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1336 set_brk(elf_bss, elf_brk);
1341 printf("(start_brk) %x\n" , info->start_brk);
1342 printf("(end_code) %x\n" , info->end_code);
1343 printf("(start_code) %x\n" , info->start_code);
1344 printf("(end_data) %x\n" , info->end_data);
1345 printf("(start_stack) %x\n" , info->start_stack);
1346 printf("(brk) %x\n" , info->brk);
1349 if ( info->personality == PER_SVR4 )
1351 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1352 and some applications "depend" upon this behavior.
1353 Since we do not have the power to recompile these, we
1354 emulate the SVr4 behavior. Sigh. */
1355 mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1356 MAP_FIXED | MAP_PRIVATE, -1, 0);
1359 #ifdef ELF_PLAT_INIT
1361 * The ABI may specify that certain registers be set up in special
1362 * ways (on i386 %edx is the address of a DT_FINI function, for
1363 * example. This macro performs whatever initialization to
1364 * the regs structure is required.
1366 ELF_PLAT_INIT(regs);
1370 info->entry = elf_entry;
1377 int elf_exec(const char * filename, char ** argv, char ** envp,
/* Public entry point of the loader: open the binary, collect the
   filename/env/arg strings into staging pages, run load_elf_binary,
   and initialise the target CPU registers.  Note the strings are
   pushed in reverse order (filename, envp, argv) since the arg area
   grows downward. */
1378 struct target_pt_regs * regs, struct image_info *infop)
1380 struct linux_binprm bprm;
/* Start p at the top of the arg area, below a reserved word. */
1384 bprm.p = TARGET_PAGE_SIZE*MAX_ARG_PAGES-sizeof(unsigned int);
1385 for (i=0 ; i<MAX_ARG_PAGES ; i++) /* clear page-table */
1387 retval = open(filename, O_RDONLY);
1391 bprm.filename = (char *)filename;
1396 bprm.argc = count(argv);
1397 bprm.envc = count(envp);
1399 retval = prepare_binprm(&bprm);
1402 bprm.p = copy_strings(1, &bprm.filename, bprm.page, bprm.p);
1404 bprm.p = copy_strings(bprm.envc,envp,bprm.page,bprm.p);
1405 bprm.p = copy_strings(bprm.argc,argv,bprm.page,bprm.p);
1412 retval = load_elf_binary(&bprm,regs,infop);
1416 /* success. Initialize important registers */
1417 init_thread(regs, infop);
1421 /* Something went wrong, return the inode and free the argument pages*/
1422 for (i=0 ; i<MAX_ARG_PAGES ; i++) {
1429 static int load_aout_interp(void * exptr, int interp_fd)
/* a.out interpreters are not implemented; this stub only reports it. */
1431 printf("a.out interpreter not yet supported\n");