#define ELF_START_MMAP 0x80000000
-typedef uint32_t elf_greg_t;
-
-#define ELF_NGREG (sizeof (struct target_pt_regs) / sizeof(elf_greg_t))
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-typedef struct user_i387_struct elf_fpregset_t;
-
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
A value of 0 tells we have no such handler. */
#define ELF_PLAT_INIT(_r) _r->edx = 0
+/* Set up the initial register state for a new x86 guest thread: only
+   the user stack pointer and the entry point need seeding; the rest of
+   the register file is left as-is for the caller.  */
+static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
+{
+ regs->esp = infop->start_stack;
+ regs->eip = infop->entry;
+}
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE 4096
+
+#endif
+
+#ifdef TARGET_ARM
+
+/* ELF identification for 32-bit ARM guests: base address hint for
+   mmap placement, the e_machine check used to reject foreign binaries,
+   and the class/endianness/arch values (endianness follows the target
+   build configuration).  */
+#define ELF_START_MMAP 0x80000000
+
+#define elf_check_arch(x) ( (x) == EM_ARM )
+
+#define ELF_CLASS ELFCLASS32
+#ifdef TARGET_WORDS_BIGENDIAN
+#define ELF_DATA ELFDATA2MSB
+#else
+#define ELF_DATA ELFDATA2LSB
+#endif
+#define ELF_ARCH EM_ARM
+
+/* Per-platform register fixup at process start: clears r0.  */
+#define ELF_PLAT_INIT(_r) _r->ARM_r0 = 0
+
+/* Set up the initial register state for a new ARM guest thread.
+   r1/r2 are seeded from the words at the top of the new stack
+   (commented as argv/envp); tswapl converts them from target byte
+   order.  NOTE(review): 0x10 in CPSR is presumably 32-bit user mode —
+   confirm against the ARM ARM mode encodings.  */
+static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
+{
+ target_long *stack = (void *)infop->start_stack;
+ memset(regs, 0, sizeof(*regs));
+ regs->ARM_cpsr = 0x10;
+ regs->ARM_pc = infop->entry;
+ regs->ARM_sp = infop->start_stack;
+ regs->ARM_r2 = tswapl(stack[2]); /* envp */
+ regs->ARM_r1 = tswapl(stack[1]); /* argv */
+ /* XXX: it seems that r0 is zeroed after ! */
+ // regs->ARM_r0 = tswapl(stack[0]); /* argc */
+}
+
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ZMAGIC 0413
#define QMAGIC 0314
-#define X86_STACK_TOP 0x7d000000
-
/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)
#define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
/* Necessary parameters */
-#define ALPHA_PAGE_SIZE 4096
-#define X86_PAGE_SIZE 4096
-
-#define ALPHA_PAGE_MASK (~(ALPHA_PAGE_SIZE-1))
-#define X86_PAGE_MASK (~(X86_PAGE_SIZE-1))
-
-#define ALPHA_PAGE_ALIGN(addr) ((((addr)+ALPHA_PAGE_SIZE)-1)&ALPHA_PAGE_MASK)
-#define X86_PAGE_ALIGN(addr) ((((addr)+X86_PAGE_SIZE)-1)&X86_PAGE_MASK)
-
#define NGROUPS 32
-#define X86_ELF_EXEC_PAGESIZE X86_PAGE_SIZE
-#define X86_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(X86_ELF_EXEC_PAGESIZE-1))
-#define X86_ELF_PAGEOFFSET(_v) ((_v) & (X86_ELF_EXEC_PAGESIZE-1))
-
-#define ALPHA_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ALPHA_PAGE_SIZE-1))
-#define ALPHA_ELF_PAGEOFFSET(_v) ((_v) & (ALPHA_PAGE_SIZE-1))
+#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
+#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
+#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
memcpy(to, from, n);
}
-//extern void * mmap4k();
-#define mmap4k(a, b, c, d, e, f) mmap((void *)(a), b, c, d, e, f)
-
extern unsigned long x86_stack_size;
static int load_aout_interp(void * exptr, int interp_fd);
/* User-space version of kernel get_free_page. Returns a page-aligned
* page-sized chunk of memory.
*/
- retval = mmap4k(0, ALPHA_PAGE_SIZE, PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ retval = (void *)target_mmap(0, host_page_size, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
if((long)retval == -1) {
perror("get_free_page");
static void free_page(void * pageaddr)
{
- (void)munmap(pageaddr, ALPHA_PAGE_SIZE);
+ target_munmap((unsigned long)pageaddr, host_page_size);
}
/*
while (len) {
--p; --tmp; --len;
if (--offset < 0) {
- offset = p % X86_PAGE_SIZE;
- if (!(pag = (char *) page[p/X86_PAGE_SIZE]) &&
- !(pag = (char *) page[p/X86_PAGE_SIZE] =
+ offset = p % TARGET_PAGE_SIZE;
+ if (!(pag = (char *) page[p/TARGET_PAGE_SIZE]) &&
+ !(pag = (char *) page[p/TARGET_PAGE_SIZE] =
(unsigned long *) get_free_page())) {
return 0;
}
* it for args, we'll use it for something else...
*/
size = x86_stack_size;
- if (size < MAX_ARG_PAGES*X86_PAGE_SIZE)
- size = MAX_ARG_PAGES*X86_PAGE_SIZE;
- error = (unsigned long)mmap4k(NULL,
- size + X86_PAGE_SIZE,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS,
- -1, 0);
+ if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
+ size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
+ error = target_mmap(0,
+ size + host_page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ -1, 0);
if (error == -1) {
perror("stk mmap");
exit(-1);
}
/* we reserve one extra page at the top of the stack as guard */
- mprotect((void *)(error + size), X86_PAGE_SIZE, PROT_NONE);
+ target_mprotect(error + size, host_page_size, PROT_NONE);
- stack_base = error + size - MAX_ARG_PAGES*X86_PAGE_SIZE;
+ stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
p += stack_base;
if (bprm->loader) {
if (bprm->page[i]) {
info->rss++;
- memcpy((void *)stack_base, (void *)bprm->page[i], X86_PAGE_SIZE);
+ memcpy((void *)stack_base, (void *)bprm->page[i], TARGET_PAGE_SIZE);
free_page((void *)bprm->page[i]);
}
- stack_base += X86_PAGE_SIZE;
+ stack_base += TARGET_PAGE_SIZE;
}
return p;
}
static void set_brk(unsigned long start, unsigned long end)
{
/* page-align the start and end addresses... */
- start = ALPHA_PAGE_ALIGN(start);
- end = ALPHA_PAGE_ALIGN(end);
+ start = HOST_PAGE_ALIGN(start);
+ end = HOST_PAGE_ALIGN(end);
if (end <= start)
return;
- if((long)mmap4k(start, end - start,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
+ if(target_mmap(start, end - start,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
perror("cannot mmap brk");
exit(-1);
}
unsigned long nbyte;
char * fpnt;
- nbyte = elf_bss & (ALPHA_PAGE_SIZE-1); /* was X86_PAGE_SIZE - JRP */
+ nbyte = elf_bss & (host_page_size-1); /* was TARGET_PAGE_SIZE - JRP */
if (nbyte) {
- nbyte = ALPHA_PAGE_SIZE - nbyte;
+ nbyte = host_page_size - nbyte;
fpnt = (char *) elf_bss;
do {
*fpnt++ = 0;
* Force 16 byte alignment here for generality.
*/
sp = (unsigned int *) (~15UL & (unsigned long) p);
- sp -= exec ? DLINFO_ITEMS*2 : 2;
+ sp -= DLINFO_ITEMS*2;
dlinfo = sp;
sp -= envc+1;
envp = sp;
put_user (tswapl(id), dlinfo++); \
put_user (tswapl(val), dlinfo++)
- if (exec) { /* Put this here for an ELF program interpreter */
- NEW_AUX_ENT (AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
- NEW_AUX_ENT (AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
- NEW_AUX_ENT (AT_PHNUM, (target_ulong)(exec->e_phnum));
- NEW_AUX_ENT (AT_PAGESZ, (target_ulong)(ALPHA_PAGE_SIZE));
- NEW_AUX_ENT (AT_BASE, (target_ulong)(interp_load_addr));
- NEW_AUX_ENT (AT_FLAGS, (target_ulong)0);
- NEW_AUX_ENT (AT_ENTRY, load_bias + exec->e_entry);
- NEW_AUX_ENT (AT_UID, (target_ulong) getuid());
- NEW_AUX_ENT (AT_EUID, (target_ulong) geteuid());
- NEW_AUX_ENT (AT_GID, (target_ulong) getgid());
- NEW_AUX_ENT (AT_EGID, (target_ulong) getegid());
- }
+ NEW_AUX_ENT (AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
+ NEW_AUX_ENT (AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
+ NEW_AUX_ENT (AT_PHNUM, (target_ulong)(exec->e_phnum));
+ NEW_AUX_ENT (AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
+ NEW_AUX_ENT (AT_BASE, (target_ulong)(interp_load_addr));
+ NEW_AUX_ENT (AT_FLAGS, (target_ulong)0);
+ NEW_AUX_ENT (AT_ENTRY, load_bias + exec->e_entry);
+ NEW_AUX_ENT (AT_UID, (target_ulong) getuid());
+ NEW_AUX_ENT (AT_EUID, (target_ulong) geteuid());
+ NEW_AUX_ENT (AT_GID, (target_ulong) getgid());
+ NEW_AUX_ENT (AT_EGID, (target_ulong) getegid());
NEW_AUX_ENT (AT_NULL, 0);
#undef NEW_AUX_ENT
+
put_user(tswapl(argc),--sp);
info->arg_start = (unsigned int)((unsigned long)p & 0xffffffff);
while (argc-->0) {
/* Now read in all of the header information */
- if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > X86_PAGE_SIZE)
+ if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
return ~0UL;
elf_phdata = (struct elf_phdr *)
if (interp_elf_ex->e_type == ET_DYN) {
/* in order to avoid harcoding the interpreter load
address in qemu, we allocate a big enough memory zone */
- error = (unsigned long)mmap4k(NULL, INTERP_MAP_SIZE,
- PROT_NONE, MAP_PRIVATE | MAP_ANON,
- -1, 0);
+ error = target_mmap(0, INTERP_MAP_SIZE,
+ PROT_NONE, MAP_PRIVATE | MAP_ANON,
+ -1, 0);
if (error == -1) {
perror("mmap");
exit(-1);
elf_type |= MAP_FIXED;
vaddr = eppnt->p_vaddr;
}
- error = (unsigned long)mmap4k(load_addr+X86_ELF_PAGESTART(vaddr),
- eppnt->p_filesz + X86_ELF_PAGEOFFSET(eppnt->p_vaddr),
+ error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
+ eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
elf_prot,
elf_type,
interpreter_fd,
- eppnt->p_offset - X86_ELF_PAGEOFFSET(eppnt->p_vaddr));
+ eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
if (error > -1024UL) {
/* Real error */
* bss page.
*/
padzero(elf_bss);
- elf_bss = X86_ELF_PAGESTART(elf_bss + ALPHA_PAGE_SIZE - 1); /* What we have mapped so far */
+ elf_bss = TARGET_ELF_PAGESTART(elf_bss + host_page_size - 1); /* What we have mapped so far */
/* Map the last of the bss segment */
if (last_bss > elf_bss) {
- mmap4k(elf_bss, last_bss-elf_bss,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ target_mmap(elf_bss, last_bss-elf_bss,
+ PROT_READ|PROT_WRITE|PROT_EXEC,
+ MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
}
free(elf_phdata);
unsigned int interpreter_type = INTERPRETER_NONE;
unsigned char ibcs2_interpreter;
int i;
- void * mapped_addr;
+ unsigned long mapped_addr;
struct elf_phdr * elf_ppnt;
struct elf_phdr *elf_phdata;
unsigned long elf_bss, k, elf_brk;
}
/* Now read in all of the header information */
-
elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
if (elf_phdata == NULL) {
return -ENOMEM;
is because the brk will follow the loader, and is not movable. */
/* NOTE: for qemu, we do a big mmap to get enough space
without harcoding any address */
- error = (unsigned long)mmap4k(NULL, ET_DYN_MAP_SIZE,
- PROT_NONE, MAP_PRIVATE | MAP_ANON,
- -1, 0);
+ error = target_mmap(0, ET_DYN_MAP_SIZE,
+ PROT_NONE, MAP_PRIVATE | MAP_ANON,
+ -1, 0);
if (error == -1) {
perror("mmap");
exit(-1);
}
- load_bias = X86_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
+ load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
}
- error = (unsigned long)mmap4k(
- X86_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
- (elf_ppnt->p_filesz +
- X86_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
- elf_prot,
- (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
- bprm->fd,
- (elf_ppnt->p_offset -
- X86_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
+ error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
+ (elf_ppnt->p_filesz +
+ TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
+ elf_prot,
+ (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
+ bprm->fd,
+ (elf_ppnt->p_offset -
+ TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
if (error == -1) {
perror("mmap");
exit(-1);
}
#ifdef LOW_ELF_STACK
- if (X86_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
- elf_stack = X86_ELF_PAGESTART(elf_ppnt->p_vaddr);
+ if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
+ elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif
if (!load_addr_set) {
load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
if (elf_ex.e_type == ET_DYN) {
load_bias += error -
- X86_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
+ TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
load_addr += load_bias;
}
}
create_elf_tables((char *)bprm->p,
bprm->argc,
bprm->envc,
- (interpreter_type == INTERPRETER_ELF ? &elf_ex : NULL),
+ &elf_ex,
load_addr, load_bias,
interp_load_addr,
(interpreter_type == INTERPRETER_AOUT ? 0 : 1),
and some applications "depend" upon this behavior.
Since we do not have the power to recompile these, we
emulate the SVr4 behavior. Sigh. */
- mapped_addr = mmap4k(NULL, ALPHA_PAGE_SIZE, PROT_READ | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE, -1, 0);
+ mapped_addr = target_mmap(0, host_page_size, PROT_READ | PROT_EXEC,
+ MAP_FIXED | MAP_PRIVATE, -1, 0);
}
#ifdef ELF_PLAT_INIT
int retval;
int i;
- bprm.p = X86_PAGE_SIZE*MAX_ARG_PAGES-sizeof(unsigned int);
+ bprm.p = TARGET_PAGE_SIZE*MAX_ARG_PAGES-sizeof(unsigned int);
for (i=0 ; i<MAX_ARG_PAGES ; i++) /* clear page-table */
bprm.page[i] = 0;
retval = open(filename, O_RDONLY);
}
if(retval>=0) {
/* success. Initialize important registers */
- regs->esp = infop->start_stack;
- regs->eip = infop->entry;
+ init_thread(regs, infop);
return retval;
}