//#define DEBUG
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#define PAGE_MASK ~(PAGE_SIZE - 1)
-#endif
-
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct dirent [2])
-void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info);
-void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo);
-long do_sigreturn(CPUX86State *env);
-long do_rt_sigreturn(CPUX86State *env);
+
+#if defined(__powerpc__)
+#undef __syscall_nr
+#undef __sc_loadargs_0
+#undef __sc_loadargs_1
+#undef __sc_loadargs_2
+#undef __sc_loadargs_3
+#undef __sc_loadargs_4
+#undef __sc_loadargs_5
+#undef __sc_asm_input_0
+#undef __sc_asm_input_1
+#undef __sc_asm_input_2
+#undef __sc_asm_input_3
+#undef __sc_asm_input_4
+#undef __sc_asm_input_5
+#undef _syscall0
+#undef _syscall1
+#undef _syscall2
+#undef _syscall3
+#undef _syscall4
+#undef _syscall5
+
+/* We need to redefine the syscall macros here because the Linux kernel
+   definitions specify an incorrect clobber list. */
+/* On powerpc a system call basically clobbers the same registers like a
+ * function call, with the exception of LR (which is needed for the
+ * "sc; bnslr" sequence) and CR (where only CR0.SO is clobbered to signal
+ * an error return status).
+ */
+
+#define __syscall_nr(nr, type, name, args...) \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ register unsigned long __sc_6 __asm__ ("r6"); \
+ register unsigned long __sc_7 __asm__ ("r7"); \
+ \
+ __sc_loadargs_##nr(name, args); \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %0 " \
+ : "=&r" (__sc_0), \
+ "=&r" (__sc_3), "=&r" (__sc_4), \
+ "=&r" (__sc_5), "=&r" (__sc_6), \
+ "=&r" (__sc_7) \
+ : __sc_asm_input_##nr \
+ : "cr0", "ctr", "memory", \
+ "r8", "r9", "r10","r11", "r12"); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ if (__sc_err & 0x10000000) \
+ { \
+ errno = __sc_ret; \
+ __sc_ret = -1; \
+ } \
+ return (type) __sc_ret
+
+#define __sc_loadargs_0(name, dummy...) \
+ __sc_0 = __NR_##name
+#define __sc_loadargs_1(name, arg1) \
+ __sc_loadargs_0(name); \
+ __sc_3 = (unsigned long) (arg1)
+#define __sc_loadargs_2(name, arg1, arg2) \
+ __sc_loadargs_1(name, arg1); \
+ __sc_4 = (unsigned long) (arg2)
+#define __sc_loadargs_3(name, arg1, arg2, arg3) \
+ __sc_loadargs_2(name, arg1, arg2); \
+ __sc_5 = (unsigned long) (arg3)
+#define __sc_loadargs_4(name, arg1, arg2, arg3, arg4) \
+ __sc_loadargs_3(name, arg1, arg2, arg3); \
+ __sc_6 = (unsigned long) (arg4)
+#define __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5) \
+ __sc_loadargs_4(name, arg1, arg2, arg3, arg4); \
+ __sc_7 = (unsigned long) (arg5)
+
+#define __sc_asm_input_0 "0" (__sc_0)
+#define __sc_asm_input_1 __sc_asm_input_0, "1" (__sc_3)
+#define __sc_asm_input_2 __sc_asm_input_1, "2" (__sc_4)
+#define __sc_asm_input_3 __sc_asm_input_2, "3" (__sc_5)
+#define __sc_asm_input_4 __sc_asm_input_3, "4" (__sc_6)
+#define __sc_asm_input_5 __sc_asm_input_4, "5" (__sc_7)
+
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+ __syscall_nr(0, type, name); \
+}
+
+#define _syscall1(type,name,type1,arg1) \
+type name(type1 arg1) \
+{ \
+ __syscall_nr(1, type, name, arg1); \
+}
+
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type name(type1 arg1, type2 arg2) \
+{ \
+ __syscall_nr(2, type, name, arg1, arg2); \
+}
+
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+type name(type1 arg1, type2 arg2, type3 arg3) \
+{ \
+ __syscall_nr(3, type, name, arg1, arg2, arg3); \
+}
+
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+ __syscall_nr(4, type, name, arg1, arg2, arg3, arg4); \
+}
+
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+{ \
+ __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5); \
+}
+#endif
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
if (new_brk < target_original_brk)
return -ENOMEM;
- brk_page = (char *)(((unsigned long)target_brk + PAGE_SIZE - 1) & PAGE_MASK);
+ brk_page = (char *)HOST_PAGE_ALIGN((unsigned long)target_brk);
/* If the new brk is less than this, set it and we're done... */
if (new_brk < brk_page) {
}
/* We need to allocate more memory after the brk... */
- new_alloc_size = ((new_brk - brk_page + 1)+(PAGE_SIZE-1)) & PAGE_MASK;
- mapped_addr = get_errno((long)mmap((caddr_t)brk_page, new_alloc_size,
- PROT_READ|PROT_WRITE,
- MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
-
+ new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
+ mapped_addr = get_errno(target_mmap((unsigned long)brk_page, new_alloc_size,
+ PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
if (is_error(mapped_addr)) {
return mapped_addr;
} else {
#endif
}
+#if defined(__alpha__)
+#define HOST_HZ 1024
+#else
+#define HOST_HZ 100
+#endif
+
+static inline long host_to_target_clock_t(long ticks)
+{
+#if HOST_HZ == TARGET_HZ
+ return ticks;
+#else
+ return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
+#endif
+}
+
+static inline void host_to_target_rusage(struct target_rusage *target_rusage,
+ const struct rusage *rusage)
+{
+ target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
+ target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
+ target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
+ target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
+ target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
+ target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
+ target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
+ target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
+ target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
+ target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
+ target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
+ target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
+ target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
+ target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
+ target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
+ target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
+ target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
+ target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
+}
+
static inline void target_to_host_timeval(struct timeval *tv,
const struct target_timeval *target_tv)
{
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry {
- int target_cmd;
- int host_cmd;
+ unsigned int target_cmd;
+ unsigned int host_cmd;
const char *name;
int access;
const argtype arg_type[5];
#define MAX_STRUCT_SIZE 4096
-const IOCTLEntry ioctl_entries[] = {
+IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, types...) \
{ TARGET_ ## cmd, cmd, #cmd, access, { types } },
#include "ioctls.h"
.align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
-#ifdef TARGET_I386
+static bitmask_transtbl mmap_flags_tbl[] = {
+ { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
+ { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
+ { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
+ { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
+ { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
+ { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
+ { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
+ { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
+ { 0, 0, 0, 0 }
+};
+
+#if defined(TARGET_I386)
/* NOTE: there is really one LDT for all the threads */
uint8_t *ldt_table;
0x7000;
if (!oldmode)
entry_2 |= (useable << 20);
-
+
/* Install the new entry ... */
install:
lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
return ret;
}
-/* vm86 emulation */
-
-#define SAFE_MASK (0xDD5)
-
-int do_vm86(CPUX86State *env, long subfunction,
- struct target_vm86plus_struct * target_v86)
-{
- TaskState *ts = env->opaque;
- int ret;
-
- switch (subfunction) {
- case TARGET_VM86_REQUEST_IRQ:
- case TARGET_VM86_FREE_IRQ:
- case TARGET_VM86_GET_IRQ_BITS:
- case TARGET_VM86_GET_AND_RESET_IRQ:
- gemu_log("qemu: unsupported vm86 subfunction (%ld)\n", subfunction);
- ret = -EINVAL;
- goto out;
- case TARGET_VM86_PLUS_INSTALL_CHECK:
- /* NOTE: on old vm86 stuff this will return the error
- from verify_area(), because the subfunction is
- interpreted as (invalid) address to vm86_struct.
- So the installation check works.
- */
- ret = 0;
- goto out;
- }
-
- ts->target_v86 = target_v86;
- /* save current CPU regs */
- ts->vm86_saved_regs.eax = 0; /* default vm86 syscall return code */
- ts->vm86_saved_regs.ebx = env->regs[R_EBX];
- ts->vm86_saved_regs.ecx = env->regs[R_ECX];
- ts->vm86_saved_regs.edx = env->regs[R_EDX];
- ts->vm86_saved_regs.esi = env->regs[R_ESI];
- ts->vm86_saved_regs.edi = env->regs[R_EDI];
- ts->vm86_saved_regs.ebp = env->regs[R_EBP];
- ts->vm86_saved_regs.esp = env->regs[R_ESP];
- ts->vm86_saved_regs.eflags = env->eflags;
- ts->vm86_saved_regs.eip = env->eip;
- ts->vm86_saved_regs.cs = env->segs[R_CS];
- ts->vm86_saved_regs.ss = env->segs[R_SS];
- ts->vm86_saved_regs.ds = env->segs[R_DS];
- ts->vm86_saved_regs.es = env->segs[R_ES];
- ts->vm86_saved_regs.fs = env->segs[R_FS];
- ts->vm86_saved_regs.gs = env->segs[R_GS];
-
- /* build vm86 CPU state */
- env->eflags = (env->eflags & ~SAFE_MASK) |
- (tswap32(target_v86->regs.eflags) & SAFE_MASK) | VM_MASK;
-
- env->regs[R_EBX] = tswap32(target_v86->regs.ebx);
- env->regs[R_ECX] = tswap32(target_v86->regs.ecx);
- env->regs[R_EDX] = tswap32(target_v86->regs.edx);
- env->regs[R_ESI] = tswap32(target_v86->regs.esi);
- env->regs[R_EDI] = tswap32(target_v86->regs.edi);
- env->regs[R_EBP] = tswap32(target_v86->regs.ebp);
- env->regs[R_ESP] = tswap32(target_v86->regs.esp);
- env->eip = tswap32(target_v86->regs.eip);
- cpu_x86_load_seg(env, R_CS, tswap16(target_v86->regs.cs));
- cpu_x86_load_seg(env, R_SS, tswap16(target_v86->regs.ss));
- cpu_x86_load_seg(env, R_DS, tswap16(target_v86->regs.ds));
- cpu_x86_load_seg(env, R_ES, tswap16(target_v86->regs.es));
- cpu_x86_load_seg(env, R_FS, tswap16(target_v86->regs.fs));
- cpu_x86_load_seg(env, R_GS, tswap16(target_v86->regs.gs));
- ret = tswap32(target_v86->regs.eax); /* eax will be restored at
- the end of the syscall */
- /* now the virtual CPU is ready for vm86 execution ! */
- out:
- return ret;
-}
+#endif /* defined(TARGET_I386) */
/* this stack is the equivalent of the kernel stack associated with a
thread/process */
static int clone_func(void *arg)
{
- CPUX86State *env = arg;
+ CPUState *env = arg;
cpu_loop(env);
/* never exits */
return 0;
}
-int do_fork(CPUX86State *env, unsigned int flags, unsigned long newsp)
+int do_fork(CPUState *env, unsigned int flags, unsigned long newsp)
{
int ret;
TaskState *ts;
uint8_t *new_stack;
- CPUX86State *new_env;
+ CPUState *new_env;
if (flags & CLONE_VM) {
- if (!newsp)
- newsp = env->regs[R_ESP];
ts = malloc(sizeof(TaskState) + NEW_STACK_SIZE);
memset(ts, 0, sizeof(TaskState));
new_stack = ts->stack;
ts->next = first_task_state;
first_task_state = ts;
/* we create a new CPU instance. */
- new_env = cpu_x86_init();
- memcpy(new_env, env, sizeof(CPUX86State));
+ new_env = cpu_init();
+ memcpy(new_env, env, sizeof(CPUState));
+#if defined(TARGET_I386)
+ if (!newsp)
+ newsp = env->regs[R_ESP];
new_env->regs[R_ESP] = newsp;
new_env->regs[R_EAX] = 0;
+#elif defined(TARGET_ARM)
+ if (!newsp)
+ newsp = env->regs[13];
+ new_env->regs[13] = newsp;
+ new_env->regs[0] = 0;
+#else
+#error unsupported target CPU
+#endif
new_env->opaque = ts;
#ifdef __ia64__
ret = clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
return ret;
}
-#endif
+static long do_fcntl(int fd, int cmd, unsigned long arg)
+{
+ struct flock fl;
+ struct target_flock *target_fl = (void *)arg;
+ long ret;
+
+ switch(cmd) {
+ case TARGET_F_GETLK:
+ ret = fcntl(fd, cmd, &fl);
+ if (ret == 0) {
+ target_fl->l_type = tswap16(fl.l_type);
+ target_fl->l_whence = tswap16(fl.l_whence);
+ target_fl->l_start = tswapl(fl.l_start);
+ target_fl->l_len = tswapl(fl.l_len);
+ target_fl->l_pid = tswapl(fl.l_pid);
+ }
+ break;
+
+ case TARGET_F_SETLK:
+ case TARGET_F_SETLKW:
+ fl.l_type = tswap16(target_fl->l_type);
+ fl.l_whence = tswap16(target_fl->l_whence);
+ fl.l_start = tswapl(target_fl->l_start);
+ fl.l_len = tswapl(target_fl->l_len);
+ fl.l_pid = tswapl(target_fl->l_pid);
+ ret = fcntl(fd, cmd, &fl);
+ break;
+
+ case TARGET_F_GETLK64:
+ case TARGET_F_SETLK64:
+ case TARGET_F_SETLKW64:
+ ret = -1;
+ errno = EINVAL;
+ break;
+
+ default:
+ ret = fcntl(fd, cmd, arg);
+ break;
+ }
+ return ret;
+}
+
#define high2lowuid(x) (x)
#define high2lowgid(x) (x)
void syscall_init(void)
{
+ IOCTLEntry *ie;
+ const argtype *arg_type;
+ int size;
+
#define STRUCT(name, list...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
+
+    /* We patch the ioctl size if necessary. We rely on the fact that
+       no ioctl has all of its size-field bits set to '1'. */
+ ie = ioctl_entries;
+ while (ie->target_cmd != 0) {
+ if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
+ TARGET_IOC_SIZEMASK) {
+ arg_type = ie->arg_type;
+ if (arg_type[0] != TYPE_PTR) {
+ fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
+ ie->target_cmd);
+ exit(1);
+ }
+ arg_type++;
+ size = thunk_type_size(arg_type, 0);
+ ie->target_cmd = (ie->target_cmd &
+ ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
+ (size << TARGET_IOC_SIZESHIFT);
+ }
+ /* automatic consistency check if same arch */
+#if defined(__i386__) && defined(TARGET_I386)
+ if (ie->target_cmd != ie->host_cmd) {
+ fprintf(stderr, "ERROR: ioctl: target=0x%x host=0x%x\n",
+ ie->target_cmd, ie->host_cmd);
+ }
+#endif
+ ie++;
+ }
}
long do_syscall(void *cpu_env, int num, long arg1, long arg2, long arg3,
ret = 0; /* avoid warning */
break;
case TARGET_NR_read:
+ page_unprotect_range((void *)arg2, arg3);
ret = get_errno(read(arg1, (void *)arg2, arg3));
break;
case TARGET_NR_write:
struct tms tms;
ret = get_errno(times(&tms));
if (tmsp) {
- tmsp->tms_utime = tswapl(tms.tms_utime);
- tmsp->tms_stime = tswapl(tms.tms_stime);
- tmsp->tms_cutime = tswapl(tms.tms_cutime);
- tmsp->tms_cstime = tswapl(tms.tms_cstime);
+ tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
+ tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
+ tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
+ tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
}
+ if (!is_error(ret))
+ ret = host_to_target_clock_t(ret);
}
break;
case TARGET_NR_prof:
ret = do_ioctl(arg1, arg2, arg3);
break;
case TARGET_NR_fcntl:
- {
- struct flock fl;
- struct target_flock *target_fl = (void *)arg3;
-
- switch(arg2) {
- case TARGET_F_GETLK:
- ret = get_errno(fcntl(arg1, arg2, &fl));
- if (ret == 0) {
- target_fl->l_type = tswap16(fl.l_type);
- target_fl->l_whence = tswap16(fl.l_whence);
- target_fl->l_start = tswapl(fl.l_start);
- target_fl->l_len = tswapl(fl.l_len);
- target_fl->l_pid = tswapl(fl.l_pid);
- }
- break;
-
- case TARGET_F_SETLK:
- case TARGET_F_SETLKW:
- fl.l_type = tswap16(target_fl->l_type);
- fl.l_whence = tswap16(target_fl->l_whence);
- fl.l_start = tswapl(target_fl->l_start);
- fl.l_len = tswapl(target_fl->l_len);
- fl.l_pid = tswapl(target_fl->l_pid);
- ret = get_errno(fcntl(arg1, arg2, &fl));
- break;
-
- case TARGET_F_GETLK64:
- case TARGET_F_SETLK64:
- case TARGET_F_SETLKW64:
- goto unimplemented;
- default:
- ret = get_errno(fcntl(arg1, arg2, arg3));
- break;
- }
+ ret = get_errno(do_fcntl(arg1, arg2, arg3));
break;
- }
case TARGET_NR_mpx:
goto unimplemented;
case TARGET_NR_setpgid:
}
break;
case TARGET_NR_getrusage:
- goto unimplemented;
+ {
+ struct rusage rusage;
+ struct target_rusage *target_rusage = (void *)arg2;
+ ret = get_errno(getrusage(arg1, &rusage));
+ if (!is_error(ret)) {
+ host_to_target_rusage(target_rusage, &rusage);
+ }
+ }
+ break;
case TARGET_NR_gettimeofday:
{
struct target_timeval *target_tv = (void *)arg1;
}
break;
case TARGET_NR_select:
- goto unimplemented;
+ {
+ struct target_sel_arg_struct *sel = (void *)arg1;
+ sel->n = tswapl(sel->n);
+ sel->inp = tswapl(sel->inp);
+ sel->outp = tswapl(sel->outp);
+ sel->exp = tswapl(sel->exp);
+ sel->tvp = tswapl(sel->tvp);
+ ret = do_select(sel->n, (void *)sel->inp, (void *)sel->outp,
+ (void *)sel->exp, (void *)sel->tvp);
+ }
+ break;
case TARGET_NR_symlink:
ret = get_errno(symlink((const char *)arg1, (const char *)arg2));
break;
goto unimplemented;
case TARGET_NR_readdir:
goto unimplemented;
-#ifdef TARGET_I386
case TARGET_NR_mmap:
+#if defined(TARGET_I386) || defined(TARGET_ARM)
{
uint32_t v1, v2, v3, v4, v5, v6, *vptr;
vptr = (uint32_t *)arg1;
v4 = tswap32(vptr[3]);
v5 = tswap32(vptr[4]);
v6 = tswap32(vptr[5]);
- ret = get_errno((long)mmap((void *)v1, v2, v3, v4, v5, v6));
+ ret = get_errno(target_mmap(v1, v2, v3,
+ target_to_host_bitmask(v4, mmap_flags_tbl),
+ v5, v6));
}
- break;
-#endif
-#ifdef TARGET_I386
- case TARGET_NR_mmap2:
#else
- case TARGET_NR_mmap:
+ ret = get_errno(target_mmap(arg1, arg2, arg3,
+ target_to_host_bitmask(arg4, mmap_flags_tbl),
+ arg5,
+ arg6));
#endif
- ret = get_errno((long)mmap((void *)arg1, arg2, arg3, arg4, arg5, arg6));
+ break;
+ case TARGET_NR_mmap2:
+ ret = get_errno(target_mmap(arg1, arg2, arg3,
+ target_to_host_bitmask(arg4, mmap_flags_tbl),
+ arg5,
+ arg6 << TARGET_PAGE_BITS));
break;
case TARGET_NR_munmap:
- ret = get_errno(munmap((void *)arg1, arg2));
+ ret = get_errno(target_munmap(arg1, arg2));
break;
case TARGET_NR_mprotect:
- ret = get_errno(mprotect((void *)arg1, arg2, arg3));
+ ret = get_errno(target_mprotect(arg1, arg2, arg3));
break;
case TARGET_NR_mremap:
- ret = get_errno((long)mremap((void *)arg1, arg2, arg3, arg4));
+ ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
break;
case TARGET_NR_msync:
ret = get_errno(msync((void *)arg1, arg2, arg3));
if (status_ptr)
*status_ptr = tswap32(status);
if (target_rusage) {
- target_rusage->ru_utime.tv_sec = tswapl(rusage.ru_utime.tv_sec);
- target_rusage->ru_utime.tv_usec = tswapl(rusage.ru_utime.tv_usec);
- target_rusage->ru_stime.tv_sec = tswapl(rusage.ru_stime.tv_sec);
- target_rusage->ru_stime.tv_usec = tswapl(rusage.ru_stime.tv_usec);
- target_rusage->ru_maxrss = tswapl(rusage.ru_maxrss);
- target_rusage->ru_ixrss = tswapl(rusage.ru_ixrss);
- target_rusage->ru_idrss = tswapl(rusage.ru_idrss);
- target_rusage->ru_isrss = tswapl(rusage.ru_isrss);
- target_rusage->ru_minflt = tswapl(rusage.ru_minflt);
- target_rusage->ru_majflt = tswapl(rusage.ru_majflt);
- target_rusage->ru_nswap = tswapl(rusage.ru_nswap);
- target_rusage->ru_inblock = tswapl(rusage.ru_inblock);
- target_rusage->ru_oublock = tswapl(rusage.ru_oublock);
- target_rusage->ru_msgsnd = tswapl(rusage.ru_msgsnd);
- target_rusage->ru_msgrcv = tswapl(rusage.ru_msgrcv);
- target_rusage->ru_nsignals = tswapl(rusage.ru_nsignals);
- target_rusage->ru_nvcsw = tswapl(rusage.ru_nvcsw);
- target_rusage->ru_nivcsw = tswapl(rusage.ru_nivcsw);
+ host_to_target_rusage(target_rusage, &rusage);
}
}
}
case TARGET_NR_getdents:
#if TARGET_LONG_SIZE != 4
#error not supported
-#endif
+#elif TARGET_LONG_SIZE == 4 && HOST_LONG_SIZE == 8
+ {
+ struct target_dirent *target_dirp = (void *)arg2;
+ struct dirent *dirp;
+ long count = arg3;
+
+ dirp = malloc(count);
+ if (!dirp)
+ return -ENOMEM;
+
+ ret = get_errno(sys_getdents(arg1, dirp, count));
+ if (!is_error(ret)) {
+ struct dirent *de;
+ struct target_dirent *tde;
+ int len = ret;
+ int reclen, treclen;
+ int count1, tnamelen;
+
+ count1 = 0;
+ de = dirp;
+ tde = target_dirp;
+ while (len > 0) {
+ reclen = de->d_reclen;
+ treclen = reclen - (2 * (sizeof(long) - sizeof(target_long)));
+ tde->d_reclen = tswap16(treclen);
+ tde->d_ino = tswapl(de->d_ino);
+ tde->d_off = tswapl(de->d_off);
+ tnamelen = treclen - (2 * sizeof(target_long) + 2);
+ if (tnamelen > 256)
+ tnamelen = 256;
+ strncpy(tde->d_name, de->d_name, tnamelen);
+ de = (struct dirent *)((char *)de + reclen);
+ len -= reclen;
+ tde = (struct dirent *)((char *)tde + treclen);
+ count1 += treclen;
+ }
+ ret = count1;
+ }
+ free(dirp);
+ }
+#else
{
struct dirent *dirp = (void *)arg2;
long count = arg3;
}
}
}
+#endif
break;
case TARGET_NR_getdents64:
{
case TARGET_NR_prctl:
goto unimplemented;
case TARGET_NR_pread:
- goto unimplemented;
+ page_unprotect_range((void *)arg2, arg3);
+ ret = get_errno(pread(arg1, (void *)arg2, arg3, arg4));
+ break;
case TARGET_NR_pwrite:
- goto unimplemented;
+ ret = get_errno(pwrite(arg1, (void *)arg2, arg3, arg4));
+ break;
case TARGET_NR_chown:
ret = get_errno(chown((const char *)arg1, arg2, arg3));
break;
ret = get_errno(fcntl(arg1, arg2, &fl));
break;
default:
- ret = get_errno(fcntl(arg1, arg2, arg3));
+ ret = get_errno(do_fcntl(arg1, arg2, arg3));
break;
}
break;