4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
33 #include <sys/types.h>
39 #include <sys/mount.h>
40 #include <sys/prctl.h>
41 #include <sys/resource.h>
46 #include <sys/socket.h>
50 #include <sys/times.h>
53 #include <sys/statfs.h>
55 #include <sys/sysinfo.h>
56 #include <sys/utsname.h>
57 //#include <sys/user.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 #include <qemu-common.h>
65 #define termios host_termios
66 #define winsize host_winsize
67 #define termio host_termio
68 #define sgttyb host_sgttyb /* same as target */
69 #define tchars host_tchars /* same as target */
70 #define ltchars host_ltchars /* same as target */
72 #include <linux/termios.h>
73 #include <linux/unistd.h>
74 #include <linux/utsname.h>
75 #include <linux/cdrom.h>
76 #include <linux/hdreg.h>
77 #include <linux/soundcard.h>
79 #include <linux/mtio.h>
81 #include "linux_loop.h"
84 #include "qemu-common.h"
87 #include <linux/futex.h>
88 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
89 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
91 /* XXX: Hardcode the above values. */
92 #define CLONE_NPTL_FLAGS2 0
97 //#include <linux/msdos_fs.h>
98 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
99 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/*
 * _syscallN() macro family: each expands to a static wrapper function
 * <name>() that invokes the raw host syscall __NR_<name> through
 * syscall(2) with N arguments, bypassing any libc wrapper.
 * NOTE(review): the brace lines of the generated bodies appear to be
 * missing from this extraction -- verify against the upstream file.
 */
110 #define _syscall0(type,name) \
111 static type name (void) \
113 return syscall(__NR_##name); \
116 #define _syscall1(type,name,type1,arg1) \
117 static type name (type1 arg1) \
119 return syscall(__NR_##name, arg1); \
122 #define _syscall2(type,name,type1,arg1,type2,arg2) \
123 static type name (type1 arg1,type2 arg2) \
125 return syscall(__NR_##name, arg1, arg2); \
128 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
129 static type name (type1 arg1,type2 arg2,type3 arg3) \
131 return syscall(__NR_##name, arg1, arg2, arg3); \
134 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
135 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
137 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
140 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
142 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
144 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
148 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
149 type5,arg5,type6,arg6) \
150 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
153 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/*
 * Map the sys_* wrapper names used throughout this file onto the host
 * kernel's __NR_* syscall numbers, so the _syscallN() macros below can
 * generate direct-syscall stubs for them.
 */
157 #define __NR_sys_uname __NR_uname
158 #define __NR_sys_faccessat __NR_faccessat
159 #define __NR_sys_fchmodat __NR_fchmodat
160 #define __NR_sys_fchownat __NR_fchownat
161 #define __NR_sys_fstatat64 __NR_fstatat64
162 #define __NR_sys_futimesat __NR_futimesat
163 #define __NR_sys_getcwd1 __NR_getcwd
164 #define __NR_sys_getdents __NR_getdents
165 #define __NR_sys_getdents64 __NR_getdents64
166 #define __NR_sys_getpriority __NR_getpriority
167 #define __NR_sys_linkat __NR_linkat
168 #define __NR_sys_mkdirat __NR_mkdirat
169 #define __NR_sys_mknodat __NR_mknodat
170 #define __NR_sys_newfstatat __NR_newfstatat
171 #define __NR_sys_openat __NR_openat
172 #define __NR_sys_readlinkat __NR_readlinkat
173 #define __NR_sys_renameat __NR_renameat
174 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
175 #define __NR_sys_symlinkat __NR_symlinkat
176 #define __NR_sys_syslog __NR_syslog
177 #define __NR_sys_tgkill __NR_tgkill
178 #define __NR_sys_tkill __NR_tkill
179 #define __NR_sys_unlinkat __NR_unlinkat
180 #define __NR_sys_utimensat __NR_utimensat
181 #define __NR_sys_futex __NR_futex
182 #define __NR_sys_inotify_init __NR_inotify_init
183 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
184 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/*
 * Direct-syscall stub instantiations.  These generate host-side
 * wrappers (gettid, sys_getdents, _llseek, tgkill/tkill, futex, ...)
 * for syscalls that either have no libc wrapper or whose libc wrapper
 * semantics differ from what the emulation layer needs.  Each is
 * conditional on both the target wanting the syscall (TARGET_NR_*) and
 * the host kernel providing it (__NR_*).
 */
186 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
187 #define __NR__llseek __NR_lseek
191 _syscall0(int, gettid)
193 /* This is a replacement for the host gettid() and must return a host
195 static int gettid(void) {
199 #if TARGET_ABI_BITS == 32
200 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
202 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
203 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
205 _syscall2(int, sys_getpriority, int, which, int, who);
206 #if !defined (__x86_64__)
207 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
208 loff_t *, res, uint, wh);
210 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
211 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
212 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
213 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
215 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
216 _syscall2(int,sys_tkill,int,tid,int,sig)
218 #ifdef __NR_exit_group
219 _syscall1(int,exit_group,int,error_code)
221 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
222 _syscall1(int,set_tid_address,int *,tidptr)
224 #if defined(USE_NPTL)
225 #if defined(TARGET_NR_futex) && defined(__NR_futex)
226 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
227 const struct timespec *,timeout,int *,uaddr2,int,val3)
/*
 * Translation table between target-ABI and host open(2)/fcntl(2) flag
 * bits.  Each row is (target_mask, target_bits, host_mask, host_bits);
 * used via target_to_host_bitmask()/host_to_target_bitmask().
 */
231 static bitmask_transtbl fcntl_flags_tbl[] = {
232 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
233 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
234 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
235 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
236 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
237 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
238 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
239 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
240 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
241 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
242 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
243 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
244 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
245 #if defined(O_DIRECT)
246 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
/*
 * Copy one utsname string field.  Truncates to __NEW_UTS_LEN bytes and
 * always NUL-terminates (__NEW_UTS_LEN doesn't include the terminating
 * null, so the destination arrays have room for one extra byte).
 */
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

/*
 * Emulation of the uname syscall: fill a Linux 'struct new_utsname'
 * (the layout the guest-visible kernel ABI uses) from the host's
 * uname(2) data.
 *
 * Returns 0 on success, -1 on failure (errno is set by uname()).
 */
static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  /* bzero() is deprecated (removed in POSIX.1-2008); use memset(). */
  memset(buf, 0, sizeof (*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef __USE_GNU
  /* struct utsname only has a domainname member with GNU extensions */
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);
}

#undef COPY_UTSNAME_FIELD
/*
 * Emulation of the getcwd syscall.  The kernel's getcwd returns the
 * length of the path *including* the terminating NUL, whereas libc's
 * getcwd() returns a pointer -- translate accordingly.
 *
 * Returns strlen(path) + 1 on success, -1 on failure (errno is set by
 * getcwd(), e.g. ERANGE when 'size' is too small).
 */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
/*
 * *at() syscall wrappers.  When CONFIG_ATFILE is set, the host libc
 * provides the atfile family (faccessat, openat, ...) and these thin
 * wrappers forward to it; each one is enabled only when the target's
 * syscall_nr.h requests it (TARGET_NR_*).
 */
296 * Host system seems to have atfile syscall stubs available. We
297 * now enable them one by one as specified by target syscall_nr.h.
300 #ifdef TARGET_NR_faccessat
301 static int sys_faccessat(int dirfd, const char *pathname, int mode)
303 return (faccessat(dirfd, pathname, mode, 0));
306 #ifdef TARGET_NR_fchmodat
307 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
309 return (fchmodat(dirfd, pathname, mode, 0));
312 #if defined(TARGET_NR_fchownat) && defined(USE_UID16)
313 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
314 gid_t group, int flags)
316 return (fchownat(dirfd, pathname, owner, group, flags));
319 #ifdef __NR_fstatat64
320 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
323 return (fstatat(dirfd, pathname, buf, flags));
326 #ifdef __NR_newfstatat
327 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
330 return (fstatat(dirfd, pathname, buf, flags));
333 #ifdef TARGET_NR_futimesat
334 static int sys_futimesat(int dirfd, const char *pathname,
335 const struct timeval times[2])
337 return (futimesat(dirfd, pathname, times));
340 #ifdef TARGET_NR_linkat
341 static int sys_linkat(int olddirfd, const char *oldpath,
342 int newdirfd, const char *newpath, int flags)
344 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
347 #ifdef TARGET_NR_mkdirat
348 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
350 return (mkdirat(dirfd, pathname, mode));
353 #ifdef TARGET_NR_mknodat
354 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
357 return (mknodat(dirfd, pathname, mode, dev));
360 #ifdef TARGET_NR_openat
/* openat is variadic like open(2): the 'mode' argument is only present
 * (and only read) when O_CREAT is in 'flags'; it is translated from
 * target to host bits via fcntl_flags_tbl. */
361 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
364 * open(2) has extra parameter 'mode' when called with
367 if ((flags & O_CREAT) != 0) {
372 * Get the 'mode' parameter and translate it to
376 mode = va_arg(ap, mode_t);
377 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
380 return (openat(dirfd, pathname, flags, mode));
382 return (openat(dirfd, pathname, flags));
385 #ifdef TARGET_NR_readlinkat
386 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
388 return (readlinkat(dirfd, pathname, buf, bufsiz));
391 #ifdef TARGET_NR_renameat
392 static int sys_renameat(int olddirfd, const char *oldpath,
393 int newdirfd, const char *newpath)
395 return (renameat(olddirfd, oldpath, newdirfd, newpath));
398 #ifdef TARGET_NR_symlinkat
399 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
401 return (symlinkat(oldpath, newdirfd, newpath));
404 #ifdef TARGET_NR_unlinkat
405 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
407 return (unlinkat(dirfd, pathname, flags));
/*
 * No atfile support in the host libc: fall back to issuing the raw
 * host syscalls directly (requires the host kernel __NR_* numbers).
 */
410 #else /* !CONFIG_ATFILE */
413 * Try direct syscalls instead
415 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
416 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
418 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
419 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
421 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
422 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
423 uid_t,owner,gid_t,group,int,flags)
425 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
426 defined(__NR_fstatat64)
427 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
428 struct stat *,buf,int,flags)
430 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
431 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
432 const struct timeval *,times)
434 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
435 defined(__NR_newfstatat)
436 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
437 struct stat *,buf,int,flags)
439 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
440 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
441 int,newdirfd,const char *,newpath,int,flags)
443 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
444 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
446 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
447 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
448 mode_t,mode,dev_t,dev)
/* NOTE(review): sys_newfstatat is declared twice in this #else branch
 * (see the block above) -- likely a merge artifact; verify upstream. */
450 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
451 defined(__NR_newfstatat)
452 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
453 struct stat *,buf,int,flags)
455 #if defined(TARGET_NR_openat) && defined(__NR_openat)
456 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
458 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
459 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
460 char *,buf,size_t,bufsize)
462 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
463 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
464 int,newdirfd,const char *,newpath)
466 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
467 _syscall3(int,sys_symlinkat,const char *,oldpath,
468 int,newdirfd,const char *,newpath)
470 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
471 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
474 #endif /* CONFIG_ATFILE */
/*
 * utimensat wrapper: libc version when CONFIG_UTIMENSAT, otherwise a
 * direct syscall stub.  A NULL pathname means "operate on dirfd
 * itself", which maps to futimens().
 */
476 #ifdef CONFIG_UTIMENSAT
477 static int sys_utimensat(int dirfd, const char *pathname,
478 const struct timespec times[2], int flags)
480 if (pathname == NULL)
481 return futimens(dirfd, times);
483 return utimensat(dirfd, pathname, times, flags);
486 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
487 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
488 const struct timespec *,tsp,int,flags)
490 #endif /* CONFIG_UTIMENSAT */
/*
 * inotify wrappers: forwarded to the host libc when CONFIG_INOTIFY;
 * otherwise the TARGET_NR_inotify_* syscalls are #undef'd so the big
 * syscall switch falls through to ENOSYS (guests generally tolerate
 * missing inotify).
 */
492 #ifdef CONFIG_INOTIFY
493 #include <sys/inotify.h>
495 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
496 static int sys_inotify_init(void)
498 return (inotify_init());
501 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
502 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
504 return (inotify_add_watch(fd, pathname, mask));
507 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
508 static int sys_inotify_rm_watch(int fd, int32_t wd)
510 return (inotify_rm_watch(fd, wd));
514 /* Userspace can usually survive runtime without inotify */
515 #undef TARGET_NR_inotify_init
516 #undef TARGET_NR_inotify_add_watch
517 #undef TARGET_NR_inotify_rm_watch
518 #endif /* CONFIG_INOTIFY */
/* Host libc functions used below that may lack prototypes under strict
 * feature-test settings. */
521 extern int personality(int);
522 extern int flock(int, int);
523 extern int setfsuid(int);
524 extern int setfsgid(int);
525 extern int setgroups(int, gid_t *);
/* Size of both errno translation tables; must exceed every host and
 * target errno value used as an index. */
527 #define ERRNO_TABLE_SIZE 1200
529 /* target_to_host_errno_table[] is initialized from
530 * host_to_target_errno_table[] in syscall_init(). */
531 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
/* Sparse table indexed by host errno, yielding the target-ABI errno.
 * Entries are only needed where the numeric values differ per-arch. */
535 * This list is the union of errno values overridden in asm-<arch>/errno.h
536 * minus the errnos that are not actually generic to all archs.
538 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
539 [EIDRM] = TARGET_EIDRM,
540 [ECHRNG] = TARGET_ECHRNG,
541 [EL2NSYNC] = TARGET_EL2NSYNC,
542 [EL3HLT] = TARGET_EL3HLT,
543 [EL3RST] = TARGET_EL3RST,
544 [ELNRNG] = TARGET_ELNRNG,
545 [EUNATCH] = TARGET_EUNATCH,
546 [ENOCSI] = TARGET_ENOCSI,
547 [EL2HLT] = TARGET_EL2HLT,
548 [EDEADLK] = TARGET_EDEADLK,
549 [ENOLCK] = TARGET_ENOLCK,
550 [EBADE] = TARGET_EBADE,
551 [EBADR] = TARGET_EBADR,
552 [EXFULL] = TARGET_EXFULL,
553 [ENOANO] = TARGET_ENOANO,
554 [EBADRQC] = TARGET_EBADRQC,
555 [EBADSLT] = TARGET_EBADSLT,
556 [EBFONT] = TARGET_EBFONT,
557 [ENOSTR] = TARGET_ENOSTR,
558 [ENODATA] = TARGET_ENODATA,
559 [ETIME] = TARGET_ETIME,
560 [ENOSR] = TARGET_ENOSR,
561 [ENONET] = TARGET_ENONET,
562 [ENOPKG] = TARGET_ENOPKG,
563 [EREMOTE] = TARGET_EREMOTE,
564 [ENOLINK] = TARGET_ENOLINK,
565 [EADV] = TARGET_EADV,
566 [ESRMNT] = TARGET_ESRMNT,
567 [ECOMM] = TARGET_ECOMM,
568 [EPROTO] = TARGET_EPROTO,
569 [EDOTDOT] = TARGET_EDOTDOT,
570 [EMULTIHOP] = TARGET_EMULTIHOP,
571 [EBADMSG] = TARGET_EBADMSG,
572 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
573 [EOVERFLOW] = TARGET_EOVERFLOW,
574 [ENOTUNIQ] = TARGET_ENOTUNIQ,
575 [EBADFD] = TARGET_EBADFD,
576 [EREMCHG] = TARGET_EREMCHG,
577 [ELIBACC] = TARGET_ELIBACC,
578 [ELIBBAD] = TARGET_ELIBBAD,
579 [ELIBSCN] = TARGET_ELIBSCN,
580 [ELIBMAX] = TARGET_ELIBMAX,
581 [ELIBEXEC] = TARGET_ELIBEXEC,
582 [EILSEQ] = TARGET_EILSEQ,
583 [ENOSYS] = TARGET_ENOSYS,
584 [ELOOP] = TARGET_ELOOP,
585 [ERESTART] = TARGET_ERESTART,
586 [ESTRPIPE] = TARGET_ESTRPIPE,
587 [ENOTEMPTY] = TARGET_ENOTEMPTY,
588 [EUSERS] = TARGET_EUSERS,
589 [ENOTSOCK] = TARGET_ENOTSOCK,
590 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
591 [EMSGSIZE] = TARGET_EMSGSIZE,
592 [EPROTOTYPE] = TARGET_EPROTOTYPE,
593 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
594 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
595 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
596 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
597 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
598 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
599 [EADDRINUSE] = TARGET_EADDRINUSE,
600 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
601 [ENETDOWN] = TARGET_ENETDOWN,
602 [ENETUNREACH] = TARGET_ENETUNREACH,
603 [ENETRESET] = TARGET_ENETRESET,
604 [ECONNABORTED] = TARGET_ECONNABORTED,
605 [ECONNRESET] = TARGET_ECONNRESET,
606 [ENOBUFS] = TARGET_ENOBUFS,
607 [EISCONN] = TARGET_EISCONN,
608 [ENOTCONN] = TARGET_ENOTCONN,
609 [EUCLEAN] = TARGET_EUCLEAN,
610 [ENOTNAM] = TARGET_ENOTNAM,
611 [ENAVAIL] = TARGET_ENAVAIL,
612 [EISNAM] = TARGET_EISNAM,
613 [EREMOTEIO] = TARGET_EREMOTEIO,
614 [ESHUTDOWN] = TARGET_ESHUTDOWN,
615 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
616 [ETIMEDOUT] = TARGET_ETIMEDOUT,
617 [ECONNREFUSED] = TARGET_ECONNREFUSED,
618 [EHOSTDOWN] = TARGET_EHOSTDOWN,
619 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
620 [EALREADY] = TARGET_EALREADY,
621 [EINPROGRESS] = TARGET_EINPROGRESS,
622 [ESTALE] = TARGET_ESTALE,
623 [ECANCELED] = TARGET_ECANCELED,
624 [ENOMEDIUM] = TARGET_ENOMEDIUM,
625 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
627 [ENOKEY] = TARGET_ENOKEY,
630 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
633 [EKEYREVOKED] = TARGET_EKEYREVOKED,
636 [EKEYREJECTED] = TARGET_EKEYREJECTED,
639 [EOWNERDEAD] = TARGET_EOWNERDEAD,
641 #ifdef ENOTRECOVERABLE
642 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
/* Translate a host errno to the target-ABI errno; table entries of 0
 * mean "same numeric value on both sides". */
646 static inline int host_to_target_errno(int err)
648 if(host_to_target_errno_table[err])
649 return host_to_target_errno_table[err];
/* Inverse translation: target errno to host errno. */
653 static inline int target_to_host_errno(int err)
655 if (target_to_host_errno_table[err])
656 return target_to_host_errno_table[err];
/* Convert a raw host syscall result into the target convention:
 * negative target errno on failure, the value itself otherwise. */
660 static inline abi_long get_errno(abi_long ret)
663 return -host_to_target_errno(errno);
/* A return value in [-4096, -1] is treated as an error code (the same
 * convention the Linux kernel uses for syscall returns). */
668 static inline int is_error(abi_long ret)
670 return (abi_ulong)ret >= (abi_ulong)(-4096);
/* strerror() for target errno values. */
673 char *target_strerror(int err)
675 return strerror(target_to_host_errno(err));
/* Current and initial program break of the emulated guest. */
678 static abi_ulong target_brk;
679 static abi_ulong target_original_brk;
/* Record the guest's initial break, page-aligned. */
681 void target_set_brk(abi_ulong new_brk)
683 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
686 /* do_brk() must return target values and target errnos. */
687 abi_long do_brk(abi_ulong new_brk)
690 abi_long mapped_addr;
/* Shrinking below the original break is refused (matches kernel). */
695 if (new_brk < target_original_brk)
698 brk_page = HOST_PAGE_ALIGN(target_brk);
700 /* If the new brk is less than this, set it and we're done... */
701 if (new_brk < brk_page) {
702 target_brk = new_brk;
706 /* We need to allocate more memory after the brk... */
707 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
708 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
709 PROT_READ|PROT_WRITE,
710 MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
712 if (!is_error(mapped_addr))
713 target_brk = new_brk;
/*
 * Copy an fd_set from guest memory into a host fd_set, bit by bit.
 * 'n' is the number of descriptors (as for select(2)); the guest set
 * is an array of abi_ulong words.  Returns 0 or -TARGET_EFAULT.
 */
718 static inline abi_long copy_from_user_fdset(fd_set *fds,
719 abi_ulong target_fds_addr,
723 abi_ulong b, *target_fds;
725 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
726 if (!(target_fds = lock_user(VERIFY_READ,
728 sizeof(abi_ulong) * nw,
730 return -TARGET_EFAULT;
734 for (i = 0; i < nw; i++) {
735 /* grab the abi_ulong */
736 __get_user(b, &target_fds[i]);
737 for (j = 0; j < TARGET_ABI_BITS; j++) {
738 /* check the bit inside the abi_ulong */
745 unlock_user(target_fds, target_fds_addr, 0);
/*
 * Inverse of the above: pack a host fd_set into the guest's abi_ulong
 * word array.  Returns 0 or -TARGET_EFAULT.
 */
750 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
756 abi_ulong *target_fds;
758 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
759 if (!(target_fds = lock_user(VERIFY_WRITE,
761 sizeof(abi_ulong) * nw,
763 return -TARGET_EFAULT;
766 for (i = 0; i < nw; i++) {
768 for (j = 0; j < TARGET_ABI_BITS; j++) {
769 v |= ((FD_ISSET(k, fds) != 0) << j);
772 __put_user(v, &target_fds[i]);
775 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
780 #if defined(__alpha__)
/* Rescale a clock-tick count from the host's HZ to the target's HZ
 * (no-op when they match). */
786 static inline abi_long host_to_target_clock_t(long ticks)
788 #if HOST_HZ == TARGET_HZ
791 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/*
 * Copy a host struct rusage into guest memory, byte-swapping each
 * field to the target's endianness.  Returns 0 or -TARGET_EFAULT.
 */
795 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
796 const struct rusage *rusage)
798 struct target_rusage *target_rusage;
800 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
801 return -TARGET_EFAULT;
802 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
803 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
804 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
805 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
806 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
807 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
808 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
809 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
810 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
811 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
812 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
813 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
814 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
815 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
816 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
817 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
818 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
819 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
820 unlock_user_struct(target_rusage, target_addr, 1);
/* Read a struct timeval from guest memory (swapping via __get_user).
 * Returns 0 or -TARGET_EFAULT. */
825 static inline abi_long copy_from_user_timeval(struct timeval *tv,
826 abi_ulong target_tv_addr)
828 struct target_timeval *target_tv;
830 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
831 return -TARGET_EFAULT;
833 __get_user(tv->tv_sec, &target_tv->tv_sec);
834 __get_user(tv->tv_usec, &target_tv->tv_usec);
836 unlock_user_struct(target_tv, target_tv_addr, 0);
/* Write a struct timeval into guest memory.  Returns 0 or
 * -TARGET_EFAULT. */
841 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
842 const struct timeval *tv)
844 struct target_timeval *target_tv;
846 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
847 return -TARGET_EFAULT;
849 __put_user(tv->tv_sec, &target_tv->tv_sec);
850 __put_user(tv->tv_usec, &target_tv->tv_usec);
852 unlock_user_struct(target_tv, target_tv_addr, 1);
/* Read a POSIX mq_attr from guest memory.  Returns 0 or
 * -TARGET_EFAULT. */
857 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
858 abi_ulong target_mq_attr_addr)
860 struct target_mq_attr *target_mq_attr;
862 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
863 target_mq_attr_addr, 1))
864 return -TARGET_EFAULT;
866 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
867 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
868 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
869 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
871 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
/* Write a POSIX mq_attr into guest memory.  Returns 0 or
 * -TARGET_EFAULT. */
876 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
877 const struct mq_attr *attr)
879 struct target_mq_attr *target_mq_attr;
881 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
882 target_mq_attr_addr, 0))
883 return -TARGET_EFAULT;
885 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
886 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
887 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
888 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
890 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
895 /* do_select() must return target values and target errnos. */
/* Marshals the three guest fd_sets and optional timeout into host
 * form, calls select(2), then copies results (including the possibly
 * updated timeout) back to guest memory.  A zero guest address means
 * "no set / no timeout" (NULL pointer passed to select). */
896 static abi_long do_select(int n,
897 abi_ulong rfd_addr, abi_ulong wfd_addr,
898 abi_ulong efd_addr, abi_ulong target_tv_addr)
900 fd_set rfds, wfds, efds;
901 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
902 struct timeval tv, *tv_ptr;
906 if (copy_from_user_fdset(&rfds, rfd_addr, n))
907 return -TARGET_EFAULT;
913 if (copy_from_user_fdset(&wfds, wfd_addr, n))
914 return -TARGET_EFAULT;
920 if (copy_from_user_fdset(&efds, efd_addr, n))
921 return -TARGET_EFAULT;
927 if (target_tv_addr) {
928 if (copy_from_user_timeval(&tv, target_tv_addr))
929 return -TARGET_EFAULT;
935 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
937 if (!is_error(ret)) {
938 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
939 return -TARGET_EFAULT;
940 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
941 return -TARGET_EFAULT;
942 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
943 return -TARGET_EFAULT;
945 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
946 return -TARGET_EFAULT;
/* pipe2(2) wrapper; only compiled when the host provides pipe2. */
952 static abi_long do_pipe2(int host_pipe[], int flags)
955 return pipe2(host_pipe, flags);
/* Emulate pipe/pipe2 for the guest.  Some targets (MIPS, SH4) return
 * the second descriptor in a register instead of writing both fds to
 * guest memory; everyone else gets the two fds stored at 'pipedes'. */
961 static abi_long do_pipe(void *cpu_env, int pipedes, int flags)
965 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
968 return get_errno(ret);
969 #if defined(TARGET_MIPS)
970 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
972 #elif defined(TARGET_SH4)
973 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
976 if (put_user_s32(host_pipe[0], pipedes)
977 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
978 return -TARGET_EFAULT;
980 return get_errno(ret);
/* Convert a guest ip_mreq/ip_mreqn (multicast membership request) to
 * host form; 'len' distinguishes the short and long variants. */
983 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
984 abi_ulong target_addr,
987 struct target_ip_mreqn *target_smreqn;
989 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
991 return -TARGET_EFAULT;
992 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
993 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
994 if (len == sizeof(struct target_ip_mreqn))
995 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
996 unlock_user(target_smreqn, target_addr, 0);
/* Copy a guest sockaddr to host form: swap sa_family and, for AF_UNIX,
 * work around callers that pass a length excluding sun_path's NUL. */
1001 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1002 abi_ulong target_addr,
1005 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1006 sa_family_t sa_family;
1007 struct target_sockaddr *target_saddr;
1009 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1011 return -TARGET_EFAULT;
1013 sa_family = tswap16(target_saddr->sa_family);
1015 /* Oops. The caller might send a incomplete sun_path; sun_path
1016 * must be terminated by \0 (see the manual page), but
1017 * unfortunately it is quite common to specify sockaddr_un
1018 * length as "strlen(x->sun_path)" while it should be
1019 * "strlen(...) + 1". We'll fix that here if needed.
1020 * Linux kernel has a similar feature.
1023 if (sa_family == AF_UNIX) {
1024 if (len < unix_maxlen && len > 0) {
1025 char *cp = (char*)target_saddr;
1027 if ( cp[len-1] && !cp[len] )
1030 if (len > unix_maxlen)
1034 memcpy(addr, target_saddr, len);
1035 addr->sa_family = sa_family;
1036 unlock_user(target_saddr, target_addr, 0);
/* Copy a host sockaddr into guest memory, swapping sa_family. */
1041 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1042 struct sockaddr *addr,
1045 struct target_sockaddr *target_saddr;
1047 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1049 return -TARGET_EFAULT;
1050 memcpy(target_saddr, addr, len);
1051 target_saddr->sa_family = tswap16(addr->sa_family);
1052 unlock_user(target_saddr, target_addr, len);
1057 /* ??? Should this also swap msgh->name? */
/*
 * Convert the ancillary-data (control message) chain of a guest msghdr
 * into host form.  Walks both chains in lockstep; SCM_RIGHTS payloads
 * (arrays of file descriptors) are swapped element-wise, anything else
 * is copied raw with a warning.  On overflow of the host buffer the
 * remaining messages are dropped (logged).
 */
1058 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1059 struct target_msghdr *target_msgh)
1061 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1062 abi_long msg_controllen;
1063 abi_ulong target_cmsg_addr;
1064 struct target_cmsghdr *target_cmsg;
1065 socklen_t space = 0;
1067 msg_controllen = tswapl(target_msgh->msg_controllen);
1068 if (msg_controllen < sizeof (struct target_cmsghdr))
1070 target_cmsg_addr = tswapl(target_msgh->msg_control);
1071 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1073 return -TARGET_EFAULT;
1075 while (cmsg && target_cmsg) {
1076 void *data = CMSG_DATA(cmsg);
1077 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1079 int len = tswapl(target_cmsg->cmsg_len)
1080 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1082 space += CMSG_SPACE(len);
1083 if (space > msgh->msg_controllen) {
1084 space -= CMSG_SPACE(len);
1085 gemu_log("Host cmsg overflow\n");
1089 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1090 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1091 cmsg->cmsg_len = CMSG_LEN(len);
1093 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1094 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1095 memcpy(data, target_data, len);
1097 int *fd = (int *)data;
1098 int *target_fd = (int *)target_data;
1099 int i, numfds = len / sizeof(int);
1101 for (i = 0; i < numfds; i++)
1102 fd[i] = tswap32(target_fd[i]);
1105 cmsg = CMSG_NXTHDR(msgh, cmsg);
1106 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1108 unlock_user(target_cmsg, target_cmsg_addr, 0);
1110 msgh->msg_controllen = space;
1114 /* ??? Should this also swap msgh->name? */
/*
 * Inverse of target_to_host_cmsg(): convert a host control-message
 * chain into the guest's msghdr, updating msg_controllen with the
 * actual space used (in target byte order).
 */
1115 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1116 struct msghdr *msgh)
1118 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1119 abi_long msg_controllen;
1120 abi_ulong target_cmsg_addr;
1121 struct target_cmsghdr *target_cmsg;
1122 socklen_t space = 0;
1124 msg_controllen = tswapl(target_msgh->msg_controllen);
1125 if (msg_controllen < sizeof (struct target_cmsghdr))
1127 target_cmsg_addr = tswapl(target_msgh->msg_control);
1128 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1130 return -TARGET_EFAULT;
1132 while (cmsg && target_cmsg) {
1133 void *data = CMSG_DATA(cmsg);
1134 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1136 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1138 space += TARGET_CMSG_SPACE(len);
1139 if (space > msg_controllen) {
1140 space -= TARGET_CMSG_SPACE(len);
1141 gemu_log("Target cmsg overflow\n");
1145 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1146 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1147 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1149 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1150 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1151 memcpy(target_data, data, len);
1153 int *fd = (int *)data;
1154 int *target_fd = (int *)target_data;
1155 int i, numfds = len / sizeof(int);
1157 for (i = 0; i < numfds; i++)
1158 target_fd[i] = tswap32(fd[i]);
1161 cmsg = CMSG_NXTHDR(msgh, cmsg);
1162 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1164 unlock_user(target_cmsg, target_cmsg_addr, space);
1166 target_msgh->msg_controllen = tswapl(space);
1170 /* do_setsockopt() Must return target values and target errnos. */
/*
 * Translate a guest setsockopt() call: fetch the option value from guest
 * memory (byte-swapping where required), map target option constants to
 * host constants, and forward to the host setsockopt().
 */
1171 static abi_long do_setsockopt(int sockfd, int level, int optname,
1172                               abi_ulong optval_addr, socklen_t optlen)
1176     struct ip_mreqn *ip_mreq;
1177     struct ip_mreq_source *ip_mreq_source;
/* Plain 'int'-valued options: read a 32-bit value from the guest. */
1181         /* TCP options all take an 'int' value.  */
1182         if (optlen < sizeof(uint32_t))
1183             return -TARGET_EINVAL;
1185         if (get_user_u32(val, optval_addr))
1186             return -TARGET_EFAULT;
1187         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1194         case IP_ROUTER_ALERT:
1198         case IP_MTU_DISCOVER:
/* These IP options accept either a full 32-bit int or a single byte. */
1204         case IP_MULTICAST_TTL:
1205         case IP_MULTICAST_LOOP:
1207             if (optlen >= sizeof(uint32_t)) {
1208                 if (get_user_u32(val, optval_addr))
1209                     return -TARGET_EFAULT;
1210             } else if (optlen >= 1) {
1211                 if (get_user_u8(val, optval_addr))
1212                     return -TARGET_EFAULT;
1214             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* Multicast membership: guest may pass ip_mreq or the larger ip_mreqn. */
1216         case IP_ADD_MEMBERSHIP:
1217         case IP_DROP_MEMBERSHIP:
1218             if (optlen < sizeof (struct target_ip_mreq) ||
1219                 optlen > sizeof (struct target_ip_mreqn))
1220                 return -TARGET_EINVAL;
1222             ip_mreq = (struct ip_mreqn *) alloca(optlen);
/* NOTE(review): target_to_host_ip_mreq()'s return value is not checked —
 * confirm it cannot fail, or propagate an error here. */
1223             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1224             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
/* Source-specific multicast: the guest buffer is handed to the host
 * unmodified. NOTE(review): this presumes the target and host struct
 * layouts/endianness match — verify for cross-endian targets. */
1227         case IP_BLOCK_SOURCE:
1228         case IP_UNBLOCK_SOURCE:
1229         case IP_ADD_SOURCE_MEMBERSHIP:
1230         case IP_DROP_SOURCE_MEMBERSHIP:
1231             if (optlen != sizeof (struct target_ip_mreq_source))
1232                 return -TARGET_EINVAL;
1234             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1235             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1236             unlock_user (ip_mreq_source, optval_addr, 0);
/* SOL_SOCKET: translate each TARGET_SO_* constant to the host SO_* value,
 * then fall through to the common int-argument handling below. */
1243     case TARGET_SOL_SOCKET:
1245             /* Options with 'int' argument.  */
1246         case TARGET_SO_DEBUG:
1249         case TARGET_SO_REUSEADDR:
1250             optname = SO_REUSEADDR;
1252         case TARGET_SO_TYPE:
1255         case TARGET_SO_ERROR:
1258         case TARGET_SO_DONTROUTE:
1259             optname = SO_DONTROUTE;
1261         case TARGET_SO_BROADCAST:
1262             optname = SO_BROADCAST;
1264         case TARGET_SO_SNDBUF:
1265             optname = SO_SNDBUF;
1267         case TARGET_SO_RCVBUF:
1268             optname = SO_RCVBUF;
1270         case TARGET_SO_KEEPALIVE:
1271             optname = SO_KEEPALIVE;
1273         case TARGET_SO_OOBINLINE:
1274             optname = SO_OOBINLINE;
1276         case TARGET_SO_NO_CHECK:
1277             optname = SO_NO_CHECK;
1279         case TARGET_SO_PRIORITY:
1280             optname = SO_PRIORITY;
1283         case TARGET_SO_BSDCOMPAT:
1284             optname = SO_BSDCOMPAT;
1287         case TARGET_SO_PASSCRED:
1288             optname = SO_PASSCRED;
1290         case TARGET_SO_TIMESTAMP:
1291             optname = SO_TIMESTAMP;
1293         case TARGET_SO_RCVLOWAT:
1294             optname = SO_RCVLOWAT;
1296         case TARGET_SO_RCVTIMEO:
1297             optname = SO_RCVTIMEO;
1299         case TARGET_SO_SNDTIMEO:
1300             optname = SO_SNDTIMEO;
/* Common path for the int-argument SOL_SOCKET options mapped above. */
1306         if (optlen < sizeof(uint32_t))
1307             return -TARGET_EINVAL;
1309         if (get_user_u32(val, optval_addr))
1310             return -TARGET_EFAULT;
1311         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
/* Unknown level/option: log and report "protocol option not supported". */
1315         gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1316         ret = -TARGET_ENOPROTOOPT;
1321 /* do_getsockopt() Must return target values and target errnos. */
/*
 * Translate a guest getsockopt() call: perform the host getsockopt(),
 * then copy the result (and the updated length) back into guest memory
 * with the appropriate byte-swapping.  Options whose result is not a
 * single integer are currently unsupported.
 */
1322 static abi_long do_getsockopt(int sockfd, int level, int optname,
1323                               abi_ulong optval_addr, abi_ulong optlen)
1330     case TARGET_SOL_SOCKET:
/* Structured results (linger, timevals, ucred, sockaddr) need dedicated
 * conversion which is not implemented — they fall through to the
 * unsupported path. */
1333         case TARGET_SO_LINGER:
1334         case TARGET_SO_RCVTIMEO:
1335         case TARGET_SO_SNDTIMEO:
1336         case TARGET_SO_PEERCRED:
1337         case TARGET_SO_PEERNAME:
1338             /* These don't just return a single integer */
/* Simple int-valued options: read guest optlen, query the host, then
 * store the value back as either a byte or a 32-bit int. */
1345         /* TCP options all take an 'int' value.  */
1347         if (get_user_u32(len, optlen))
1348             return -TARGET_EFAULT;
1350             return -TARGET_EINVAL;
1352         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1359             if (put_user_u32(val, optval_addr))
1360                 return -TARGET_EFAULT;
1362             if (put_user_u8(val, optval_addr))
1363                 return -TARGET_EFAULT;
1365         if (put_user_u32(len, optlen))
1366             return -TARGET_EFAULT;
1373         case IP_ROUTER_ALERT:
1377         case IP_MTU_DISCOVER:
1383         case IP_MULTICAST_TTL:
1384         case IP_MULTICAST_LOOP:
1385             if (get_user_u32(len, optlen))
1386                 return -TARGET_EFAULT;
1388                 return -TARGET_EINVAL;
1390             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* If the guest asked for less than an int and the value fits in a byte,
 * write back a single byte; otherwise write a full (clamped) int. */
1393             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1395                 if (put_user_u32(len, optlen)
1396                     || put_user_u8(val, optval_addr))
1397                     return -TARGET_EFAULT;
1399                 if (len > sizeof(int))
1401                 if (put_user_u32(len, optlen)
1402                     || put_user_u32(val, optval_addr))
1403                     return -TARGET_EFAULT;
1407             ret = -TARGET_ENOPROTOOPT;
1413         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1415         ret = -TARGET_EOPNOTSUPP;
1422  * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1423  * other lock functions have a return code of 0 for failure.
/*
 * lock_iovec(): translate a guest iovec array at target_addr into the
 * host 'vec' array, locking each referenced guest buffer into host
 * memory.  'copy' selects whether buffer contents are copied in (for
 * writes to the host side).
 */
1425 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1426                            int count, int copy)
1428     struct target_iovec *target_vec;
1432     target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1434         return -TARGET_EFAULT;
1435     for(i = 0;i < count; i++) {
1436         base = tswapl(target_vec[i].iov_base);
1437         vec[i].iov_len = tswapl(target_vec[i].iov_len);
1438         if (vec[i].iov_len != 0) {
1439             vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1440             /* Don't check lock_user return value. We must call writev even
1441                if a element has invalid base address. */
1443             /* zero length pointer is ignored */
1444             vec[i].iov_base = NULL;
1447     unlock_user (target_vec, target_addr, 0);
/*
 * unlock_iovec(): undo lock_iovec().  Re-reads the guest iovec array to
 * recover each base address, then unlocks every buffer; 'copy' selects
 * whether buffer contents are written back to the guest.
 */
1451 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1452                              int count, int copy)
1454     struct target_iovec *target_vec;
1458     target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1460         return -TARGET_EFAULT;
1461     for(i = 0;i < count; i++) {
1462         if (target_vec[i].iov_base) {
1463             base = tswapl(target_vec[i].iov_base);
1464             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1467     unlock_user (target_vec, target_addr, 0);
1472 /* do_socket() Must return target values and target errnos. */
/*
 * Create a host socket on behalf of the guest.  On MIPS the SOCK_* type
 * constants differ from the host's, so they are remapped first.
 * PF_NETLINK is refused because netlink traffic cannot be proxied.
 */
1473 static abi_long do_socket(int domain, int type, int protocol)
1475 #if defined(TARGET_MIPS)
1477     case TARGET_SOCK_DGRAM:
1480     case TARGET_SOCK_STREAM:
1483     case TARGET_SOCK_RAW:
1486     case TARGET_SOCK_RDM:
1489     case TARGET_SOCK_SEQPACKET:
1490         type = SOCK_SEQPACKET;
1492     case TARGET_SOCK_PACKET:
1497     if (domain == PF_NETLINK)
1498         return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
1499     return get_errno(socket(domain, type, protocol));
1502 /* do_bind() Must return target values and target errnos. */
/*
 * Convert the guest sockaddr and bind the host socket.
 * NOTE(review): alloca(addrlen+1) here vs alloca(addrlen) in
 * do_connect() below — confirm which size is intended; also the return
 * value of target_to_host_sockaddr() is not checked.
 */
1503 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1509         return -TARGET_EINVAL;
1511     addr = alloca(addrlen+1);
1513     target_to_host_sockaddr(addr, target_addr, addrlen);
1514     return get_errno(bind(sockfd, addr, addrlen));
1517 /* do_connect() Must return target values and target errnos. */
/* Convert the guest sockaddr and connect the host socket. */
1518 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1524         return -TARGET_EINVAL;
1526     addr = alloca(addrlen);
1528     target_to_host_sockaddr(addr, target_addr, addrlen);
1529     return get_errno(connect(sockfd, addr, addrlen));
1532 /* do_sendrecvmsg() Must return target values and target errnos. */
/*
 * Common implementation of sendmsg()/recvmsg() ('send' selects which).
 * Builds a host struct msghdr from the guest target_msghdr: converts the
 * optional name, allocates control-message space, and locks the iovec.
 * Ancillary data is translated in the appropriate direction around the
 * host call.
 */
1533 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1534                                int flags, int send)
1537     struct target_msghdr *msgp;
1541     abi_ulong target_vec;
1544     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1548         return -TARGET_EFAULT;
1549     if (msgp->msg_name) {
1550         msg.msg_namelen = tswap32(msgp->msg_namelen);
1551         msg.msg_name = alloca(msg.msg_namelen);
1552         target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1555         msg.msg_name = NULL;
1556         msg.msg_namelen = 0;
/* Double the guest's control length — presumably to leave headroom for
 * host cmsg headers that are larger than the target's (TODO confirm). */
1558     msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1559     msg.msg_control = alloca(msg.msg_controllen);
1560     msg.msg_flags = tswap32(msgp->msg_flags);
1562     count = tswapl(msgp->msg_iovlen);
1563     vec = alloca(count * sizeof(struct iovec));
1564     target_vec = tswapl(msgp->msg_iov);
/* NOTE(review): lock_iovec() can fail (-TARGET_EFAULT) but its return
 * value is not checked here. */
1565     lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1566     msg.msg_iovlen = count;
1570         ret = target_to_host_cmsg(&msg, msgp);
1572             ret = get_errno(sendmsg(fd, &msg, flags));
1574         ret = get_errno(recvmsg(fd, &msg, flags));
1575         if (!is_error(ret)) {
1577             ret = host_to_target_cmsg(msgp, &msg);
1582     unlock_iovec(vec, target_vec, count, !send);
1583     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1587 /* do_accept() Must return target values and target errnos. */
/*
 * accept() wrapper: when the guest passes a sockaddr buffer, read its
 * length, accept on the host, then copy the peer address and the updated
 * length back to the guest.
 */
1588 static abi_long do_accept(int fd, abi_ulong target_addr,
1589                           abi_ulong target_addrlen_addr)
1595     if (target_addr == 0)
1596        return get_errno(accept(fd, NULL, NULL));
1598     if (get_user_u32(addrlen, target_addrlen_addr))
1599         return -TARGET_EFAULT;
1602         return -TARGET_EINVAL;
1604     addr = alloca(addrlen);
1606     ret = get_errno(accept(fd, addr, &addrlen));
1607     if (!is_error(ret)) {
1608         host_to_target_sockaddr(target_addr, addr, addrlen);
1609         if (put_user_u32(addrlen, target_addrlen_addr))
1610             ret = -TARGET_EFAULT;
1615 /* do_getpeername() Must return target values and target errnos. */
/* getpeername() wrapper: same address/length copy-back pattern as above. */
1616 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1617                                abi_ulong target_addrlen_addr)
1623     if (get_user_u32(addrlen, target_addrlen_addr))
1624         return -TARGET_EFAULT;
1627         return -TARGET_EINVAL;
1629     addr = alloca(addrlen);
1631     ret = get_errno(getpeername(fd, addr, &addrlen));
1632     if (!is_error(ret)) {
1633         host_to_target_sockaddr(target_addr, addr, addrlen);
1634         if (put_user_u32(addrlen, target_addrlen_addr))
1635             ret = -TARGET_EFAULT;
1640 /* do_getsockname() Must return target values and target errnos. */
/* getsockname() wrapper. */
1641 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1642                                abi_ulong target_addrlen_addr)
1648     if (target_addr == 0)
/* NOTE(review): this calls accept(), not getsockname() — looks like a
 * copy/paste from do_accept(); a NULL-address getsockname should almost
 * certainly not accept a connection.  Flagging for a proper fix. */
1649        return get_errno(accept(fd, NULL, NULL));
1651     if (get_user_u32(addrlen, target_addrlen_addr))
1652         return -TARGET_EFAULT;
1655         return -TARGET_EINVAL;
1657     addr = alloca(addrlen);
1659     ret = get_errno(getsockname(fd, addr, &addrlen));
1660     if (!is_error(ret)) {
1661         host_to_target_sockaddr(target_addr, addr, addrlen);
1662         if (put_user_u32(addrlen, target_addrlen_addr))
1663             ret = -TARGET_EFAULT;
1668 /* do_socketpair() Must return target values and target errnos. */
/* socketpair() wrapper: store the two new fds into the guest array. */
1669 static abi_long do_socketpair(int domain, int type, int protocol,
1670                               abi_ulong target_tab_addr)
1675     ret = get_errno(socketpair(domain, type, protocol, tab));
1676     if (!is_error(ret)) {
1677         if (put_user_s32(tab[0], target_tab_addr)
1678             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1679             ret = -TARGET_EFAULT;
1684 /* do_sendto() Must return target values and target errnos. */
/*
 * sendto()/send() wrapper: locks the guest message buffer for reading;
 * when a destination address is supplied it is converted and sendto()
 * is used, otherwise plain send().
 */
1685 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1686                           abi_ulong target_addr, socklen_t addrlen)
1693         return -TARGET_EINVAL;
1695     host_msg = lock_user(VERIFY_READ, msg, len, 1);
1697         return -TARGET_EFAULT;
1699         addr = alloca(addrlen);
1700         target_to_host_sockaddr(addr, target_addr, addrlen);
1701         ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1703         ret = get_errno(send(fd, host_msg, len, flags));
1705     unlock_user(host_msg, msg, 0);
1709 /* do_recvfrom() Must return target values and target errnos. */
/*
 * recvfrom()/recv() wrapper: locks the guest buffer for writing; when the
 * guest wants the source address it is converted and written back along
 * with the updated length.
 */
1710 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1711                             abi_ulong target_addr,
1712                             abi_ulong target_addrlen)
1719     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1721         return -TARGET_EFAULT;
1723         if (get_user_u32(addrlen, target_addrlen)) {
1724             ret = -TARGET_EFAULT;
1728             ret = -TARGET_EINVAL;
1731         addr = alloca(addrlen);
1732         ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1734         addr = NULL; /* To keep compiler quiet.  */
1735         ret = get_errno(recv(fd, host_msg, len, flags));
1737     if (!is_error(ret)) {
1739             host_to_target_sockaddr(target_addr, addr, addrlen);
1740             if (put_user_u32(addrlen, target_addrlen)) {
1741                 ret = -TARGET_EFAULT;
/* Success: unlock with 'len' so the received data is copied back to the
 * guest; on error unlock with 0 so nothing is written back. */
1745         unlock_user(host_msg, msg, len);
1748         unlock_user(host_msg, msg, 0);
1753 #ifdef TARGET_NR_socketcall
1754 /* do_socketcall() Must return target values and target errnos. */
/*
 * Demultiplexer for the socketcall(2) syscall used by some ABIs: 'num'
 * selects the socket operation and 'vptr' points to an array of
 * abi_ulong arguments in guest memory.  Each case reads its arguments
 * with get_user_* and dispatches to the matching do_* helper.
 */
1755 static abi_long do_socketcall(int num, abi_ulong vptr)
/* Size of one argument slot in the guest argument array. */
1758     const int n = sizeof(abi_ulong);
1763             int domain, type, protocol;
1765             if (get_user_s32(domain, vptr)
1766                 || get_user_s32(type, vptr + n)
1767                 || get_user_s32(protocol, vptr + 2 * n))
1768                 return -TARGET_EFAULT;
1770             ret = do_socket(domain, type, protocol);
1776             abi_ulong target_addr;
1779             if (get_user_s32(sockfd, vptr)
1780                 || get_user_ual(target_addr, vptr + n)
1781                 || get_user_u32(addrlen, vptr + 2 * n))
1782                 return -TARGET_EFAULT;
1784             ret = do_bind(sockfd, target_addr, addrlen);
1787     case SOCKOP_connect:
1790             abi_ulong target_addr;
1793             if (get_user_s32(sockfd, vptr)
1794                 || get_user_ual(target_addr, vptr + n)
1795                 || get_user_u32(addrlen, vptr + 2 * n))
1796                 return -TARGET_EFAULT;
1798             ret = do_connect(sockfd, target_addr, addrlen);
1803             int sockfd, backlog;
1805             if (get_user_s32(sockfd, vptr)
1806                 || get_user_s32(backlog, vptr + n))
1807                 return -TARGET_EFAULT;
/* listen() needs no argument translation — call the host directly. */
1809             ret = get_errno(listen(sockfd, backlog));
1815             abi_ulong target_addr, target_addrlen;
1817             if (get_user_s32(sockfd, vptr)
1818                 || get_user_ual(target_addr, vptr + n)
1819                 || get_user_u32(target_addrlen, vptr + 2 * n))
1820                 return -TARGET_EFAULT;
1822             ret = do_accept(sockfd, target_addr, target_addrlen);
1825     case SOCKOP_getsockname:
1828             abi_ulong target_addr, target_addrlen;
1830             if (get_user_s32(sockfd, vptr)
1831                 || get_user_ual(target_addr, vptr + n)
1832                 || get_user_u32(target_addrlen, vptr + 2 * n))
1833                 return -TARGET_EFAULT;
1835             ret = do_getsockname(sockfd, target_addr, target_addrlen);
1838     case SOCKOP_getpeername:
1841             abi_ulong target_addr, target_addrlen;
1843             if (get_user_s32(sockfd, vptr)
1844                 || get_user_ual(target_addr, vptr + n)
1845                 || get_user_u32(target_addrlen, vptr + 2 * n))
1846                 return -TARGET_EFAULT;
1848             ret = do_getpeername(sockfd, target_addr, target_addrlen);
1851     case SOCKOP_socketpair:
1853             int domain, type, protocol;
1856             if (get_user_s32(domain, vptr)
1857                 || get_user_s32(type, vptr + n)
1858                 || get_user_s32(protocol, vptr + 2 * n)
1859                 || get_user_ual(tab, vptr + 3 * n))
1860                 return -TARGET_EFAULT;
1862             ret = do_socketpair(domain, type, protocol, tab);
/* send/recv without an address: reuse do_sendto/do_recvfrom with addr=0. */
1872             if (get_user_s32(sockfd, vptr)
1873                 || get_user_ual(msg, vptr + n)
1874                 || get_user_ual(len, vptr + 2 * n)
1875                 || get_user_s32(flags, vptr + 3 * n))
1876                 return -TARGET_EFAULT;
1878             ret = do_sendto(sockfd, msg, len, flags, 0, 0);
1888             if (get_user_s32(sockfd, vptr)
1889                 || get_user_ual(msg, vptr + n)
1890                 || get_user_ual(len, vptr + 2 * n)
1891                 || get_user_s32(flags, vptr + 3 * n))
1892                 return -TARGET_EFAULT;
1894             ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
1906             if (get_user_s32(sockfd, vptr)
1907                 || get_user_ual(msg, vptr + n)
1908                 || get_user_ual(len, vptr + 2 * n)
1909                 || get_user_s32(flags, vptr + 3 * n)
1910                 || get_user_ual(addr, vptr + 4 * n)
1911                 || get_user_u32(addrlen, vptr + 5 * n))
1912                 return -TARGET_EFAULT;
1914             ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
1917     case SOCKOP_recvfrom:
1926             if (get_user_s32(sockfd, vptr)
1927                 || get_user_ual(msg, vptr + n)
1928                 || get_user_ual(len, vptr + 2 * n)
1929                 || get_user_s32(flags, vptr + 3 * n)
1930                 || get_user_ual(addr, vptr + 4 * n)
1931                 || get_user_u32(addrlen, vptr + 5 * n))
1932                 return -TARGET_EFAULT;
1934             ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
1937     case SOCKOP_shutdown:
1941             if (get_user_s32(sockfd, vptr)
1942                 || get_user_s32(how, vptr + n))
1943                 return -TARGET_EFAULT;
1945             ret = get_errno(shutdown(sockfd, how));
1948     case SOCKOP_sendmsg:
1949     case SOCKOP_recvmsg:
1952             abi_ulong target_msg;
1955             if (get_user_s32(fd, vptr)
1956                 || get_user_ual(target_msg, vptr + n)
1957                 || get_user_s32(flags, vptr + 2 * n))
1958                 return -TARGET_EFAULT;
1960             ret = do_sendrecvmsg(fd, target_msg, flags,
1961                                  (num == SOCKOP_sendmsg));
1964     case SOCKOP_setsockopt:
1972             if (get_user_s32(sockfd, vptr)
1973                 || get_user_s32(level, vptr + n)
1974                 || get_user_s32(optname, vptr + 2 * n)
1975                 || get_user_ual(optval, vptr + 3 * n)
1976                 || get_user_u32(optlen, vptr + 4 * n))
1977                 return -TARGET_EFAULT;
1979             ret = do_setsockopt(sockfd, level, optname, optval, optlen);
1982     case SOCKOP_getsockopt:
1990             if (get_user_s32(sockfd, vptr)
1991                 || get_user_s32(level, vptr + n)
1992                 || get_user_s32(optname, vptr + 2 * n)
1993                 || get_user_ual(optval, vptr + 3 * n)
1994                 || get_user_u32(optlen, vptr + 4 * n))
1995                 return -TARGET_EFAULT;
1997             ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2001         gemu_log("Unsupported socketcall: %d\n", num);
2002         ret = -TARGET_ENOSYS;
2009 #define N_SHM_REGIONS	32
/* Table tracking guest shared-memory attachments (see do_shmat/do_shmdt);
 * a slot with start == 0 is free. */
2011 static struct shm_region {
2014 } shm_regions[N_SHM_REGIONS];
/* Guest-layout mirror of struct ipc_perm (SysV IPC permissions). */
2016 struct target_ipc_perm
2023     unsigned short int mode;
2024     unsigned short int __pad1;
2025     unsigned short int __seq;
2026     unsigned short int __pad2;
2027     abi_ulong __unused1;
2028     abi_ulong __unused2;
/* Guest-layout mirror of struct semid_ds (semaphore set state). */
2031 struct target_semid_ds
2033   struct target_ipc_perm sem_perm;
2034   abi_ulong sem_otime;
2035   abi_ulong __unused1;
2036   abi_ulong sem_ctime;
2037   abi_ulong __unused2;
2038   abi_ulong sem_nsems;
2039   abi_ulong __unused3;
2040   abi_ulong __unused4;
/*
 * Copy the ipc_perm embedded in a guest semid_ds at target_addr into
 * the host structure, byte-swapping each field.
 */
2043 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2044                                                abi_ulong target_addr)
2046     struct target_ipc_perm *target_ip;
2047     struct target_semid_ds *target_sd;
2049     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2050         return -TARGET_EFAULT;
2051     target_ip=&(target_sd->sem_perm);
2052     host_ip->__key = tswapl(target_ip->__key);
2053     host_ip->uid = tswapl(target_ip->uid);
2054     host_ip->gid = tswapl(target_ip->gid);
2055     host_ip->cuid = tswapl(target_ip->cuid);
2056     host_ip->cgid = tswapl(target_ip->cgid);
2057     host_ip->mode = tswapl(target_ip->mode);
2058     unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of the above: write a host ipc_perm back into the guest's
 * semid_ds at target_addr. */
2062 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2063                                                struct ipc_perm *host_ip)
2065     struct target_ipc_perm *target_ip;
2066     struct target_semid_ds *target_sd;
2068     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2069         return -TARGET_EFAULT;
2070     target_ip = &(target_sd->sem_perm);
2071     target_ip->__key = tswapl(host_ip->__key);
2072     target_ip->uid = tswapl(host_ip->uid);
2073     target_ip->gid = tswapl(host_ip->gid);
2074     target_ip->cuid = tswapl(host_ip->cuid);
2075     target_ip->cgid = tswapl(host_ip->cgid);
2076     target_ip->mode = tswapl(host_ip->mode);
2077     unlock_user_struct(target_sd, target_addr, 1);
/* Convert a full guest semid_ds (perm + counters/timestamps) to host. */
2081 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2082                                                abi_ulong target_addr)
2084     struct target_semid_ds *target_sd;
2086     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2087         return -TARGET_EFAULT;
2088     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2089         return -TARGET_EFAULT;
2090     host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2091     host_sd->sem_otime = tswapl(target_sd->sem_otime);
2092     host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2093     unlock_user_struct(target_sd, target_addr, 0);
/* Convert a full host semid_ds back to the guest layout. */
2097 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2098                                                struct semid_ds *host_sd)
2100     struct target_semid_ds *target_sd;
2102     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2103         return -TARGET_EFAULT;
2104     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
/* NOTE(review): stray double semicolon below — harmless but worth tidying. */
2105         return -TARGET_EFAULT;;
2106     target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2107     target_sd->sem_otime = tswapl(host_sd->sem_otime);
2108     target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2109     unlock_user_struct(target_sd, target_addr, 1);
/* Guest-layout mirror of struct seminfo (semctl IPC_INFO/SEM_INFO). */
2113 struct target_seminfo {
/* Copy a host seminfo into guest memory field by field, byte-swapping
 * via __put_user. */
2126 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2127                                               struct seminfo *host_seminfo)
2129     struct target_seminfo *target_seminfo;
2130     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2131         return -TARGET_EFAULT;
2132     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2133     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2134     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2135     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2136     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2137     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2138     __put_user(host_seminfo->semume, &target_seminfo->semume);
2139     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2140     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2141     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2142     unlock_user_struct(target_seminfo, target_addr, 1);
/* Host-side semun variants used for semctl(); members per semctl(2). */
2148 	struct semid_ds *buf;
2149 	unsigned short *array;
2150 	struct seminfo *__buf;
/* Guest-layout counterpart used to decode the guest's semctl argument. */
2153 union target_semun {
/*
 * Read a guest GETALL/SETALL semaphore-value array into a freshly
 * malloc'd host array.  The element count is obtained by querying the
 * semaphore set itself with IPC_STAT.
 */
2160 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2161                                                abi_ulong target_addr)
2164     unsigned short *array;
2166     struct semid_ds semid_ds;
2169     semun.buf = &semid_ds;
2171     ret = semctl(semid, 0, IPC_STAT, semun);
2173         return get_errno(ret);
2175     nsems = semid_ds.sem_nsems;
/* NOTE(review): malloc() result is not checked, and *host_array leaks if
 * lock_user() fails below — consider checking/freeing. */
2177     *host_array = malloc(nsems*sizeof(unsigned short));
2178     array = lock_user(VERIFY_READ, target_addr,
2179                       nsems*sizeof(unsigned short), 1);
2181         return -TARGET_EFAULT;
2183     for(i=0; i<nsems; i++) {
2184         __get_user((*host_array)[i], &array[i]);
2186     unlock_user(array, target_addr, 0);
/*
 * Inverse: write the host semaphore-value array back into guest memory
 * (used after GETALL), again sizing the copy via IPC_STAT.
 */
2191 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2192                                                unsigned short **host_array)
2195     unsigned short *array;
2197     struct semid_ds semid_ds;
2200     semun.buf = &semid_ds;
2202     ret = semctl(semid, 0, IPC_STAT, semun);
2204         return get_errno(ret);
2206     nsems = semid_ds.sem_nsems;
2208     array = lock_user(VERIFY_WRITE, target_addr,
2209                       nsems*sizeof(unsigned short), 0);
2211         return -TARGET_EFAULT;
2213     for(i=0; i<nsems; i++) {
2214         __put_user((*host_array)[i], &array[i]);
2217     unlock_user(array, target_addr, 1);
/*
 * semctl() wrapper: build the host 'union semun' argument appropriate to
 * the command (value, array, semid_ds buffer, or seminfo), call the host
 * semctl(), and convert results back to the guest.
 */
2222 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2223                                  union target_semun target_su)
2226     struct semid_ds dsarg;
2227     unsigned short *array;
2228     struct seminfo seminfo;
2229     abi_long ret = -TARGET_EINVAL;
/* GETVAL/SETVAL-style commands: plain integer, swapped both ways. */
2236         arg.val = tswapl(target_su.val);
2237         ret = get_errno(semctl(semid, semnum, cmd, arg));
2238         target_su.val = tswapl(arg.val);
/* GETALL/SETALL: whole semaphore-value array via the helpers above. */
2242         err = target_to_host_semarray(semid, &array, target_su.array);
2246         ret = get_errno(semctl(semid, semnum, cmd, arg));
2247         err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_STAT/IPC_SET-style: full semid_ds round trip. */
2254         err = target_to_host_semid_ds(&dsarg, target_su.buf);
2258         ret = get_errno(semctl(semid, semnum, cmd, arg));
2259         err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* IPC_INFO/SEM_INFO: seminfo copied back to the guest. */
2265         arg.__buf = &seminfo;
2266         ret = get_errno(semctl(semid, semnum, cmd, arg));
2267         err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Commands that take no argument (e.g. IPC_RMID, GETPID-style). */
2275         ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest-layout mirror of struct sembuf (one semop operation). */
2282 struct target_sembuf {
2283     unsigned short sem_num;
/* Convert an array of 'nsops' guest sembufs into host sembufs. */
2288 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2289                                              abi_ulong target_addr,
2292     struct target_sembuf *target_sembuf;
2295     target_sembuf = lock_user(VERIFY_READ, target_addr,
2296                               nsops*sizeof(struct target_sembuf), 1);
2298         return -TARGET_EFAULT;
2300     for(i=0; i<nsops; i++) {
2301         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2302         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2303         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2306     unlock_user(target_sembuf, target_addr, 0);
/* semop() wrapper: convert the operation array, then call the host. */
2311 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
/* NOTE(review): 'nsops' is caller-controlled and sizes a VLA — large
 * values risk stack overflow; consider a bound or heap allocation. */
2313     struct sembuf sops[nsops];
2315     if (target_to_host_sembuf(sops, ptr, nsops))
2316         return -TARGET_EFAULT;
/* NOTE(review): unlike the other wrappers, the result is returned raw —
 * confirm whether get_errno() should wrap semop() here. */
2318     return semop(semid, sops, nsops);
/* Guest-layout mirror of struct msqid_ds (message queue state); 32-bit
 * ABIs carry extra padding words after each timestamp. */
2321 struct target_msqid_ds
2323     struct target_ipc_perm msg_perm;
2324     abi_ulong msg_stime;
2325 #if TARGET_ABI_BITS == 32
2326     abi_ulong __unused1;
2328     abi_ulong msg_rtime;
2329 #if TARGET_ABI_BITS == 32
2330     abi_ulong __unused2;
2332     abi_ulong msg_ctime;
2333 #if TARGET_ABI_BITS == 32
2334     abi_ulong __unused3;
2336     abi_ulong __msg_cbytes;
2338     abi_ulong msg_qbytes;
2339     abi_ulong msg_lspid;
2340     abi_ulong msg_lrpid;
2341     abi_ulong __unused4;
2342     abi_ulong __unused5;
/* Convert a guest msqid_ds (perm + counters/timestamps) to host layout. */
2345 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2346                                                abi_ulong target_addr)
2348     struct target_msqid_ds *target_md;
2350     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2351         return -TARGET_EFAULT;
2352     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2353         return -TARGET_EFAULT;
2354     host_md->msg_stime = tswapl(target_md->msg_stime);
2355     host_md->msg_rtime = tswapl(target_md->msg_rtime);
2356     host_md->msg_ctime = tswapl(target_md->msg_ctime);
2357     host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2358     host_md->msg_qnum = tswapl(target_md->msg_qnum);
2359     host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2360     host_md->msg_lspid = tswapl(target_md->msg_lspid);
2361     host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2362     unlock_user_struct(target_md, target_addr, 0);
/* Inverse: write a host msqid_ds back into guest memory. */
2366 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2367                                                struct msqid_ds *host_md)
2369     struct target_msqid_ds *target_md;
2371     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2372         return -TARGET_EFAULT;
2373     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2374         return -TARGET_EFAULT;
2375     target_md->msg_stime = tswapl(host_md->msg_stime);
2376     target_md->msg_rtime = tswapl(host_md->msg_rtime);
2377     target_md->msg_ctime = tswapl(host_md->msg_ctime);
2378     target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2379     target_md->msg_qnum = tswapl(host_md->msg_qnum);
2380     target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2381     target_md->msg_lspid = tswapl(host_md->msg_lspid);
2382     target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2383     unlock_user_struct(target_md, target_addr, 1);
/* Guest-layout mirror of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
2387 struct target_msginfo {
2395     unsigned short int msgseg;
/* Copy a host msginfo into guest memory, byte-swapping via __put_user. */
2398 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2399                                               struct msginfo *host_msginfo)
2401     struct target_msginfo *target_msginfo;
2402     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2403         return -TARGET_EFAULT;
2404     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2405     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2406     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2407     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2408     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2409     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2410     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2411     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2412     unlock_user_struct(target_msginfo, target_addr, 1);
/*
 * msgctl() wrapper: convert the msqid_ds (or msginfo) argument in the
 * direction each command requires, call the host msgctl(), and write
 * results back to the guest.
 */
2416 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2418     struct msqid_ds dsarg;
2419     struct msginfo msginfo;
2420     abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET-style: full msqid_ds round trip. */
2428         if (target_to_host_msqid_ds(&dsarg,ptr))
2429             return -TARGET_EFAULT;
2430         ret = get_errno(msgctl(msgid, cmd, &dsarg));
2431         if (host_to_target_msqid_ds(ptr,&dsarg))
2432             return -TARGET_EFAULT;
/* Commands with no argument (e.g. IPC_RMID). */
2435         ret = get_errno(msgctl(msgid, cmd, NULL));
/* IPC_INFO/MSG_INFO: host fills a msginfo which is copied to the guest. */
2439         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2440         if (host_to_target_msginfo(ptr, &msginfo))
2441             return -TARGET_EFAULT;
/* Guest-layout message buffer (mtype followed by the payload). */
2448 struct target_msgbuf {
/*
 * msgsnd() wrapper: copy the guest message (type + payload) into a host
 * msgbuf and send it.
 */
2453 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2454                                  unsigned int msgsz, int msgflg)
2456     struct target_msgbuf *target_mb;
2457     struct msgbuf *host_mb;
2460     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2461         return -TARGET_EFAULT;
/* NOTE(review): malloc() result is not checked before use. */
2462     host_mb = malloc(msgsz+sizeof(long));
2463     host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2464     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2465     ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2467     unlock_user_struct(target_mb, msgp, 0);
/*
 * msgrcv() wrapper: receive into a host msgbuf, then copy the payload
 * and (swapped) type back to the guest message buffer.
 */
2472 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2473                                  unsigned int msgsz, abi_long msgtyp,
2476     struct target_msgbuf *target_mb;
2478     struct msgbuf *host_mb;
2481     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2482         return -TARGET_EFAULT;
/* NOTE(review): malloc() result is not checked before use. */
2484     host_mb = malloc(msgsz+sizeof(long));
2485     ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
/* On success 'ret' is the number of payload bytes received; copy them
 * into the guest's mtext, which starts one abi_ulong past msgp. */
2488         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2489         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2490         if (!target_mtext) {
2491             ret = -TARGET_EFAULT;
2494         memcpy(target_mb->mtext, host_mb->mtext, ret);
2495         unlock_user(target_mtext, target_mtext_addr, ret);
2498     target_mb->mtype = tswapl(host_mb->mtype);
2503     unlock_user_struct(target_mb, msgp, 1);
/* Guest-layout mirror of struct shmid_ds (shared-memory segment state);
 * 32-bit ABIs carry extra padding words after each timestamp. */
2507 struct target_shmid_ds
2509     struct target_ipc_perm shm_perm;
2510     abi_ulong shm_segsz;
2511     abi_ulong shm_atime;
2512 #if TARGET_ABI_BITS == 32
2513     abi_ulong __unused1;
2515     abi_ulong shm_dtime;
2516 #if TARGET_ABI_BITS == 32
2517     abi_ulong __unused2;
2519     abi_ulong shm_ctime;
2520 #if TARGET_ABI_BITS == 32
2521     abi_ulong __unused3;
2525     abi_ulong shm_nattch;
2526     unsigned long int __unused4;
2527     unsigned long int __unused5;
/* Convert a guest shmid_ds to host layout (perm via the shared helper,
 * scalar fields via __put_user into the host struct). */
2530 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2531                                                abi_ulong target_addr)
2533     struct target_shmid_ds *target_sd;
2535     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2536         return -TARGET_EFAULT;
2537     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2538         return -TARGET_EFAULT;
2539     __put_user(target_sd->shm_segsz, &host_sd->shm_segsz);
2540     __put_user(target_sd->shm_atime, &host_sd->shm_atime);
2541     __put_user(target_sd->shm_dtime, &host_sd->shm_dtime);
2542     __put_user(target_sd->shm_ctime, &host_sd->shm_ctime);
2543     __put_user(target_sd->shm_cpid, &host_sd->shm_cpid);
2544     __put_user(target_sd->shm_lpid, &host_sd->shm_lpid);
2545     __put_user(target_sd->shm_nattch, &host_sd->shm_nattch);
2546     unlock_user_struct(target_sd, target_addr, 0);
/* Inverse: write a host shmid_ds back into guest memory. */
2550 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2551                                                struct shmid_ds *host_sd)
2553     struct target_shmid_ds *target_sd;
2555     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2556         return -TARGET_EFAULT;
2557     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2558         return -TARGET_EFAULT;
2559     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2560     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2561     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2562     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2563     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2564     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2565     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2566     unlock_user_struct(target_sd, target_addr, 1);
/* Guest-layout mirror of struct shminfo (shmctl IPC_INFO). */
2570 struct target_shminfo {
/* Copy a host shminfo into guest memory, byte-swapping via __put_user. */
2578 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2579                                               struct shminfo *host_shminfo)
2581     struct target_shminfo *target_shminfo;
2582     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2583         return -TARGET_EFAULT;
2584     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2585     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2586     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2587     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2588     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2589     unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest-layout mirror of struct shm_info (shmctl SHM_INFO). */
2593 struct target_shm_info {
2598     abi_ulong swap_attempts;
2599     abi_ulong swap_successes;
/* Copy a host shm_info into guest memory, byte-swapping via __put_user. */
2602 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2603                                                struct shm_info *host_shm_info)
2605     struct target_shm_info *target_shm_info;
2606     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2607         return -TARGET_EFAULT;
2608     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2609     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2610     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2611     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2612     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2613     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2614     unlock_user_struct(target_shm_info, target_addr, 1);
2618 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2620 struct shmid_ds dsarg;
2621 struct shminfo shminfo;
2622 struct shm_info shm_info;
2623 abi_long ret = -TARGET_EINVAL;
2631 if (target_to_host_shmid_ds(&dsarg, buf))
2632 return -TARGET_EFAULT;
2633 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2634 if (host_to_target_shmid_ds(buf, &dsarg))
2635 return -TARGET_EFAULT;
2638 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2639 if (host_to_target_shminfo(buf, &shminfo))
2640 return -TARGET_EFAULT;
2643 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2644 if (host_to_target_shm_info(buf, &shm_info))
2645 return -TARGET_EFAULT;
2650 ret = get_errno(shmctl(shmid, cmd, NULL));
/* Emulate shmat(2).  When the guest supplies an address we attach there
 * (via g2h translation); otherwise we pick a free region of the guest
 * address space with mmap_find_vma and force the attach there with
 * SHM_REMAP.  On success the region is registered in shm_regions[] so
 * do_shmdt can later clear its page flags. */
2657 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2661 struct shmid_ds shm_info;
2664 /* find out the length of the shared memory segment */
2665 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2666 if (is_error(ret)) {
2667 /* can't get length, bail out */
2674 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2676 abi_ulong mmap_start;
2678 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2680 if (mmap_start == -1) {
2682 host_raddr = (void *)-1;
2684 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2687 if (host_raddr == (void *)-1) {
2689 return get_errno((long)host_raddr);
2691 raddr=h2g((unsigned long)host_raddr);
/* Mark the attached range valid/readable (writable unless SHM_RDONLY). */
2693 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2694 PAGE_VALID | PAGE_READ |
2695 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Record the segment in the first free shm_regions[] slot. */
2697 for (i = 0; i < N_SHM_REGIONS; i++) {
2698 if (shm_regions[i].start == 0) {
2699 shm_regions[i].start = raddr;
2700 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): drop the shm_regions[] bookkeeping entry for this
 * guest address, clear its page flags, then detach on the host. */
2710 static inline abi_long do_shmdt(abi_ulong shmaddr)
2714 for (i = 0; i < N_SHM_REGIONS; ++i) {
2715 if (shm_regions[i].start == shmaddr) {
2716 shm_regions[i].start = 0;
/* NOTE(review): do_shmat passes an END address (raddr + size) as the
 * second page_set_flags() argument, but here the raw size is passed.
 * Looks like it should be shmaddr + shm_regions[i].size -- verify. */
2717 page_set_flags(shmaddr, shm_regions[i].size, 0);
2722 return get_errno(shmdt(g2h(shmaddr)));
2725 #ifdef TARGET_NR_ipc
2726 /* ??? This only works with linear mappings. */
2727 /* do_ipc() must return target values and target errnos. */
/* Demultiplexer for the legacy ipc(2) syscall: dispatches on the low 16
 * bits of 'call' to the sem/msg/shm helpers above.  The top 16 bits carry
 * the kernel's IPC interface version, which changes the msgrcv argument
 * convention (old version packs msgp/msgtyp in a target_ipc_kludge). */
2728 static abi_long do_ipc(unsigned int call, int first,
2729 int second, int third,
2730 abi_long ptr, abi_long fifth)
2735 version = call >> 16;
2740 ret = do_semop(first, ptr, second);
2744 ret = get_errno(semget(first, second, third));
2748 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2752 ret = get_errno(msgget(first, second));
2756 ret = do_msgsnd(first, ptr, second, third);
2760 ret = do_msgctl(first, second, ptr);
/* Old-style msgrcv: ptr points to a {msgp, msgtyp} pair in guest memory. */
2767 struct target_ipc_kludge {
2772 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2773 ret = -TARGET_EFAULT;
2777 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2779 unlock_user_struct(tmp, ptr, 0);
2783 ret = do_msgrcv(first, ptr, second, fifth, third);
/* SHMAT: attach, then write the resulting guest address back to *third. */
2792 raddr = do_shmat(first, ptr, second);
2793 if (is_error(raddr))
2794 return get_errno(raddr);
2795 if (put_user_ual(raddr, third))
2796 return -TARGET_EFAULT;
2800 ret = -TARGET_EINVAL;
2805 ret = do_shmdt(ptr);
2809 /* IPC_* flag values are the same on all linux platforms */
2810 ret = get_errno(shmget(first, second, third));
2813 /* IPC_* and SHM_* command values are the same on all linux platforms */
2815 ret = do_shmctl(first, second, third);
2818 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2819 ret = -TARGET_ENOSYS;
2826 /* kernel structure types definitions */
/* X-macro expansion of syscall_types.h: the first pass turns each STRUCT()
 * entry into an enum constant STRUCT_<name>; the second pass emits the
 * argtype descriptor array struct_<name>_def used by the thunk layer. */
2829 #define STRUCT(name, list...) STRUCT_ ## name,
2830 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2832 #include "syscall_types.h"
2835 #undef STRUCT_SPECIAL
2837 #define STRUCT(name, list...) static const argtype struct_ ## name ## _def[] = { list, TYPE_NULL };
2838 #define STRUCT_SPECIAL(name)
2839 #include "syscall_types.h"
2841 #undef STRUCT_SPECIAL
/* One row of the ioctl translation table: maps a target ioctl number to
 * the host number plus an argtype descriptor for converting the argument.
 * IOC_R/IOC_W describe the data direction from the guest's point of view. */
2843 typedef struct IOCTLEntry {
2844 unsigned int target_cmd;
2845 unsigned int host_cmd;
2848 const argtype arg_type[5];
2851 #define IOC_R 0x0001
2852 #define IOC_W 0x0002
2853 #define IOC_RW (IOC_R | IOC_W)
/* Upper bound on any converted ioctl struct; buf_temp in do_ioctl. */
2855 #define MAX_STRUCT_SIZE 4096
/* Table populated from ioctls.h via the IOCTL() macro below. */
2857 static IOCTLEntry ioctl_entries[] = {
2858 #define IOCTL(cmd, access, types...) \
2859 { TARGET_ ## cmd, cmd, #cmd, access, { types } },
2864 /* ??? Implement proper locking for ioctls. */
2865 /* do_ioctl() Must return target values and target errnos. */
/* Look up 'cmd' in ioctl_entries[], then convert the argument according to
 * the entry's arg_type: no-arg and integer ioctls pass through directly;
 * pointer ioctls are thunk-converted into buf_temp in the direction(s)
 * given by ie->access (IOC_R = read back to guest, IOC_W = write to host,
 * IOC_RW = both). */
2866 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
2868 const IOCTLEntry *ie;
2869 const argtype *arg_type;
2871 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* Linear search; table is terminated by a zero target_cmd entry. */
2877 if (ie->target_cmd == 0) {
2878 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
2879 return -TARGET_ENOSYS;
2881 if (ie->target_cmd == cmd)
2885 arg_type = ie->arg_type;
2887 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
2889 switch(arg_type[0]) {
/* TYPE_NULL: ioctl takes no argument. */
2892 ret = get_errno(ioctl(fd, ie->host_cmd));
/* TYPE_INT: argument is a plain integer, no conversion needed. */
2897 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* TYPE_PTR: argument points at a struct that must be thunk-converted. */
2901 target_size = thunk_type_size(arg_type, 0);
2902 switch(ie->access) {
/* Read-only ioctl: run it, then copy the result out to the guest. */
2904 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2905 if (!is_error(ret)) {
2906 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2908 return -TARGET_EFAULT;
2909 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2910 unlock_user(argptr, arg, target_size);
/* Write-only ioctl: copy the guest struct in, then run it. */
2914 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2916 return -TARGET_EFAULT;
2917 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2918 unlock_user(argptr, arg, 0);
2919 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* Read-write ioctl: copy in, run, copy back out. */
2923 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2925 return -TARGET_EFAULT;
2926 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2927 unlock_user(argptr, arg, 0);
2928 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2929 if (!is_error(ret)) {
2930 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2932 return -TARGET_EFAULT;
2933 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2934 unlock_user(argptr, arg, target_size);
2940 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
2941 (long)cmd, arg_type[0]);
2942 ret = -TARGET_ENOSYS;
/* Translation table for termios c_iflag bits: each row is
 * {target_mask, target_bits, host_mask, host_bits}.  All input flags map
 * one-to-one between target and host. */
2948 static const bitmask_transtbl iflag_tbl[] = {
2949 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
2950 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
2951 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
2952 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
2953 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
2954 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
2955 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
2956 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
2957 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
2958 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
2959 { TARGET_IXON, TARGET_IXON, IXON, IXON },
2960 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
2961 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
2962 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* Translation table for termios c_oflag bits.  The multi-bit delay fields
 * (NLDLY, CRDLY, TABDLY, ...) get one row per possible value: the mask
 * selects the field, the value picks the setting within it. */
2966 static const bitmask_transtbl oflag_tbl[] = {
2967 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
2968 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
2969 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
2970 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
2971 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
2972 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
2973 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
2974 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
2975 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
2976 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
2977 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
2978 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
2979 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
2980 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
2981 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
2982 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
2983 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
2984 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
2985 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
2986 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
2987 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
2988 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
2989 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
2990 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* Translation table for termios c_cflag bits.  The CBAUD and CSIZE
 * multi-bit fields are expanded one row per legal value, same scheme as
 * the delay fields in oflag_tbl. */
2994 static const bitmask_transtbl cflag_tbl[] = {
2995 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
2996 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
2997 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
2998 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
2999 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3000 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3001 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3002 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3003 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3004 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3005 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3006 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3007 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3008 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3009 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3010 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3011 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3012 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3013 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3014 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3015 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3016 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3017 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3018 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3019 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3020 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3021 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3022 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3023 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3024 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3025 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* Translation table for termios c_lflag bits; all single-bit, one-to-one. */
3029 static const bitmask_transtbl lflag_tbl[] = {
3030 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3031 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3032 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3033 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3034 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3035 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3036 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3037 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3038 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3039 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3040 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3041 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3042 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3043 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3044 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/* Convert a guest struct termios (already in guest memory layout) to the
 * host's struct host_termios: byte-swap and bit-translate each flag word
 * via the tables above, then remap the control-character array entries
 * index by index (guest and host V* indices may differ). */
3048 static void target_to_host_termios (void *dst, const void *src)
3050 struct host_termios *host = dst;
3051 const struct target_termios *target = src;
3054 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3056 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3058 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3060 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3061 host->c_line = target->c_line;
/* Zero first so host-only c_cc slots are defined. */
3063 memset(host->c_cc, 0, sizeof(host->c_cc));
3064 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3065 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3066 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3067 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3068 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3069 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3070 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3071 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3072 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3073 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3074 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3075 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3076 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3077 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3078 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3079 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3080 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Inverse of target_to_host_termios: translate host termios flags and
 * control characters back into the guest's layout and byte order. */
3083 static void host_to_target_termios (void *dst, const void *src)
3085 struct target_termios *target = dst;
3086 const struct host_termios *host = src;
3089 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3091 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3093 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3095 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3096 target->c_line = host->c_line;
/* Zero first so target-only c_cc slots are defined. */
3098 memset(target->c_cc, 0, sizeof(target->c_cc));
3099 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3100 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3101 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3102 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3103 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3104 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3105 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3106 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3107 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3108 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3109 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3110 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3111 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3112 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3113 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3114 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3115 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor registering the termios converters above; index 0 is
 * the host->target direction, index 1 target->host. */
3118 static const StructEntry struct_termios_def = {
3119 .convert = { host_to_target_termios, target_to_host_termios },
3120 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3121 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* Translation table for mmap(2) flag bits between target and host.
 * NOTE(review): unlike the termios tables above this one is not declared
 * const -- presumably an oversight; verify nothing writes to it. */
3124 static bitmask_transtbl mmap_flags_tbl[] = {
3125 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3126 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3127 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3128 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3129 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3130 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3131 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3132 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3136 #if defined(TARGET_I386)
3138 /* NOTE: there is really one LDT for all the threads */
3139 static uint8_t *ldt_table;
/* modify_ldt(0): copy the emulated LDT (at most 'bytecount' bytes) out to
 * the guest buffer at 'ptr'.  Returns -TARGET_EFAULT if the guest buffer
 * cannot be locked. */
3141 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3148 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3149 if (size > bytecount)
3151 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3153 return -TARGET_EFAULT;
3154 /* ??? Should this by byteswapped? */
3155 memcpy(p, ldt_table, size);
3156 unlock_user(p, ptr, size);
3160 /* XXX: add locking support */
/* modify_ldt(1/0x11): install one LDT descriptor from the guest's
 * struct user_desc at 'ptr'.  Mirrors the Linux kernel's write_ldt():
 * unpack the flags bitfield, validate, lazily allocate the LDT backing
 * store, then encode the two 32-bit descriptor words.  'oldmode' selects
 * the legacy entry format (no 'useable' bit). */
3161 static abi_long write_ldt(CPUX86State *env,
3162 abi_ulong ptr, unsigned long bytecount, int oldmode)
3164 struct target_modify_ldt_ldt_s ldt_info;
3165 struct target_modify_ldt_ldt_s *target_ldt_info;
3166 int seg_32bit, contents, read_exec_only, limit_in_pages;
3167 int seg_not_present, useable, lm;
3168 uint32_t *lp, entry_1, entry_2;
3170 if (bytecount != sizeof(ldt_info))
3171 return -TARGET_EINVAL;
3172 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3173 return -TARGET_EFAULT;
3174 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3175 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3176 ldt_info.limit = tswap32(target_ldt_info->limit);
3177 ldt_info.flags = tswap32(target_ldt_info->flags);
3178 unlock_user_struct(target_ldt_info, ptr, 0);
3180 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3181 return -TARGET_EINVAL;
/* Unpack the user_desc flags bitfield (same layout as the kernel's). */
3182 seg_32bit = ldt_info.flags & 1;
3183 contents = (ldt_info.flags >> 1) & 3;
3184 read_exec_only = (ldt_info.flags >> 3) & 1;
3185 limit_in_pages = (ldt_info.flags >> 4) & 1;
3186 seg_not_present = (ldt_info.flags >> 5) & 1;
3187 useable = (ldt_info.flags >> 6) & 1;
3191 lm = (ldt_info.flags >> 7) & 1;
3193 if (contents == 3) {
3195 return -TARGET_EINVAL;
3196 if (seg_not_present == 0)
3197 return -TARGET_EINVAL;
3199 /* allocate the LDT */
3201 env->ldt.base = target_mmap(0,
3202 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3203 PROT_READ|PROT_WRITE,
3204 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3205 if (env->ldt.base == -1)
3206 return -TARGET_ENOMEM;
3207 memset(g2h(env->ldt.base), 0,
3208 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3209 env->ldt.limit = 0xffff;
3210 ldt_table = g2h(env->ldt.base);
3213 /* NOTE: same code as Linux kernel */
3214 /* Allow LDTs to be cleared by the user. */
3215 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3218 read_exec_only == 1 &&
3220 limit_in_pages == 0 &&
3221 seg_not_present == 1 &&
/* Encode the descriptor: entry_1 = base[15:0]<<16 | limit[15:0],
 * entry_2 = base[31:24] | base[23:16] | limit[19:16] | type bits. */
3229 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3230 (ldt_info.limit & 0x0ffff);
3231 entry_2 = (ldt_info.base_addr & 0xff000000) |
3232 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3233 (ldt_info.limit & 0xf0000) |
3234 ((read_exec_only ^ 1) << 9) |
3236 ((seg_not_present ^ 1) << 15) |
3238 (limit_in_pages << 23) |
3242 entry_2 |= (useable << 20);
3244 /* Install the new entry ... */
3246 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3247 lp[0] = tswap32(entry_1);
3248 lp[1] = tswap32(entry_2);
3252 /* specific and weird i386 syscalls */
/* modify_ldt(2) dispatcher: func 0 = read, 1 = write (old format),
 * 0x11 = write (new format).  Returns target errnos. */
3253 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3254 unsigned long bytecount)
3260 ret = read_ldt(ptr, bytecount);
3263 ret = write_ldt(env, ptr, bytecount, 1);
3266 ret = write_ldt(env, ptr, bytecount, 0);
3269 ret = -TARGET_ENOSYS;
3275 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(2): install a TLS descriptor into the emulated GDT.
 * Largely the same descriptor-encoding logic as write_ldt() above, but
 * targeting the GDT TLS slots; entry_number == -1 means "pick a free
 * slot" and the chosen index is written back to the guest struct. */
3276 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3278 uint64_t *gdt_table = g2h(env->gdt.base);
3279 struct target_modify_ldt_ldt_s ldt_info;
3280 struct target_modify_ldt_ldt_s *target_ldt_info;
3281 int seg_32bit, contents, read_exec_only, limit_in_pages;
3282 int seg_not_present, useable, lm;
3283 uint32_t *lp, entry_1, entry_2;
3286 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3287 if (!target_ldt_info)
3288 return -TARGET_EFAULT;
3289 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3290 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3291 ldt_info.limit = tswap32(target_ldt_info->limit);
3292 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1: search the TLS range of the GDT for an empty slot and report it
 * back to the guest. */
3293 if (ldt_info.entry_number == -1) {
3294 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3295 if (gdt_table[i] == 0) {
3296 ldt_info.entry_number = i;
3297 target_ldt_info->entry_number = tswap32(i);
3302 unlock_user_struct(target_ldt_info, ptr, 1);
3304 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3305 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3306 return -TARGET_EINVAL;
/* Unpack the user_desc flags bitfield. */
3307 seg_32bit = ldt_info.flags & 1;
3308 contents = (ldt_info.flags >> 1) & 3;
3309 read_exec_only = (ldt_info.flags >> 3) & 1;
3310 limit_in_pages = (ldt_info.flags >> 4) & 1;
3311 seg_not_present = (ldt_info.flags >> 5) & 1;
3312 useable = (ldt_info.flags >> 6) & 1;
3316 lm = (ldt_info.flags >> 7) & 1;
3319 if (contents == 3) {
3320 if (seg_not_present == 0)
3321 return -TARGET_EINVAL;
3324 /* NOTE: same code as Linux kernel */
3325 /* Allow LDTs to be cleared by the user. */
3326 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3327 if ((contents == 0 &&
3328 read_exec_only == 1 &&
3330 limit_in_pages == 0 &&
3331 seg_not_present == 1 &&
/* Encode the two descriptor words, as in write_ldt(). */
3339 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3340 (ldt_info.limit & 0x0ffff);
3341 entry_2 = (ldt_info.base_addr & 0xff000000) |
3342 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3343 (ldt_info.limit & 0xf0000) |
3344 ((read_exec_only ^ 1) << 9) |
3346 ((seg_not_present ^ 1) << 15) |
3348 (limit_in_pages << 23) |
3353 /* Install the new entry ... */
3355 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3356 lp[0] = tswap32(entry_1);
3357 lp[1] = tswap32(entry_2);
/* get_thread_area(2): decode the GDT TLS descriptor selected by the
 * guest's entry_number back into a struct user_desc (base, limit, and the
 * flags bitfield), writing the result into guest memory at 'ptr'. */
3361 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3363 struct target_modify_ldt_ldt_s *target_ldt_info;
3364 uint64_t *gdt_table = g2h(env->gdt.base);
3365 uint32_t base_addr, limit, flags;
3366 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3367 int seg_not_present, useable, lm;
3368 uint32_t *lp, entry_1, entry_2;
3370 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3371 if (!target_ldt_info)
3372 return -TARGET_EFAULT;
3373 idx = tswap32(target_ldt_info->entry_number);
3374 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3375 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3376 unlock_user_struct(target_ldt_info, ptr, 1);
3377 return -TARGET_EINVAL;
3379 lp = (uint32_t *)(gdt_table + idx);
3380 entry_1 = tswap32(lp[0]);
3381 entry_2 = tswap32(lp[1]);
/* Extract the descriptor bits; note 'present' and 'read/exec' are stored
 * inverted relative to the user_desc convention, hence the ^ 1. */
3383 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3384 contents = (entry_2 >> 10) & 3;
3385 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3386 seg_32bit = (entry_2 >> 22) & 1;
3387 limit_in_pages = (entry_2 >> 23) & 1;
3388 useable = (entry_2 >> 20) & 1;
3392 lm = (entry_2 >> 21) & 1;
/* Repack into the user_desc flags bitfield layout. */
3394 flags = (seg_32bit << 0) | (contents << 1) |
3395 (read_exec_only << 3) | (limit_in_pages << 4) |
3396 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3397 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3398 base_addr = (entry_1 >> 16) |
3399 (entry_2 & 0xff000000) |
3400 ((entry_2 & 0xff) << 16);
3401 target_ldt_info->base_addr = tswapl(base_addr);
3402 target_ldt_info->limit = tswap32(limit);
3403 target_ldt_info->flags = tswap32(flags);
3404 unlock_user_struct(target_ldt_info, ptr, 1);
3407 #endif /* TARGET_I386 && TARGET_ABI32 */
3409 #ifndef TARGET_ABI32
/* x86-64 arch_prctl(2): ARCH_SET_FS/GS writes 'addr' as the segment base
 * (after loading a null selector); ARCH_GET_FS/GS reads the base back to
 * the guest address 'addr'. */
3410 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3417 case TARGET_ARCH_SET_GS:
3418 case TARGET_ARCH_SET_FS:
3419 if (code == TARGET_ARCH_SET_GS)
3423 cpu_x86_load_seg(env, idx, 0);
3424 env->segs[idx].base = addr;
3426 case TARGET_ARCH_GET_GS:
3427 case TARGET_ARCH_GET_FS:
3428 if (code == TARGET_ARCH_GET_GS)
3432 val = env->segs[idx].base;
3433 if (put_user(val, addr, abi_ulong))
3434 return -TARGET_EFAULT;
3437 ret = -TARGET_EINVAL;
3444 #endif /* defined(TARGET_I386) */
3446 #if defined(USE_NPTL)
3448 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
/* Serializes thread creation; also used as the "parent finished setup"
 * barrier in clone_func below. */
3450 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Parent<->child handshake state for an emulated clone(). */
3453 pthread_mutex_t mutex;
3454 pthread_cond_t cond;
3458 abi_ulong child_tidptr;
3459 abi_ulong parent_tidptr;
/* Entry point of a child thread created by do_fork(CLONE_VM): publish our
 * TID where requested, unblock signals, signal readiness to the parent,
 * then wait on clone_lock until the parent finishes TLS setup. */
3463 static void *clone_func(void *arg)
3465 new_thread_info *info = arg;
3471 ts = (TaskState *)thread_env->opaque;
3472 info->tid = gettid();
3474 if (info->child_tidptr)
3475 put_user_u32(info->tid, info->child_tidptr);
3476 if (info->parent_tidptr)
3477 put_user_u32(info->tid, info->parent_tidptr);
3478 /* Enable signals. */
3479 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3480 /* Signal to the parent that we're ready. */
3481 pthread_mutex_lock(&info->mutex);
3482 pthread_cond_broadcast(&info->cond);
3483 pthread_mutex_unlock(&info->mutex);
3484 /* Wait until the parent has finished initializing the tls state. */
3485 pthread_mutex_lock(&clone_lock);
3486 pthread_mutex_unlock(&clone_lock);
/* Non-NPTL fallback: child runs on a raw clone()d kernel-style stack. */
3492 /* this stack is the equivalent of the kernel stack associated with a
3494 #define NEW_STACK_SIZE 8192
3496 static int clone_func(void *arg)
3498 CPUState *env = arg;
3505 /* do_fork() Must return host values and target errnos (unlike most
3506 do_*() functions). */
/* Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests a thread: a new
 * CPU state is copied, a TaskState allocated, and (under NPTL) a pthread
 * is spawned with a handshake so TID writes and TLS setup happen before
 * the child runs guest code.  Without CLONE_VM the call degrades to a
 * plain fork() of the whole emulator. */
3507 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3508 abi_ulong parent_tidptr, target_ulong newtls,
3509 abi_ulong child_tidptr)
3515 #if defined(USE_NPTL)
3516 unsigned int nptl_flags;
3520 /* Emulate vfork() with fork() */
3521 if (flags & CLONE_VFORK)
3522 flags &= ~(CLONE_VFORK | CLONE_VM);
3524 if (flags & CLONE_VM) {
3525 TaskState *parent_ts = (TaskState *)env->opaque;
3526 #if defined(USE_NPTL)
3527 new_thread_info info;
3528 pthread_attr_t attr;
/* TaskState and the child's stack are allocated as one block. */
3530 ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
3531 init_task_state(ts);
3532 new_stack = ts->stack;
3533 /* we create a new CPU instance. */
3534 new_env = cpu_copy(env);
3535 /* Init regs that differ from the parent. */
3536 cpu_clone_regs(new_env, newsp);
3537 new_env->opaque = ts;
3538 ts->bprm = parent_ts->bprm;
3539 ts->info = parent_ts->info;
3540 #if defined(USE_NPTL)
/* Strip the NPTL-specific flags; they are emulated here, not passed on. */
3542 flags &= ~CLONE_NPTL_FLAGS2;
3544 if (nptl_flags & CLONE_CHILD_CLEARTID) {
3545 ts->child_tidptr = child_tidptr;
3548 if (nptl_flags & CLONE_SETTLS)
3549 cpu_set_tls (new_env, newtls);
3551 /* Grab a mutex so that thread setup appears atomic. */
3552 pthread_mutex_lock(&clone_lock);
3554 memset(&info, 0, sizeof(info));
3555 pthread_mutex_init(&info.mutex, NULL);
3556 pthread_mutex_lock(&info.mutex);
3557 pthread_cond_init(&info.cond, NULL);
3559 if (nptl_flags & CLONE_CHILD_SETTID)
3560 info.child_tidptr = child_tidptr;
3561 if (nptl_flags & CLONE_PARENT_SETTID)
3562 info.parent_tidptr = parent_tidptr;
3564 ret = pthread_attr_init(&attr);
3565 ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
3566 /* It is not safe to deliver signals until the child has finished
3567 initializing, so temporarily block all signals. */
3568 sigfillset(&sigmask);
3569 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3571 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3572 /* TODO: Free new CPU state if thread creation failed. */
3574 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3575 pthread_attr_destroy(&attr);
3577 /* Wait for the child to initialize. */
3578 pthread_cond_wait(&info.cond, &info.mutex);
3580 if (flags & CLONE_PARENT_SETTID)
3581 put_user_u32(ret, parent_tidptr);
3585 pthread_mutex_unlock(&info.mutex);
3586 pthread_cond_destroy(&info.cond);
3587 pthread_mutex_destroy(&info.mutex);
3588 pthread_mutex_unlock(&clone_lock);
3590 if (flags & CLONE_NPTL_FLAGS2)
3592 /* This is probably going to die very quickly, but do it anyway. */
3594 ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3596 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3600 /* if no CLONE_VM, we consider it is a fork */
3601 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3606 /* Child Process. */
3607 cpu_clone_regs(env, newsp);
3609 #if defined(USE_NPTL)
3610 /* There is a race condition here. The parent process could
3611 theoretically read the TID in the child process before the child
3612 tid is set. This would require using either ptrace
3613 (not implemented) or having *_tidptr to point at a shared memory
3614 mapping. We can't repeat the spinlock hack used above because
3615 the child process gets its own copy of the lock. */
3616 if (flags & CLONE_CHILD_SETTID)
3617 put_user_u32(gettid(), child_tidptr);
3618 if (flags & CLONE_PARENT_SETTID)
3619 put_user_u32(gettid(), parent_tidptr);
3620 ts = (TaskState *)env->opaque;
3621 if (flags & CLONE_SETTLS)
3622 cpu_set_tls (env, newtls);
3623 if (flags & CLONE_CHILD_CLEARTID)
3624 ts->child_tidptr = child_tidptr;
/* Emulate fcntl(2).  The lock commands (F_GETLK/F_SETLK/F_SETLKW and their
 * 64-bit variants) marshal struct flock between guest and host layouts;
 * F_GETFL/F_SETFL translate the open-flag bits via fcntl_flags_tbl; other
 * commands pass 'arg' through unchanged. */
3633 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
3636 struct target_flock *target_fl;
3637 struct flock64 fl64;
3638 struct target_flock64 *target_fl64;
3642 case TARGET_F_GETLK:
3643 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3644 return -TARGET_EFAULT;
3645 fl.l_type = tswap16(target_fl->l_type);
3646 fl.l_whence = tswap16(target_fl->l_whence);
3647 fl.l_start = tswapl(target_fl->l_start);
3648 fl.l_len = tswapl(target_fl->l_len);
3649 fl.l_pid = tswapl(target_fl->l_pid);
3650 unlock_user_struct(target_fl, arg, 0);
3651 ret = get_errno(fcntl(fd, F_GETLK, &fl));
3653 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
3654 return -TARGET_EFAULT;
3655 target_fl->l_type = tswap16(fl.l_type);
3656 target_fl->l_whence = tswap16(fl.l_whence);
3657 target_fl->l_start = tswapl(fl.l_start);
3658 target_fl->l_len = tswapl(fl.l_len);
3659 target_fl->l_pid = tswapl(fl.l_pid);
3660 unlock_user_struct(target_fl, arg, 1);
3664 case TARGET_F_SETLK:
3665 case TARGET_F_SETLKW:
3666 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3667 return -TARGET_EFAULT;
3668 fl.l_type = tswap16(target_fl->l_type);
3669 fl.l_whence = tswap16(target_fl->l_whence);
3670 fl.l_start = tswapl(target_fl->l_start);
3671 fl.l_len = tswapl(target_fl->l_len);
3672 fl.l_pid = tswapl(target_fl->l_pid);
3673 unlock_user_struct(target_fl, arg, 0);
/* Map the contiguous TARGET_F_SETLK/SETLKW range onto the host's. */
3674 ret = get_errno(fcntl(fd, F_SETLK+(cmd-TARGET_F_SETLK), &fl));
3677 case TARGET_F_GETLK64:
3678 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3679 return -TARGET_EFAULT;
/* NOTE(review): the '>> 1' applied to l_type in the 64-bit paths (and
 * the matching shift on the way back at line 3690) has no counterpart
 * in the 32-bit paths above -- verify which target ABI this is for. */
3680 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3681 fl64.l_whence = tswap16(target_fl64->l_whence);
3682 fl64.l_start = tswapl(target_fl64->l_start);
3683 fl64.l_len = tswapl(target_fl64->l_len);
/* NOTE(review): tswap16 on l_pid here, but tswapl on the write-back path
 * (line 3694) and in the 32-bit paths -- looks inconsistent; verify. */
3684 fl64.l_pid = tswap16(target_fl64->l_pid);
3685 unlock_user_struct(target_fl64, arg, 0);
3686 ret = get_errno(fcntl(fd, F_GETLK64, &fl64));
3688 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
3689 return -TARGET_EFAULT;
3690 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
3691 target_fl64->l_whence = tswap16(fl64.l_whence);
3692 target_fl64->l_start = tswapl(fl64.l_start);
3693 target_fl64->l_len = tswapl(fl64.l_len);
3694 target_fl64->l_pid = tswapl(fl64.l_pid);
3695 unlock_user_struct(target_fl64, arg, 1);
3698 case TARGET_F_SETLK64:
3699 case TARGET_F_SETLKW64:
3700 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3701 return -TARGET_EFAULT;
3702 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3703 fl64.l_whence = tswap16(target_fl64->l_whence);
3704 fl64.l_start = tswapl(target_fl64->l_start);
3705 fl64.l_len = tswapl(target_fl64->l_len);
3706 fl64.l_pid = tswap16(target_fl64->l_pid);
3707 unlock_user_struct(target_fl64, arg, 0);
3708 ret = get_errno(fcntl(fd, F_SETLK64+(cmd-TARGET_F_SETLK64), &fl64));
/* F_GETFL: translate host open flags back to target bits. */
3712 ret = get_errno(fcntl(fd, cmd, arg));
3714 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
/* F_SETFL: translate target open flags to host bits before the call. */
3719 ret = get_errno(fcntl(fd, cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
/* Everything else: pass the integer argument straight through. */
3723 ret = get_errno(fcntl(fd, cmd, arg));
/* USE_UID16 helpers: convert between the 32-bit uid/gid used internally
 * and the 16-bit values of the legacy *16 syscalls.  -1 (the "unchanged"
 * sentinel) must survive the conversion in both directions, hence the
 * explicit int16_t checks. */
3731 static inline int high2lowuid(int uid)
3739 static inline int high2lowgid(int gid)
3747 static inline int low2highuid(int uid)
3749 if ((int16_t)uid == -1)
3755 static inline int low2highgid(int gid)
3757 if ((int16_t)gid == -1)
3763 #endif /* USE_UID16 */
/* One-time initialization: register all thunk struct descriptors, patch
 * ioctl numbers whose size field depends on the target struct layout,
 * and build the target->host errno reverse table. */
3765 void syscall_init(void)
3768 const argtype *arg_type;
/* Third X-macro pass over syscall_types.h: register each descriptor
 * with the thunk layer. */
3772 #define STRUCT(name, list...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3773 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3774 #include "syscall_types.h"
3776 #undef STRUCT_SPECIAL
3778 /* we patch the ioctl size if necessary. We rely on the fact that
3779 no ioctl has all the bits at '1' in the size field */
3781 while (ie->target_cmd != 0) {
/* An all-ones size field marks "fill in sizeof(target struct) here". */
3782 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
3783 TARGET_IOC_SIZEMASK) {
3784 arg_type = ie->arg_type;
3785 if (arg_type[0] != TYPE_PTR) {
3786 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
3791 size = thunk_type_size(arg_type, 0);
3792 ie->target_cmd = (ie->target_cmd &
3793 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
3794 (size << TARGET_IOC_SIZESHIFT);
3797 /* Build target_to_host_errno_table[] table from
3798 * host_to_target_errno_table[]. */
3799 for (i=0; i < ERRNO_TABLE_SIZE; i++)
3800 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
3802 /* automatic consistency check if same arch */
3803 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3804 (defined(__x86_64__) && defined(TARGET_X86_64))
3805 if (unlikely(ie->target_cmd != ie->host_cmd)) {
3806 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3807 ie->name, ie->target_cmd, ie->host_cmd);
3814 #if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset from the two 32-bit syscall argument
 * words; which word holds the high half depends on target endianness. */
3815 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
3817 #ifdef TARGET_WORDS_BIGENDIAN
3818 return ((uint64_t)word0 << 32) | word1;
3820 return ((uint64_t)word1 << 32) | word0;
3823 #else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already arrives whole in the first argument. */
3824 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
3828 #endif /* TARGET_ABI_BITS != 32 */
3830 #ifdef TARGET_NR_truncate64
/* truncate64(2): combine the split 64-bit length (arg2/arg3) and call the
 * host.  The ARM EABI check handles that ABI's argument alignment rules. */
3831 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
3837 if (((CPUARMState *)cpu_env)->eabi)
3843 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
3847 #ifdef TARGET_NR_ftruncate64
/* ftruncate64(2): same argument-reassembly scheme as target_truncate64. */
3848 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
3854 if (((CPUARMState *)cpu_env)->eabi)
3860 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Read a struct timespec from guest memory into *host_ts, byte-swapping
 * both fields.  Returns -TARGET_EFAULT on a bad guest address. */
3864 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
3865 abi_ulong target_addr)
3867 struct target_timespec *target_ts;
3869 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
3870 return -TARGET_EFAULT;
3871 host_ts->tv_sec = tswapl(target_ts->tv_sec);
3872 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
3873 unlock_user_struct(target_ts, target_addr, 0);
/* Write *host_ts out to a struct timespec in guest memory, byte-swapping
 * both fields.  Returns -TARGET_EFAULT on a bad guest address. */
3877 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
3878 struct timespec *host_ts)
3880 struct target_timespec *target_ts;
3882 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
3883 return -TARGET_EFAULT;
3884 target_ts->tv_sec = tswapl(host_ts->tv_sec);
3885 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
3886 unlock_user_struct(target_ts, target_addr, 1);
3890 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Convert a host struct stat into the guest's stat64 layout at
 * target_addr.  Two layouts are handled: the ARM EABI-specific
 * target_eabi_stat64 (first branch, guarded by the ->eabi flag) and
 * the generic target_stat/target_stat64 (second branch, chosen by
 * TARGET_LONG_BITS).  Returns -TARGET_EFAULT on a bad guest address.
 * NOTE(review): this excerpt elides several lines — the #else/#endif
 * pairs for the branches and the TARGET_ARM guard around the
 * CPUARMState cast; confirm structure against the full source.
 */
3891 static inline abi_long host_to_target_stat64(void *cpu_env,
3892 abi_ulong target_addr,
3893 struct stat *host_st)
3896 if (((CPUARMState *)cpu_env)->eabi) {
3897 struct target_eabi_stat64 *target_st;
3899 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3900 return -TARGET_EFAULT;
/* Zero first so any padding / fields we do not fill read as 0. */
3901 memset(target_st, 0, sizeof(struct target_eabi_stat64));
3902 __put_user(host_st->st_dev, &target_st->st_dev);
3903 __put_user(host_st->st_ino, &target_st->st_ino);
3904 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* Some targets carry the inode in a second __st_ino field as well. */
3905 __put_user(host_st->st_ino, &target_st->__st_ino);
3907 __put_user(host_st->st_mode, &target_st->st_mode);
3908 __put_user(host_st->st_nlink, &target_st->st_nlink);
3909 __put_user(host_st->st_uid, &target_st->st_uid);
3910 __put_user(host_st->st_gid, &target_st->st_gid);
3911 __put_user(host_st->st_rdev, &target_st->st_rdev);
3912 __put_user(host_st->st_size, &target_st->st_size);
3913 __put_user(host_st->st_blksize, &target_st->st_blksize);
3914 __put_user(host_st->st_blocks, &target_st->st_blocks);
3915 __put_user(host_st->st_atime, &target_st->target_st_atime);
3916 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3917 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3918 unlock_user_struct(target_st, target_addr, 1);
/* Generic (non-EABI) path: pick the guest struct by ABI word size. */
3922 #if TARGET_LONG_BITS == 64
3923 struct target_stat *target_st;
3925 struct target_stat64 *target_st;
3928 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3929 return -TARGET_EFAULT;
3930 memset(target_st, 0, sizeof(*target_st));
3931 __put_user(host_st->st_dev, &target_st->st_dev);
3932 __put_user(host_st->st_ino, &target_st->st_ino);
3933 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3934 __put_user(host_st->st_ino, &target_st->__st_ino);
3936 __put_user(host_st->st_mode, &target_st->st_mode);
3937 __put_user(host_st->st_nlink, &target_st->st_nlink);
3938 __put_user(host_st->st_uid, &target_st->st_uid);
3939 __put_user(host_st->st_gid, &target_st->st_gid);
3940 __put_user(host_st->st_rdev, &target_st->st_rdev);
3941 /* XXX: better use of kernel struct */
3942 __put_user(host_st->st_size, &target_st->st_size);
3943 __put_user(host_st->st_blksize, &target_st->st_blksize);
3944 __put_user(host_st->st_blocks, &target_st->st_blocks);
3945 __put_user(host_st->st_atime, &target_st->target_st_atime);
3946 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3947 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3948 unlock_user_struct(target_st, target_addr, 1);
3955 #if defined(USE_NPTL)
3956 /* ??? Using host futex calls even when target atomic operations
3957 are not really atomic probably breaks things. However implementing
3958 futexes locally would make futexes shared between multiple processes
3959 tricky. However they're probably useless because guest atomic
3960 operations won't work either. */
/* Forward a guest futex(2) call to the host futex syscall, translating
 * guest addresses with g2h() and byte-swapping the compared values
 * (val / val3) where the kernel compares them against guest memory.
 * Unknown operations yield -TARGET_ENOSYS.
 * NOTE(review): this excerpt elides the case labels for WAIT/WAKE/FD/
 * REQUEUE and the timeout-NULL handling around the timespec conversion;
 * comments here describe only what the visible lines establish.
 */
3961 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
3962 target_ulong uaddr2, int val3)
3964 struct timespec ts, *pts;
3966 /* ??? We assume FUTEX_* constants are the same on both host
3968 #ifdef FUTEX_CMD_MASK
/* Strip modifier bits (e.g. PRIVATE flag) before dispatching. */
3969 switch ((op&FUTEX_CMD_MASK)) {
3976 target_to_host_timespec(pts, timeout);
/* tswap32(val): the kernel compares this against guest memory, which
 * is stored in guest byte order. */
3980 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
3983 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
3985 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
3987 return get_errno(sys_futex(g2h(uaddr), op, val,
3988 NULL, g2h(uaddr2), 0));
3989 case FUTEX_CMP_REQUEUE:
3990 return get_errno(sys_futex(g2h(uaddr), op, val,
3991 NULL, g2h(uaddr2), tswap32(val3)));
3993 return -TARGET_ENOSYS;
3998 /* Map host to target signal numbers for the wait family of syscalls.
3999 Assume all other status bits are the same. */
/* WIFSIGNALED: the low 7 bits hold the terminating signal — replace
 * just those with the translated signal number.
 * WIFSTOPPED: the stopping signal lives in bits 8-15 instead.
 * (The remainder of the function is elided in this excerpt.)
 */
4000 static int host_to_target_waitstatus(int status)
4002 if (WIFSIGNALED(status)) {
4003 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4005 if (WIFSTOPPED(status)) {
4006 return (host_to_target_signal(WSTOPSIG(status)) << 8)
/* Return the host (or user-overridden) kernel version packed as
 * 0xMMmmpp: three dot-separated decimal components, one per byte.
 * The -r/qemu_uname_release override, when set, takes precedence over
 * the real uname() release string.  The static `osversion` presumably
 * caches the computed value across calls — confirm against the full
 * source; this excerpt elides the cache check, error paths and return.
 */
4012 int get_osversion(void)
4014 static int osversion;
4015 struct new_utsname buf;
4020 if (qemu_uname_release && *qemu_uname_release) {
4021 s = qemu_uname_release;
4023 if (sys_uname(&buf))
/* Parse up to three numeric components ("major.minor.patch"). */
4028 for (i = 0; i < 3; i++) {
4030 while (*s >= '0' && *s <= '9') {
/* Shift the accumulated value one byte per component. */
4035 tmp = (tmp << 8) + n;
4043 /* do_syscall() should always have a single exit point at the end so
4044 that actions, such as logging of syscall results, can be performed.
4045 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4046 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4047 abi_long arg2, abi_long arg3, abi_long arg4,
4048 abi_long arg5, abi_long arg6)
4056 gemu_log("syscall %d", num);
4059 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4062 case TARGET_NR_exit:
4064 /* In old applications this may be used to implement _exit(2).
4065 However in threaded applications it is used for thread termination,
4066 and _exit_group is used for application termination.
4067 Do thread termination if we have more than one thread. */
4068 /* FIXME: This probably breaks if a signal arrives. We should probably
4069 be disabling signals. */
4070 if (first_cpu->next_cpu) {
4077 while (p && p != (CPUState *)cpu_env) {
4078 lastp = &p->next_cpu;
4081 /* If we didn't find the CPU for this thread then something is
4085 /* Remove the CPU from the list. */
4086 *lastp = p->next_cpu;
4088 TaskState *ts = ((CPUState *)cpu_env)->opaque;
4089 if (ts->child_tidptr) {
4090 put_user_u32(0, ts->child_tidptr);
4091 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4094 /* TODO: Free CPU state. */
4101 gdb_exit(cpu_env, arg1);
4103 ret = 0; /* avoid warning */
4105 case TARGET_NR_read:
4109 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4111 ret = get_errno(read(arg1, p, arg3));
4112 unlock_user(p, arg2, ret);
4115 case TARGET_NR_write:
4116 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4118 ret = get_errno(write(arg1, p, arg3));
4119 unlock_user(p, arg2, 0);
4121 case TARGET_NR_open:
4122 if (!(p = lock_user_string(arg1)))
4124 ret = get_errno(open(path(p),
4125 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4127 unlock_user(p, arg1, 0);
4129 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4130 case TARGET_NR_openat:
4131 if (!(p = lock_user_string(arg2)))
4133 ret = get_errno(sys_openat(arg1,
4135 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4137 unlock_user(p, arg2, 0);
4140 case TARGET_NR_close:
4141 ret = get_errno(close(arg1));
4146 case TARGET_NR_fork:
4147 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4149 #ifdef TARGET_NR_waitpid
4150 case TARGET_NR_waitpid:
4153 ret = get_errno(waitpid(arg1, &status, arg3));
4154 if (!is_error(ret) && arg2
4155 && put_user_s32(host_to_target_waitstatus(status), arg2))
4160 #ifdef TARGET_NR_waitid
4161 case TARGET_NR_waitid:
4165 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4166 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4167 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4169 host_to_target_siginfo(p, &info);
4170 unlock_user(p, arg3, sizeof(target_siginfo_t));
4175 #ifdef TARGET_NR_creat /* not on alpha */
4176 case TARGET_NR_creat:
4177 if (!(p = lock_user_string(arg1)))
4179 ret = get_errno(creat(p, arg2));
4180 unlock_user(p, arg1, 0);
4183 case TARGET_NR_link:
4186 p = lock_user_string(arg1);
4187 p2 = lock_user_string(arg2);
4189 ret = -TARGET_EFAULT;
4191 ret = get_errno(link(p, p2));
4192 unlock_user(p2, arg2, 0);
4193 unlock_user(p, arg1, 0);
4196 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4197 case TARGET_NR_linkat:
4202 p = lock_user_string(arg2);
4203 p2 = lock_user_string(arg4);
4205 ret = -TARGET_EFAULT;
4207 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4208 unlock_user(p, arg2, 0);
4209 unlock_user(p2, arg4, 0);
4213 case TARGET_NR_unlink:
4214 if (!(p = lock_user_string(arg1)))
4216 ret = get_errno(unlink(p));
4217 unlock_user(p, arg1, 0);
4219 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4220 case TARGET_NR_unlinkat:
4221 if (!(p = lock_user_string(arg2)))
4223 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4224 unlock_user(p, arg2, 0);
4227 case TARGET_NR_execve:
4229 char **argp, **envp;
4232 abi_ulong guest_argp;
4233 abi_ulong guest_envp;
4239 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4240 if (get_user_ual(addr, gp))
4248 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4249 if (get_user_ual(addr, gp))
4256 argp = alloca((argc + 1) * sizeof(void *));
4257 envp = alloca((envc + 1) * sizeof(void *));
4259 for (gp = guest_argp, q = argp; gp;
4260 gp += sizeof(abi_ulong), q++) {
4261 if (get_user_ual(addr, gp))
4265 if (!(*q = lock_user_string(addr)))
4270 for (gp = guest_envp, q = envp; gp;
4271 gp += sizeof(abi_ulong), q++) {
4272 if (get_user_ual(addr, gp))
4276 if (!(*q = lock_user_string(addr)))
4281 if (!(p = lock_user_string(arg1)))
4283 ret = get_errno(execve(p, argp, envp));
4284 unlock_user(p, arg1, 0);
4289 ret = -TARGET_EFAULT;
4292 for (gp = guest_argp, q = argp; *q;
4293 gp += sizeof(abi_ulong), q++) {
4294 if (get_user_ual(addr, gp)
4297 unlock_user(*q, addr, 0);
4299 for (gp = guest_envp, q = envp; *q;
4300 gp += sizeof(abi_ulong), q++) {
4301 if (get_user_ual(addr, gp)
4304 unlock_user(*q, addr, 0);
4308 case TARGET_NR_chdir:
4309 if (!(p = lock_user_string(arg1)))
4311 ret = get_errno(chdir(p));
4312 unlock_user(p, arg1, 0);
4314 #ifdef TARGET_NR_time
4315 case TARGET_NR_time:
4318 ret = get_errno(time(&host_time));
4321 && put_user_sal(host_time, arg1))
4326 case TARGET_NR_mknod:
4327 if (!(p = lock_user_string(arg1)))
4329 ret = get_errno(mknod(p, arg2, arg3));
4330 unlock_user(p, arg1, 0);
4332 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4333 case TARGET_NR_mknodat:
4334 if (!(p = lock_user_string(arg2)))
4336 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4337 unlock_user(p, arg2, 0);
4340 case TARGET_NR_chmod:
4341 if (!(p = lock_user_string(arg1)))
4343 ret = get_errno(chmod(p, arg2));
4344 unlock_user(p, arg1, 0);
4346 #ifdef TARGET_NR_break
4347 case TARGET_NR_break:
4350 #ifdef TARGET_NR_oldstat
4351 case TARGET_NR_oldstat:
4354 case TARGET_NR_lseek:
4355 ret = get_errno(lseek(arg1, arg2, arg3));
4357 #ifdef TARGET_NR_getxpid
4358 case TARGET_NR_getxpid:
4360 case TARGET_NR_getpid:
4362 ret = get_errno(getpid());
4364 case TARGET_NR_mount:
4366 /* need to look at the data field */
4368 p = lock_user_string(arg1);
4369 p2 = lock_user_string(arg2);
4370 p3 = lock_user_string(arg3);
4371 if (!p || !p2 || !p3)
4372 ret = -TARGET_EFAULT;
4374 /* FIXME - arg5 should be locked, but it isn't clear how to
4375 * do that since it's not guaranteed to be a NULL-terminated
4378 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4379 unlock_user(p, arg1, 0);
4380 unlock_user(p2, arg2, 0);
4381 unlock_user(p3, arg3, 0);
4384 #ifdef TARGET_NR_umount
4385 case TARGET_NR_umount:
4386 if (!(p = lock_user_string(arg1)))
4388 ret = get_errno(umount(p));
4389 unlock_user(p, arg1, 0);
4392 #ifdef TARGET_NR_stime /* not on alpha */
4393 case TARGET_NR_stime:
4396 if (get_user_sal(host_time, arg1))
4398 ret = get_errno(stime(&host_time));
4402 case TARGET_NR_ptrace:
4404 #ifdef TARGET_NR_alarm /* not on alpha */
4405 case TARGET_NR_alarm:
4409 #ifdef TARGET_NR_oldfstat
4410 case TARGET_NR_oldfstat:
4413 #ifdef TARGET_NR_pause /* not on alpha */
4414 case TARGET_NR_pause:
4415 ret = get_errno(pause());
4418 #ifdef TARGET_NR_utime
4419 case TARGET_NR_utime:
4421 struct utimbuf tbuf, *host_tbuf;
4422 struct target_utimbuf *target_tbuf;
4424 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4426 tbuf.actime = tswapl(target_tbuf->actime);
4427 tbuf.modtime = tswapl(target_tbuf->modtime);
4428 unlock_user_struct(target_tbuf, arg2, 0);
4433 if (!(p = lock_user_string(arg1)))
4435 ret = get_errno(utime(p, host_tbuf));
4436 unlock_user(p, arg1, 0);
4440 case TARGET_NR_utimes:
4442 struct timeval *tvp, tv[2];
4444 if (copy_from_user_timeval(&tv[0], arg2)
4445 || copy_from_user_timeval(&tv[1],
4446 arg2 + sizeof(struct target_timeval)))
4452 if (!(p = lock_user_string(arg1)))
4454 ret = get_errno(utimes(p, tvp));
4455 unlock_user(p, arg1, 0);
4458 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4459 case TARGET_NR_futimesat:
4461 struct timeval *tvp, tv[2];
4463 if (copy_from_user_timeval(&tv[0], arg3)
4464 || copy_from_user_timeval(&tv[1],
4465 arg3 + sizeof(struct target_timeval)))
4471 if (!(p = lock_user_string(arg2)))
4473 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4474 unlock_user(p, arg2, 0);
4478 #ifdef TARGET_NR_stty
4479 case TARGET_NR_stty:
4482 #ifdef TARGET_NR_gtty
4483 case TARGET_NR_gtty:
4486 case TARGET_NR_access:
4487 if (!(p = lock_user_string(arg1)))
4489 ret = get_errno(access(p, arg2));
4490 unlock_user(p, arg1, 0);
4492 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4493 case TARGET_NR_faccessat:
4494 if (!(p = lock_user_string(arg2)))
4496 ret = get_errno(sys_faccessat(arg1, p, arg3));
4497 unlock_user(p, arg2, 0);
4500 #ifdef TARGET_NR_nice /* not on alpha */
4501 case TARGET_NR_nice:
4502 ret = get_errno(nice(arg1));
4505 #ifdef TARGET_NR_ftime
4506 case TARGET_NR_ftime:
4509 case TARGET_NR_sync:
4513 case TARGET_NR_kill:
4514 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
4516 case TARGET_NR_rename:
4519 p = lock_user_string(arg1);
4520 p2 = lock_user_string(arg2);
4522 ret = -TARGET_EFAULT;
4524 ret = get_errno(rename(p, p2));
4525 unlock_user(p2, arg2, 0);
4526 unlock_user(p, arg1, 0);
4529 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4530 case TARGET_NR_renameat:
4533 p = lock_user_string(arg2);
4534 p2 = lock_user_string(arg4);
4536 ret = -TARGET_EFAULT;
4538 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
4539 unlock_user(p2, arg4, 0);
4540 unlock_user(p, arg2, 0);
4544 case TARGET_NR_mkdir:
4545 if (!(p = lock_user_string(arg1)))
4547 ret = get_errno(mkdir(p, arg2));
4548 unlock_user(p, arg1, 0);
4550 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4551 case TARGET_NR_mkdirat:
4552 if (!(p = lock_user_string(arg2)))
4554 ret = get_errno(sys_mkdirat(arg1, p, arg3));
4555 unlock_user(p, arg2, 0);
4558 case TARGET_NR_rmdir:
4559 if (!(p = lock_user_string(arg1)))
4561 ret = get_errno(rmdir(p));
4562 unlock_user(p, arg1, 0);
4565 ret = get_errno(dup(arg1));
4567 case TARGET_NR_pipe:
4568 ret = do_pipe(cpu_env, arg1, 0);
4570 #ifdef TARGET_NR_pipe2
4571 case TARGET_NR_pipe2:
4572 ret = do_pipe(cpu_env, arg1, arg2);
4575 case TARGET_NR_times:
4577 struct target_tms *tmsp;
4579 ret = get_errno(times(&tms));
4581 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
4584 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
4585 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
4586 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
4587 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
4590 ret = host_to_target_clock_t(ret);
4593 #ifdef TARGET_NR_prof
4594 case TARGET_NR_prof:
4597 #ifdef TARGET_NR_signal
4598 case TARGET_NR_signal:
4601 case TARGET_NR_acct:
4603 ret = get_errno(acct(NULL));
4605 if (!(p = lock_user_string(arg1)))
4607 ret = get_errno(acct(path(p)));
4608 unlock_user(p, arg1, 0);
4611 #ifdef TARGET_NR_umount2 /* not on alpha */
4612 case TARGET_NR_umount2:
4613 if (!(p = lock_user_string(arg1)))
4615 ret = get_errno(umount2(p, arg2));
4616 unlock_user(p, arg1, 0);
4619 #ifdef TARGET_NR_lock
4620 case TARGET_NR_lock:
4623 case TARGET_NR_ioctl:
4624 ret = do_ioctl(arg1, arg2, arg3);
4626 case TARGET_NR_fcntl:
4627 ret = do_fcntl(arg1, arg2, arg3);
4629 #ifdef TARGET_NR_mpx
4633 case TARGET_NR_setpgid:
4634 ret = get_errno(setpgid(arg1, arg2));
4636 #ifdef TARGET_NR_ulimit
4637 case TARGET_NR_ulimit:
4640 #ifdef TARGET_NR_oldolduname
4641 case TARGET_NR_oldolduname:
4644 case TARGET_NR_umask:
4645 ret = get_errno(umask(arg1));
4647 case TARGET_NR_chroot:
4648 if (!(p = lock_user_string(arg1)))
4650 ret = get_errno(chroot(p));
4651 unlock_user(p, arg1, 0);
4653 case TARGET_NR_ustat:
4655 case TARGET_NR_dup2:
4656 ret = get_errno(dup2(arg1, arg2));
4658 #ifdef TARGET_NR_getppid /* not on alpha */
4659 case TARGET_NR_getppid:
4660 ret = get_errno(getppid());
4663 case TARGET_NR_getpgrp:
4664 ret = get_errno(getpgrp());
4666 case TARGET_NR_setsid:
4667 ret = get_errno(setsid());
4669 #ifdef TARGET_NR_sigaction
4670 case TARGET_NR_sigaction:
4672 #if !defined(TARGET_MIPS)
4673 struct target_old_sigaction *old_act;
4674 struct target_sigaction act, oact, *pact;
4676 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4678 act._sa_handler = old_act->_sa_handler;
4679 target_siginitset(&act.sa_mask, old_act->sa_mask);
4680 act.sa_flags = old_act->sa_flags;
4681 act.sa_restorer = old_act->sa_restorer;
4682 unlock_user_struct(old_act, arg2, 0);
4687 ret = get_errno(do_sigaction(arg1, pact, &oact));
4688 if (!is_error(ret) && arg3) {
4689 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4691 old_act->_sa_handler = oact._sa_handler;
4692 old_act->sa_mask = oact.sa_mask.sig[0];
4693 old_act->sa_flags = oact.sa_flags;
4694 old_act->sa_restorer = oact.sa_restorer;
4695 unlock_user_struct(old_act, arg3, 1);
4698 struct target_sigaction act, oact, *pact, *old_act;
4701 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4703 act._sa_handler = old_act->_sa_handler;
4704 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
4705 act.sa_flags = old_act->sa_flags;
4706 unlock_user_struct(old_act, arg2, 0);
4712 ret = get_errno(do_sigaction(arg1, pact, &oact));
4714 if (!is_error(ret) && arg3) {
4715 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4717 old_act->_sa_handler = oact._sa_handler;
4718 old_act->sa_flags = oact.sa_flags;
4719 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
4720 old_act->sa_mask.sig[1] = 0;
4721 old_act->sa_mask.sig[2] = 0;
4722 old_act->sa_mask.sig[3] = 0;
4723 unlock_user_struct(old_act, arg3, 1);
4729 case TARGET_NR_rt_sigaction:
4731 struct target_sigaction *act;
4732 struct target_sigaction *oact;
4735 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
4740 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
4741 ret = -TARGET_EFAULT;
4742 goto rt_sigaction_fail;
4746 ret = get_errno(do_sigaction(arg1, act, oact));
4749 unlock_user_struct(act, arg2, 0);
4751 unlock_user_struct(oact, arg3, 1);
4754 #ifdef TARGET_NR_sgetmask /* not on alpha */
4755 case TARGET_NR_sgetmask:
4758 abi_ulong target_set;
4759 sigprocmask(0, NULL, &cur_set);
4760 host_to_target_old_sigset(&target_set, &cur_set);
4765 #ifdef TARGET_NR_ssetmask /* not on alpha */
4766 case TARGET_NR_ssetmask:
4768 sigset_t set, oset, cur_set;
4769 abi_ulong target_set = arg1;
4770 sigprocmask(0, NULL, &cur_set);
4771 target_to_host_old_sigset(&set, &target_set);
4772 sigorset(&set, &set, &cur_set);
4773 sigprocmask(SIG_SETMASK, &set, &oset);
4774 host_to_target_old_sigset(&target_set, &oset);
4779 #ifdef TARGET_NR_sigprocmask
4780 case TARGET_NR_sigprocmask:
4783 sigset_t set, oldset, *set_ptr;
4787 case TARGET_SIG_BLOCK:
4790 case TARGET_SIG_UNBLOCK:
4793 case TARGET_SIG_SETMASK:
4797 ret = -TARGET_EINVAL;
4800 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4802 target_to_host_old_sigset(&set, p);
4803 unlock_user(p, arg2, 0);
4809 ret = get_errno(sigprocmask(arg1, set_ptr, &oldset));
4810 if (!is_error(ret) && arg3) {
4811 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4813 host_to_target_old_sigset(p, &oldset);
4814 unlock_user(p, arg3, sizeof(target_sigset_t));
4819 case TARGET_NR_rt_sigprocmask:
4822 sigset_t set, oldset, *set_ptr;
4826 case TARGET_SIG_BLOCK:
4829 case TARGET_SIG_UNBLOCK:
4832 case TARGET_SIG_SETMASK:
4836 ret = -TARGET_EINVAL;
4839 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4841 target_to_host_sigset(&set, p);
4842 unlock_user(p, arg2, 0);
4848 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
4849 if (!is_error(ret) && arg3) {
4850 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4852 host_to_target_sigset(p, &oldset);
4853 unlock_user(p, arg3, sizeof(target_sigset_t));
4857 #ifdef TARGET_NR_sigpending
4858 case TARGET_NR_sigpending:
4861 ret = get_errno(sigpending(&set));
4862 if (!is_error(ret)) {
4863 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4865 host_to_target_old_sigset(p, &set);
4866 unlock_user(p, arg1, sizeof(target_sigset_t));
4871 case TARGET_NR_rt_sigpending:
4874 ret = get_errno(sigpending(&set));
4875 if (!is_error(ret)) {
4876 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4878 host_to_target_sigset(p, &set);
4879 unlock_user(p, arg1, sizeof(target_sigset_t));
4883 #ifdef TARGET_NR_sigsuspend
4884 case TARGET_NR_sigsuspend:
4887 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4889 target_to_host_old_sigset(&set, p);
4890 unlock_user(p, arg1, 0);
4891 ret = get_errno(sigsuspend(&set));
4895 case TARGET_NR_rt_sigsuspend:
4898 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4900 target_to_host_sigset(&set, p);
4901 unlock_user(p, arg1, 0);
4902 ret = get_errno(sigsuspend(&set));
4905 case TARGET_NR_rt_sigtimedwait:
4908 struct timespec uts, *puts;
4911 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4913 target_to_host_sigset(&set, p);
4914 unlock_user(p, arg1, 0);
4917 target_to_host_timespec(puts, arg3);
4921 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
4922 if (!is_error(ret) && arg2) {
4923 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
4925 host_to_target_siginfo(p, &uinfo);
4926 unlock_user(p, arg2, sizeof(target_siginfo_t));
4930 case TARGET_NR_rt_sigqueueinfo:
4933 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
4935 target_to_host_siginfo(&uinfo, p);
4936 unlock_user(p, arg1, 0);
4937 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
4940 #ifdef TARGET_NR_sigreturn
4941 case TARGET_NR_sigreturn:
4942 /* NOTE: ret is eax, so no transcoding must be done */
4943 ret = do_sigreturn(cpu_env);
4946 case TARGET_NR_rt_sigreturn:
4947 /* NOTE: ret is eax, so no transcoding must be done */
4948 ret = do_rt_sigreturn(cpu_env);
4950 case TARGET_NR_sethostname:
4951 if (!(p = lock_user_string(arg1)))
4953 ret = get_errno(sethostname(p, arg2));
4954 unlock_user(p, arg1, 0);
4956 case TARGET_NR_setrlimit:
4958 /* XXX: convert resource ? */
4959 int resource = arg1;
4960 struct target_rlimit *target_rlim;
4962 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
4964 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4965 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4966 unlock_user_struct(target_rlim, arg2, 0);
4967 ret = get_errno(setrlimit(resource, &rlim));
4970 case TARGET_NR_getrlimit:
4972 /* XXX: convert resource ? */
4973 int resource = arg1;
4974 struct target_rlimit *target_rlim;
4977 ret = get_errno(getrlimit(resource, &rlim));
4978 if (!is_error(ret)) {
4979 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
4981 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4982 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4983 unlock_user_struct(target_rlim, arg2, 1);
4987 case TARGET_NR_getrusage:
4989 struct rusage rusage;
4990 ret = get_errno(getrusage(arg1, &rusage));
4991 if (!is_error(ret)) {
4992 host_to_target_rusage(arg2, &rusage);
4996 case TARGET_NR_gettimeofday:
4999 ret = get_errno(gettimeofday(&tv, NULL));
5000 if (!is_error(ret)) {
5001 if (copy_to_user_timeval(arg1, &tv))
5006 case TARGET_NR_settimeofday:
5009 if (copy_from_user_timeval(&tv, arg1))
5011 ret = get_errno(settimeofday(&tv, NULL));
5014 #ifdef TARGET_NR_select
5015 case TARGET_NR_select:
5017 struct target_sel_arg_struct *sel;
5018 abi_ulong inp, outp, exp, tvp;
5021 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5023 nsel = tswapl(sel->n);
5024 inp = tswapl(sel->inp);
5025 outp = tswapl(sel->outp);
5026 exp = tswapl(sel->exp);
5027 tvp = tswapl(sel->tvp);
5028 unlock_user_struct(sel, arg1, 0);
5029 ret = do_select(nsel, inp, outp, exp, tvp);
5033 case TARGET_NR_symlink:
5036 p = lock_user_string(arg1);
5037 p2 = lock_user_string(arg2);
5039 ret = -TARGET_EFAULT;
5041 ret = get_errno(symlink(p, p2));
5042 unlock_user(p2, arg2, 0);
5043 unlock_user(p, arg1, 0);
5046 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5047 case TARGET_NR_symlinkat:
5050 p = lock_user_string(arg1);
5051 p2 = lock_user_string(arg3);
5053 ret = -TARGET_EFAULT;
5055 ret = get_errno(sys_symlinkat(p, arg2, p2));
5056 unlock_user(p2, arg3, 0);
5057 unlock_user(p, arg1, 0);
5061 #ifdef TARGET_NR_oldlstat
5062 case TARGET_NR_oldlstat:
5065 case TARGET_NR_readlink:
5068 p = lock_user_string(arg1);
5069 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5071 ret = -TARGET_EFAULT;
5073 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5074 char real[PATH_MAX];
5075 temp = realpath(exec_path,real);
5076 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
5077 snprintf((char *)p2, arg3, "%s", real);
5080 ret = get_errno(readlink(path(p), p2, arg3));
5082 unlock_user(p2, arg2, ret);
5083 unlock_user(p, arg1, 0);
5086 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5087 case TARGET_NR_readlinkat:
5090 p = lock_user_string(arg2);
5091 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5093 ret = -TARGET_EFAULT;
5095 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5096 unlock_user(p2, arg3, ret);
5097 unlock_user(p, arg2, 0);
5101 #ifdef TARGET_NR_uselib
5102 case TARGET_NR_uselib:
5105 #ifdef TARGET_NR_swapon
5106 case TARGET_NR_swapon:
5107 if (!(p = lock_user_string(arg1)))
5109 ret = get_errno(swapon(p, arg2));
5110 unlock_user(p, arg1, 0);
5113 case TARGET_NR_reboot:
5115 #ifdef TARGET_NR_readdir
5116 case TARGET_NR_readdir:
5119 #ifdef TARGET_NR_mmap
5120 case TARGET_NR_mmap:
5121 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS)
5124 abi_ulong v1, v2, v3, v4, v5, v6;
5125 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5133 unlock_user(v, arg1, 0);
5134 ret = get_errno(target_mmap(v1, v2, v3,
5135 target_to_host_bitmask(v4, mmap_flags_tbl),
5139 ret = get_errno(target_mmap(arg1, arg2, arg3,
5140 target_to_host_bitmask(arg4, mmap_flags_tbl),
5146 #ifdef TARGET_NR_mmap2
5147 case TARGET_NR_mmap2:
5149 #define MMAP_SHIFT 12
5151 ret = get_errno(target_mmap(arg1, arg2, arg3,
5152 target_to_host_bitmask(arg4, mmap_flags_tbl),
5154 arg6 << MMAP_SHIFT));
5157 case TARGET_NR_munmap:
5158 ret = get_errno(target_munmap(arg1, arg2));
5160 case TARGET_NR_mprotect:
5161 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5163 #ifdef TARGET_NR_mremap
5164 case TARGET_NR_mremap:
5165 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5168 /* ??? msync/mlock/munlock are broken for softmmu. */
5169 #ifdef TARGET_NR_msync
5170 case TARGET_NR_msync:
5171 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5174 #ifdef TARGET_NR_mlock
5175 case TARGET_NR_mlock:
5176 ret = get_errno(mlock(g2h(arg1), arg2));
5179 #ifdef TARGET_NR_munlock
5180 case TARGET_NR_munlock:
5181 ret = get_errno(munlock(g2h(arg1), arg2));
5184 #ifdef TARGET_NR_mlockall
5185 case TARGET_NR_mlockall:
5186 ret = get_errno(mlockall(arg1));
5189 #ifdef TARGET_NR_munlockall
5190 case TARGET_NR_munlockall:
5191 ret = get_errno(munlockall());
5194 case TARGET_NR_truncate:
5195 if (!(p = lock_user_string(arg1)))
5197 ret = get_errno(truncate(p, arg2));
5198 unlock_user(p, arg1, 0);
5200 case TARGET_NR_ftruncate:
5201 ret = get_errno(ftruncate(arg1, arg2));
5203 case TARGET_NR_fchmod:
5204 ret = get_errno(fchmod(arg1, arg2));
5206 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5207 case TARGET_NR_fchmodat:
5208 if (!(p = lock_user_string(arg2)))
5210 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5211 unlock_user(p, arg2, 0);
5214 case TARGET_NR_getpriority:
5215 /* libc does special remapping of the return value of
5216 * sys_getpriority() so it's just easiest to call
5217 * sys_getpriority() directly rather than through libc. */
5218 ret = sys_getpriority(arg1, arg2);
5220 case TARGET_NR_setpriority:
5221 ret = get_errno(setpriority(arg1, arg2, arg3));
5223 #ifdef TARGET_NR_profil
5224 case TARGET_NR_profil:
5227 case TARGET_NR_statfs:
5228 if (!(p = lock_user_string(arg1)))
5230 ret = get_errno(statfs(path(p), &stfs));
5231 unlock_user(p, arg1, 0);
5233 if (!is_error(ret)) {
5234 struct target_statfs *target_stfs;
5236 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5238 __put_user(stfs.f_type, &target_stfs->f_type);
5239 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5240 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5241 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5242 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5243 __put_user(stfs.f_files, &target_stfs->f_files);
5244 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5245 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5246 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5247 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5248 unlock_user_struct(target_stfs, arg2, 1);
5251 case TARGET_NR_fstatfs:
5252 ret = get_errno(fstatfs(arg1, &stfs));
5253 goto convert_statfs;
5254 #ifdef TARGET_NR_statfs64
5255 case TARGET_NR_statfs64:
5256 if (!(p = lock_user_string(arg1)))
5258 ret = get_errno(statfs(path(p), &stfs));
5259 unlock_user(p, arg1, 0);
5261 if (!is_error(ret)) {
5262 struct target_statfs64 *target_stfs;
5264 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5266 __put_user(stfs.f_type, &target_stfs->f_type);
5267 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5268 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5269 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5270 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5271 __put_user(stfs.f_files, &target_stfs->f_files);
5272 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5273 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5274 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5275 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5276 unlock_user_struct(target_stfs, arg3, 1);
5279 case TARGET_NR_fstatfs64:
5280 ret = get_errno(fstatfs(arg1, &stfs));
5281 goto convert_statfs64;
5283 #ifdef TARGET_NR_ioperm
5284 case TARGET_NR_ioperm:
5287 #ifdef TARGET_NR_socketcall
5288 case TARGET_NR_socketcall:
5289 ret = do_socketcall(arg1, arg2);
5292 #ifdef TARGET_NR_accept
5293 case TARGET_NR_accept:
5294 ret = do_accept(arg1, arg2, arg3);
5297 #ifdef TARGET_NR_bind
5298 case TARGET_NR_bind:
5299 ret = do_bind(arg1, arg2, arg3);
5302 #ifdef TARGET_NR_connect
5303 case TARGET_NR_connect:
5304 ret = do_connect(arg1, arg2, arg3);
5307 #ifdef TARGET_NR_getpeername
5308 case TARGET_NR_getpeername:
5309 ret = do_getpeername(arg1, arg2, arg3);
5312 #ifdef TARGET_NR_getsockname
5313 case TARGET_NR_getsockname:
5314 ret = do_getsockname(arg1, arg2, arg3);
5317 #ifdef TARGET_NR_getsockopt
5318 case TARGET_NR_getsockopt:
5319 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
5322 #ifdef TARGET_NR_listen
5323 case TARGET_NR_listen:
5324 ret = get_errno(listen(arg1, arg2));
5327 #ifdef TARGET_NR_recv
5328 case TARGET_NR_recv:
5329 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
5332 #ifdef TARGET_NR_recvfrom
5333 case TARGET_NR_recvfrom:
5334 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
5337 #ifdef TARGET_NR_recvmsg
5338 case TARGET_NR_recvmsg:
5339 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
5342 #ifdef TARGET_NR_send
5343 case TARGET_NR_send:
5344 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
5347 #ifdef TARGET_NR_sendmsg
5348 case TARGET_NR_sendmsg:
5349 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
5352 #ifdef TARGET_NR_sendto
5353 case TARGET_NR_sendto:
5354 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
5357 #ifdef TARGET_NR_shutdown
5358 case TARGET_NR_shutdown:
5359 ret = get_errno(shutdown(arg1, arg2));
5362 #ifdef TARGET_NR_socket
5363 case TARGET_NR_socket:
5364 ret = do_socket(arg1, arg2, arg3);
5367 #ifdef TARGET_NR_socketpair
5368 case TARGET_NR_socketpair:
5369 ret = do_socketpair(arg1, arg2, arg3, arg4);
5372 #ifdef TARGET_NR_setsockopt
5373 case TARGET_NR_setsockopt:
5374 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
5378 case TARGET_NR_syslog:
5379 if (!(p = lock_user_string(arg2)))
5381 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
5382 unlock_user(p, arg2, 0);
5385 case TARGET_NR_setitimer:
5387 struct itimerval value, ovalue, *pvalue;
5391 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
5392 || copy_from_user_timeval(&pvalue->it_value,
5393 arg2 + sizeof(struct target_timeval)))
5398 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
5399 if (!is_error(ret) && arg3) {
5400 if (copy_to_user_timeval(arg3,
5401 &ovalue.it_interval)
5402 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
5408 case TARGET_NR_getitimer:
5410 struct itimerval value;
5412 ret = get_errno(getitimer(arg1, &value));
5413 if (!is_error(ret) && arg2) {
5414 if (copy_to_user_timeval(arg2,
5416 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
5422 case TARGET_NR_stat:
5423 if (!(p = lock_user_string(arg1)))
5425 ret = get_errno(stat(path(p), &st));
5426 unlock_user(p, arg1, 0);
5428 case TARGET_NR_lstat:
5429 if (!(p = lock_user_string(arg1)))
5431 ret = get_errno(lstat(path(p), &st));
5432 unlock_user(p, arg1, 0);
5434 case TARGET_NR_fstat:
5436 ret = get_errno(fstat(arg1, &st));
5438 if (!is_error(ret)) {
5439 struct target_stat *target_st;
5441 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
5443 __put_user(st.st_dev, &target_st->st_dev);
5444 __put_user(st.st_ino, &target_st->st_ino);
5445 __put_user(st.st_mode, &target_st->st_mode);
5446 __put_user(st.st_uid, &target_st->st_uid);
5447 __put_user(st.st_gid, &target_st->st_gid);
5448 __put_user(st.st_nlink, &target_st->st_nlink);
5449 __put_user(st.st_rdev, &target_st->st_rdev);
5450 __put_user(st.st_size, &target_st->st_size);
5451 __put_user(st.st_blksize, &target_st->st_blksize);
5452 __put_user(st.st_blocks, &target_st->st_blocks);
5453 __put_user(st.st_atime, &target_st->target_st_atime);
5454 __put_user(st.st_mtime, &target_st->target_st_mtime);
5455 __put_user(st.st_ctime, &target_st->target_st_ctime);
5456 unlock_user_struct(target_st, arg2, 1);
5460 #ifdef TARGET_NR_olduname
5461 case TARGET_NR_olduname:
5464 #ifdef TARGET_NR_iopl
5465 case TARGET_NR_iopl:
5468 case TARGET_NR_vhangup:
5469 ret = get_errno(vhangup());
5471 #ifdef TARGET_NR_idle
5472 case TARGET_NR_idle:
5475 #ifdef TARGET_NR_syscall
5476 case TARGET_NR_syscall:
5477 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
5480 case TARGET_NR_wait4:
5483 abi_long status_ptr = arg2;
5484 struct rusage rusage, *rusage_ptr;
5485 abi_ulong target_rusage = arg4;
5487 rusage_ptr = &rusage;
5490 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
5491 if (!is_error(ret)) {
5493 status = host_to_target_waitstatus(status);
5494 if (put_user_s32(status, status_ptr))
5498 host_to_target_rusage(target_rusage, &rusage);
5502 #ifdef TARGET_NR_swapoff
5503 case TARGET_NR_swapoff:
5504 if (!(p = lock_user_string(arg1)))
5506 ret = get_errno(swapoff(p));
5507 unlock_user(p, arg1, 0);
5510 case TARGET_NR_sysinfo:
5512 struct target_sysinfo *target_value;
5513 struct sysinfo value;
5514 ret = get_errno(sysinfo(&value));
5515 if (!is_error(ret) && arg1)
5517 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
5519 __put_user(value.uptime, &target_value->uptime);
5520 __put_user(value.loads[0], &target_value->loads[0]);
5521 __put_user(value.loads[1], &target_value->loads[1]);
5522 __put_user(value.loads[2], &target_value->loads[2]);
5523 __put_user(value.totalram, &target_value->totalram);
5524 __put_user(value.freeram, &target_value->freeram);
5525 __put_user(value.sharedram, &target_value->sharedram);
5526 __put_user(value.bufferram, &target_value->bufferram);
5527 __put_user(value.totalswap, &target_value->totalswap);
5528 __put_user(value.freeswap, &target_value->freeswap);
5529 __put_user(value.procs, &target_value->procs);
5530 __put_user(value.totalhigh, &target_value->totalhigh);
5531 __put_user(value.freehigh, &target_value->freehigh);
5532 __put_user(value.mem_unit, &target_value->mem_unit);
5533 unlock_user_struct(target_value, arg1, 1);
5537 #ifdef TARGET_NR_ipc
5539 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
5542 #ifdef TARGET_NR_semget
5543 case TARGET_NR_semget:
5544 ret = get_errno(semget(arg1, arg2, arg3));
5547 #ifdef TARGET_NR_semop
5548 case TARGET_NR_semop:
5549 ret = get_errno(do_semop(arg1, arg2, arg3));
5552 #ifdef TARGET_NR_semctl
5553 case TARGET_NR_semctl:
5554 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
5557 #ifdef TARGET_NR_msgctl
5558 case TARGET_NR_msgctl:
5559 ret = do_msgctl(arg1, arg2, arg3);
5562 #ifdef TARGET_NR_msgget
5563 case TARGET_NR_msgget:
5564 ret = get_errno(msgget(arg1, arg2));
5567 #ifdef TARGET_NR_msgrcv
5568 case TARGET_NR_msgrcv:
5569 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
5572 #ifdef TARGET_NR_msgsnd
5573 case TARGET_NR_msgsnd:
5574 ret = do_msgsnd(arg1, arg2, arg3, arg4);
5577 #ifdef TARGET_NR_shmget
5578 case TARGET_NR_shmget:
5579 ret = get_errno(shmget(arg1, arg2, arg3));
5582 #ifdef TARGET_NR_shmctl
5583 case TARGET_NR_shmctl:
5584 ret = do_shmctl(arg1, arg2, arg3);
5587 #ifdef TARGET_NR_shmat
5588 case TARGET_NR_shmat:
5589 ret = do_shmat(arg1, arg2, arg3);
5592 #ifdef TARGET_NR_shmdt
5593 case TARGET_NR_shmdt:
5594 ret = do_shmdt(arg1);
5597 case TARGET_NR_fsync:
5598 ret = get_errno(fsync(arg1));
5600 case TARGET_NR_clone:
5601 #if defined(TARGET_SH4)
5602 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
5603 #elif defined(TARGET_CRIS)
5604 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
5606 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
5609 #ifdef __NR_exit_group
5610 /* new thread calls */
5611 case TARGET_NR_exit_group:
5615 gdb_exit(cpu_env, arg1);
5616 ret = get_errno(exit_group(arg1));
5619 case TARGET_NR_setdomainname:
5620 if (!(p = lock_user_string(arg1)))
5622 ret = get_errno(setdomainname(p, arg2));
5623 unlock_user(p, arg1, 0);
5625 case TARGET_NR_uname:
5626 /* no need to transcode because we use the linux syscall */
5628 struct new_utsname * buf;
5630 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
5632 ret = get_errno(sys_uname(buf));
5633 if (!is_error(ret)) {
5634 /* Overwrite the native machine name with whatever is being
5636 strcpy (buf->machine, UNAME_MACHINE);
5637 /* Allow the user to override the reported release. */
5638 if (qemu_uname_release && *qemu_uname_release)
5639 strcpy (buf->release, qemu_uname_release);
5641 unlock_user_struct(buf, arg1, 1);
5645 case TARGET_NR_modify_ldt:
5646 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
5648 #if !defined(TARGET_X86_64)
5649 case TARGET_NR_vm86old:
5651 case TARGET_NR_vm86:
5652 ret = do_vm86(cpu_env, arg1, arg2);
5656 case TARGET_NR_adjtimex:
5658 #ifdef TARGET_NR_create_module
5659 case TARGET_NR_create_module:
5661 case TARGET_NR_init_module:
5662 case TARGET_NR_delete_module:
5663 #ifdef TARGET_NR_get_kernel_syms
5664 case TARGET_NR_get_kernel_syms:
5667 case TARGET_NR_quotactl:
5669 case TARGET_NR_getpgid:
5670 ret = get_errno(getpgid(arg1));
5672 case TARGET_NR_fchdir:
5673 ret = get_errno(fchdir(arg1));
5675 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5676 case TARGET_NR_bdflush:
5679 #ifdef TARGET_NR_sysfs
5680 case TARGET_NR_sysfs:
5683 case TARGET_NR_personality:
5684 ret = get_errno(personality(arg1));
5686 #ifdef TARGET_NR_afs_syscall
5687 case TARGET_NR_afs_syscall:
5690 #ifdef TARGET_NR__llseek /* Not on alpha */
5691 case TARGET_NR__llseek:
5693 #if defined (__x86_64__)
5694 ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5));
5695 if (put_user_s64(ret, arg4))
5699 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
5700 if (put_user_s64(res, arg4))
5706 case TARGET_NR_getdents:
5707 #if TARGET_ABI_BITS != 32
5709 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5711 struct target_dirent *target_dirp;
5712 struct linux_dirent *dirp;
5713 abi_long count = arg3;
5715 dirp = malloc(count);
5717 ret = -TARGET_ENOMEM;
5721 ret = get_errno(sys_getdents(arg1, dirp, count));
5722 if (!is_error(ret)) {
5723 struct linux_dirent *de;
5724 struct target_dirent *tde;
5726 int reclen, treclen;
5727 int count1, tnamelen;
5731 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5735 reclen = de->d_reclen;
5736 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
5737 tde->d_reclen = tswap16(treclen);
5738 tde->d_ino = tswapl(de->d_ino);
5739 tde->d_off = tswapl(de->d_off);
5740 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
5743 /* XXX: may not be correct */
5744 pstrcpy(tde->d_name, tnamelen, de->d_name);
5745 de = (struct linux_dirent *)((char *)de + reclen);
5747 tde = (struct target_dirent *)((char *)tde + treclen);
5751 unlock_user(target_dirp, arg2, ret);
5757 struct linux_dirent *dirp;
5758 abi_long count = arg3;
5760 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5762 ret = get_errno(sys_getdents(arg1, dirp, count));
5763 if (!is_error(ret)) {
5764 struct linux_dirent *de;
5769 reclen = de->d_reclen;
5772 de->d_reclen = tswap16(reclen);
5773 tswapls(&de->d_ino);
5774 tswapls(&de->d_off);
5775 de = (struct linux_dirent *)((char *)de + reclen);
5779 unlock_user(dirp, arg2, ret);
5783 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5784 case TARGET_NR_getdents64:
5786 struct linux_dirent64 *dirp;
5787 abi_long count = arg3;
5788 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5790 ret = get_errno(sys_getdents64(arg1, dirp, count));
5791 if (!is_error(ret)) {
5792 struct linux_dirent64 *de;
5797 reclen = de->d_reclen;
5800 de->d_reclen = tswap16(reclen);
5801 tswap64s((uint64_t *)&de->d_ino);
5802 tswap64s((uint64_t *)&de->d_off);
5803 de = (struct linux_dirent64 *)((char *)de + reclen);
5807 unlock_user(dirp, arg2, ret);
5810 #endif /* TARGET_NR_getdents64 */
5811 #ifdef TARGET_NR__newselect
5812 case TARGET_NR__newselect:
5813 ret = do_select(arg1, arg2, arg3, arg4, arg5);
5816 #ifdef TARGET_NR_poll
5817 case TARGET_NR_poll:
5819 struct target_pollfd *target_pfd;
5820 unsigned int nfds = arg2;
5825 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
5828 pfd = alloca(sizeof(struct pollfd) * nfds);
5829 for(i = 0; i < nfds; i++) {
5830 pfd[i].fd = tswap32(target_pfd[i].fd);
5831 pfd[i].events = tswap16(target_pfd[i].events);
5833 ret = get_errno(poll(pfd, nfds, timeout));
5834 if (!is_error(ret)) {
5835 for(i = 0; i < nfds; i++) {
5836 target_pfd[i].revents = tswap16(pfd[i].revents);
5838 ret += nfds * (sizeof(struct target_pollfd)
5839 - sizeof(struct pollfd));
5841 unlock_user(target_pfd, arg1, ret);
5845 case TARGET_NR_flock:
5846 /* NOTE: the flock constant seems to be the same for every
5848 ret = get_errno(flock(arg1, arg2));
5850 case TARGET_NR_readv:
5855 vec = alloca(count * sizeof(struct iovec));
5856 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
5858 ret = get_errno(readv(arg1, vec, count));
5859 unlock_iovec(vec, arg2, count, 1);
5862 case TARGET_NR_writev:
5867 vec = alloca(count * sizeof(struct iovec));
5868 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
5870 ret = get_errno(writev(arg1, vec, count));
5871 unlock_iovec(vec, arg2, count, 0);
5874 case TARGET_NR_getsid:
5875 ret = get_errno(getsid(arg1));
5877 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5878 case TARGET_NR_fdatasync:
5879 ret = get_errno(fdatasync(arg1));
5882 case TARGET_NR__sysctl:
5883 /* We don't implement this, but ENOTDIR is always a safe
5885 ret = -TARGET_ENOTDIR;
5887 case TARGET_NR_sched_setparam:
5889 struct sched_param *target_schp;
5890 struct sched_param schp;
5892 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
5894 schp.sched_priority = tswap32(target_schp->sched_priority);
5895 unlock_user_struct(target_schp, arg2, 0);
5896 ret = get_errno(sched_setparam(arg1, &schp));
5899 case TARGET_NR_sched_getparam:
5901 struct sched_param *target_schp;
5902 struct sched_param schp;
5903 ret = get_errno(sched_getparam(arg1, &schp));
5904 if (!is_error(ret)) {
5905 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
5907 target_schp->sched_priority = tswap32(schp.sched_priority);
5908 unlock_user_struct(target_schp, arg2, 1);
5912 case TARGET_NR_sched_setscheduler:
5914 struct sched_param *target_schp;
5915 struct sched_param schp;
5916 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
5918 schp.sched_priority = tswap32(target_schp->sched_priority);
5919 unlock_user_struct(target_schp, arg3, 0);
5920 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
5923 case TARGET_NR_sched_getscheduler:
5924 ret = get_errno(sched_getscheduler(arg1));
5926 case TARGET_NR_sched_yield:
5927 ret = get_errno(sched_yield());
5929 case TARGET_NR_sched_get_priority_max:
5930 ret = get_errno(sched_get_priority_max(arg1));
5932 case TARGET_NR_sched_get_priority_min:
5933 ret = get_errno(sched_get_priority_min(arg1));
5935 case TARGET_NR_sched_rr_get_interval:
5938 ret = get_errno(sched_rr_get_interval(arg1, &ts));
5939 if (!is_error(ret)) {
5940 host_to_target_timespec(arg2, &ts);
5944 case TARGET_NR_nanosleep:
5946 struct timespec req, rem;
5947 target_to_host_timespec(&req, arg1);
5948 ret = get_errno(nanosleep(&req, &rem));
5949 if (is_error(ret) && arg2) {
5950 host_to_target_timespec(arg2, &rem);
5954 #ifdef TARGET_NR_query_module
5955 case TARGET_NR_query_module:
5958 #ifdef TARGET_NR_nfsservctl
5959 case TARGET_NR_nfsservctl:
5962 case TARGET_NR_prctl:
5965 case PR_GET_PDEATHSIG:
5968 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
5969 if (!is_error(ret) && arg2
5970 && put_user_ual(deathsig, arg2))
5975 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
5979 #ifdef TARGET_NR_arch_prctl
5980 case TARGET_NR_arch_prctl:
5981 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
5982 ret = do_arch_prctl(cpu_env, arg1, arg2);
5988 #ifdef TARGET_NR_pread
5989 case TARGET_NR_pread:
5991 if (((CPUARMState *)cpu_env)->eabi)
5994 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5996 ret = get_errno(pread(arg1, p, arg3, arg4));
5997 unlock_user(p, arg2, ret);
5999 case TARGET_NR_pwrite:
6001 if (((CPUARMState *)cpu_env)->eabi)
6004 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6006 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6007 unlock_user(p, arg2, 0);
6010 #ifdef TARGET_NR_pread64
6011 case TARGET_NR_pread64:
6012 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6014 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6015 unlock_user(p, arg2, ret);
6017 case TARGET_NR_pwrite64:
6018 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6020 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6021 unlock_user(p, arg2, 0);
6024 case TARGET_NR_getcwd:
6025 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6027 ret = get_errno(sys_getcwd1(p, arg2));
6028 unlock_user(p, arg1, ret);
6030 case TARGET_NR_capget:
6032 case TARGET_NR_capset:
6034 case TARGET_NR_sigaltstack:
6035 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6036 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
6037 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6042 case TARGET_NR_sendfile:
6044 #ifdef TARGET_NR_getpmsg
6045 case TARGET_NR_getpmsg:
6048 #ifdef TARGET_NR_putpmsg
6049 case TARGET_NR_putpmsg:
6052 #ifdef TARGET_NR_vfork
6053 case TARGET_NR_vfork:
6054 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6058 #ifdef TARGET_NR_ugetrlimit
6059 case TARGET_NR_ugetrlimit:
6062 ret = get_errno(getrlimit(arg1, &rlim));
6063 if (!is_error(ret)) {
6064 struct target_rlimit *target_rlim;
6065 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6067 target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
6068 target_rlim->rlim_max = tswapl(rlim.rlim_max);
6069 unlock_user_struct(target_rlim, arg2, 1);
6074 #ifdef TARGET_NR_truncate64
6075 case TARGET_NR_truncate64:
6076 if (!(p = lock_user_string(arg1)))
6078 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6079 unlock_user(p, arg1, 0);
6082 #ifdef TARGET_NR_ftruncate64
6083 case TARGET_NR_ftruncate64:
6084 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6087 #ifdef TARGET_NR_stat64
6088 case TARGET_NR_stat64:
6089 if (!(p = lock_user_string(arg1)))
6091 ret = get_errno(stat(path(p), &st));
6092 unlock_user(p, arg1, 0);
6094 ret = host_to_target_stat64(cpu_env, arg2, &st);
6097 #ifdef TARGET_NR_lstat64
6098 case TARGET_NR_lstat64:
6099 if (!(p = lock_user_string(arg1)))
6101 ret = get_errno(lstat(path(p), &st));
6102 unlock_user(p, arg1, 0);
6104 ret = host_to_target_stat64(cpu_env, arg2, &st);
6107 #ifdef TARGET_NR_fstat64
6108 case TARGET_NR_fstat64:
6109 ret = get_errno(fstat(arg1, &st));
6111 ret = host_to_target_stat64(cpu_env, arg2, &st);
6114 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6115 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6116 #ifdef TARGET_NR_fstatat64
6117 case TARGET_NR_fstatat64:
6119 #ifdef TARGET_NR_newfstatat
6120 case TARGET_NR_newfstatat:
6122 if (!(p = lock_user_string(arg2)))
6124 #ifdef __NR_fstatat64
6125 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6127 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6130 ret = host_to_target_stat64(cpu_env, arg3, &st);
6134 case TARGET_NR_lchown:
6135 if (!(p = lock_user_string(arg1)))
6137 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6138 unlock_user(p, arg1, 0);
6140 case TARGET_NR_getuid:
6141 ret = get_errno(high2lowuid(getuid()));
6143 case TARGET_NR_getgid:
6144 ret = get_errno(high2lowgid(getgid()));
6146 case TARGET_NR_geteuid:
6147 ret = get_errno(high2lowuid(geteuid()));
6149 case TARGET_NR_getegid:
6150 ret = get_errno(high2lowgid(getegid()));
6152 case TARGET_NR_setreuid:
6153 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6155 case TARGET_NR_setregid:
6156 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6158 case TARGET_NR_getgroups:
6160 int gidsetsize = arg1;
6161 uint16_t *target_grouplist;
6165 grouplist = alloca(gidsetsize * sizeof(gid_t));
6166 ret = get_errno(getgroups(gidsetsize, grouplist));
6167 if (gidsetsize == 0)
6169 if (!is_error(ret)) {
6170 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6171 if (!target_grouplist)
6173 for(i = 0;i < ret; i++)
6174 target_grouplist[i] = tswap16(grouplist[i]);
6175 unlock_user(target_grouplist, arg2, gidsetsize * 2);
6179 case TARGET_NR_setgroups:
6181 int gidsetsize = arg1;
6182 uint16_t *target_grouplist;
6186 grouplist = alloca(gidsetsize * sizeof(gid_t));
6187 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
6188 if (!target_grouplist) {
6189 ret = -TARGET_EFAULT;
6192 for(i = 0;i < gidsetsize; i++)
6193 grouplist[i] = tswap16(target_grouplist[i]);
6194 unlock_user(target_grouplist, arg2, 0);
6195 ret = get_errno(setgroups(gidsetsize, grouplist));
6198 case TARGET_NR_fchown:
6199 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
6201 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6202 case TARGET_NR_fchownat:
6203 if (!(p = lock_user_string(arg2)))
6205 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
6206 unlock_user(p, arg2, 0);
6209 #ifdef TARGET_NR_setresuid
6210 case TARGET_NR_setresuid:
6211 ret = get_errno(setresuid(low2highuid(arg1),
6213 low2highuid(arg3)));
6216 #ifdef TARGET_NR_getresuid
6217 case TARGET_NR_getresuid:
6219 uid_t ruid, euid, suid;
6220 ret = get_errno(getresuid(&ruid, &euid, &suid));
6221 if (!is_error(ret)) {
6222 if (put_user_u16(high2lowuid(ruid), arg1)
6223 || put_user_u16(high2lowuid(euid), arg2)
6224 || put_user_u16(high2lowuid(suid), arg3))
6230 #ifdef TARGET_NR_getresgid
6231 case TARGET_NR_setresgid:
6232 ret = get_errno(setresgid(low2highgid(arg1),
6234 low2highgid(arg3)));
6237 #ifdef TARGET_NR_getresgid
6238 case TARGET_NR_getresgid:
6240 gid_t rgid, egid, sgid;
6241 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6242 if (!is_error(ret)) {
6243 if (put_user_u16(high2lowgid(rgid), arg1)
6244 || put_user_u16(high2lowgid(egid), arg2)
6245 || put_user_u16(high2lowgid(sgid), arg3))
6251 case TARGET_NR_chown:
6252 if (!(p = lock_user_string(arg1)))
6254 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
6255 unlock_user(p, arg1, 0);
6257 case TARGET_NR_setuid:
6258 ret = get_errno(setuid(low2highuid(arg1)));
6260 case TARGET_NR_setgid:
6261 ret = get_errno(setgid(low2highgid(arg1)));
6263 case TARGET_NR_setfsuid:
6264 ret = get_errno(setfsuid(arg1));
6266 case TARGET_NR_setfsgid:
6267 ret = get_errno(setfsgid(arg1));
6269 #endif /* USE_UID16 */
6271 #ifdef TARGET_NR_lchown32
6272 case TARGET_NR_lchown32:
6273 if (!(p = lock_user_string(arg1)))
6275 ret = get_errno(lchown(p, arg2, arg3));
6276 unlock_user(p, arg1, 0);
6279 #ifdef TARGET_NR_getuid32
6280 case TARGET_NR_getuid32:
6281 ret = get_errno(getuid());
6285 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6286 /* Alpha specific */
6287 case TARGET_NR_getxuid:
6291 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
6293 ret = get_errno(getuid());
6296 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6297 /* Alpha specific */
6298 case TARGET_NR_getxgid:
6302 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
6304 ret = get_errno(getgid());
6308 #ifdef TARGET_NR_getgid32
6309 case TARGET_NR_getgid32:
6310 ret = get_errno(getgid());
6313 #ifdef TARGET_NR_geteuid32
6314 case TARGET_NR_geteuid32:
6315 ret = get_errno(geteuid());
6318 #ifdef TARGET_NR_getegid32
6319 case TARGET_NR_getegid32:
6320 ret = get_errno(getegid());
6323 #ifdef TARGET_NR_setreuid32
6324 case TARGET_NR_setreuid32:
6325 ret = get_errno(setreuid(arg1, arg2));
6328 #ifdef TARGET_NR_setregid32
6329 case TARGET_NR_setregid32:
6330 ret = get_errno(setregid(arg1, arg2));
6333 #ifdef TARGET_NR_getgroups32
6334 case TARGET_NR_getgroups32:
6336 int gidsetsize = arg1;
6337 uint32_t *target_grouplist;
6341 grouplist = alloca(gidsetsize * sizeof(gid_t));
6342 ret = get_errno(getgroups(gidsetsize, grouplist));
6343 if (gidsetsize == 0)
6345 if (!is_error(ret)) {
6346 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
6347 if (!target_grouplist) {
6348 ret = -TARGET_EFAULT;
6351 for(i = 0;i < ret; i++)
6352 target_grouplist[i] = tswap32(grouplist[i]);
6353 unlock_user(target_grouplist, arg2, gidsetsize * 4);
6358 #ifdef TARGET_NR_setgroups32
6359 case TARGET_NR_setgroups32:
6361 int gidsetsize = arg1;
6362 uint32_t *target_grouplist;
6366 grouplist = alloca(gidsetsize * sizeof(gid_t));
6367 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
6368 if (!target_grouplist) {
6369 ret = -TARGET_EFAULT;
6372 for(i = 0;i < gidsetsize; i++)
6373 grouplist[i] = tswap32(target_grouplist[i]);
6374 unlock_user(target_grouplist, arg2, 0);
6375 ret = get_errno(setgroups(gidsetsize, grouplist));
6379 #ifdef TARGET_NR_fchown32
6380 case TARGET_NR_fchown32:
6381 ret = get_errno(fchown(arg1, arg2, arg3));
6384 #ifdef TARGET_NR_setresuid32
6385 case TARGET_NR_setresuid32:
6386 ret = get_errno(setresuid(arg1, arg2, arg3));
6389 #ifdef TARGET_NR_getresuid32
6390 case TARGET_NR_getresuid32:
6392 uid_t ruid, euid, suid;
6393 ret = get_errno(getresuid(&ruid, &euid, &suid));
6394 if (!is_error(ret)) {
6395 if (put_user_u32(ruid, arg1)
6396 || put_user_u32(euid, arg2)
6397 || put_user_u32(suid, arg3))
6403 #ifdef TARGET_NR_setresgid32
6404 case TARGET_NR_setresgid32:
6405 ret = get_errno(setresgid(arg1, arg2, arg3));
6408 #ifdef TARGET_NR_getresgid32
6409 case TARGET_NR_getresgid32:
6411 gid_t rgid, egid, sgid;
6412 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6413 if (!is_error(ret)) {
6414 if (put_user_u32(rgid, arg1)
6415 || put_user_u32(egid, arg2)
6416 || put_user_u32(sgid, arg3))
6422 #ifdef TARGET_NR_chown32
6423 case TARGET_NR_chown32:
6424 if (!(p = lock_user_string(arg1)))
6426 ret = get_errno(chown(p, arg2, arg3));
6427 unlock_user(p, arg1, 0);
6430 #ifdef TARGET_NR_setuid32
6431 case TARGET_NR_setuid32:
6432 ret = get_errno(setuid(arg1));
6435 #ifdef TARGET_NR_setgid32
6436 case TARGET_NR_setgid32:
6437 ret = get_errno(setgid(arg1));
6440 #ifdef TARGET_NR_setfsuid32
6441 case TARGET_NR_setfsuid32:
6442 ret = get_errno(setfsuid(arg1));
6445 #ifdef TARGET_NR_setfsgid32
6446 case TARGET_NR_setfsgid32:
6447 ret = get_errno(setfsgid(arg1));
6451 case TARGET_NR_pivot_root:
6453 #ifdef TARGET_NR_mincore
6454 case TARGET_NR_mincore:
6457 ret = -TARGET_EFAULT;
6458 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
6460 if (!(p = lock_user_string(arg3)))
6462 ret = get_errno(mincore(a, arg2, p));
6463 unlock_user(p, arg3, ret);
6465 unlock_user(a, arg1, 0);
6469 #ifdef TARGET_NR_arm_fadvise64_64
6470 case TARGET_NR_arm_fadvise64_64:
6473 * arm_fadvise64_64 looks like fadvise64_64 but
6474 * with different argument order
6482 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
6483 #ifdef TARGET_NR_fadvise64_64
6484 case TARGET_NR_fadvise64_64:
6486 /* This is a hint, so ignoring and returning success is ok. */
6490 #ifdef TARGET_NR_madvise
6491 case TARGET_NR_madvise:
6492 /* A straight passthrough may not be safe because qemu sometimes
6493 turns private file-backed mappings into anonymous mappings.
6494 This will break MADV_DONTNEED.
6495 This is a hint, so ignoring and returning success is ok. */
6499 #if TARGET_ABI_BITS == 32
6500 case TARGET_NR_fcntl64:
6504 struct target_flock64 *target_fl;
6506 struct target_eabi_flock64 *target_efl;
6510 case TARGET_F_GETLK64:
6513 case TARGET_F_SETLK64:
6516 case TARGET_F_SETLKW64:
6525 case TARGET_F_GETLK64:
6527 if (((CPUARMState *)cpu_env)->eabi) {
6528 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6530 fl.l_type = tswap16(target_efl->l_type);
6531 fl.l_whence = tswap16(target_efl->l_whence);
6532 fl.l_start = tswap64(target_efl->l_start);
6533 fl.l_len = tswap64(target_efl->l_len);
6534 fl.l_pid = tswapl(target_efl->l_pid);
6535 unlock_user_struct(target_efl, arg3, 0);
6539 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6541 fl.l_type = tswap16(target_fl->l_type);
6542 fl.l_whence = tswap16(target_fl->l_whence);
6543 fl.l_start = tswap64(target_fl->l_start);
6544 fl.l_len = tswap64(target_fl->l_len);
6545 fl.l_pid = tswapl(target_fl->l_pid);
6546 unlock_user_struct(target_fl, arg3, 0);
6548 ret = get_errno(fcntl(arg1, cmd, &fl));
6551 if (((CPUARMState *)cpu_env)->eabi) {
6552 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
6554 target_efl->l_type = tswap16(fl.l_type);
6555 target_efl->l_whence = tswap16(fl.l_whence);
6556 target_efl->l_start = tswap64(fl.l_start);
6557 target_efl->l_len = tswap64(fl.l_len);
6558 target_efl->l_pid = tswapl(fl.l_pid);
6559 unlock_user_struct(target_efl, arg3, 1);
6563 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
6565 target_fl->l_type = tswap16(fl.l_type);
6566 target_fl->l_whence = tswap16(fl.l_whence);
6567 target_fl->l_start = tswap64(fl.l_start);
6568 target_fl->l_len = tswap64(fl.l_len);
6569 target_fl->l_pid = tswapl(fl.l_pid);
6570 unlock_user_struct(target_fl, arg3, 1);
6575 case TARGET_F_SETLK64:
6576 case TARGET_F_SETLKW64:
6578 if (((CPUARMState *)cpu_env)->eabi) {
6579 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6581 fl.l_type = tswap16(target_efl->l_type);
6582 fl.l_whence = tswap16(target_efl->l_whence);
6583 fl.l_start = tswap64(target_efl->l_start);
6584 fl.l_len = tswap64(target_efl->l_len);
6585 fl.l_pid = tswapl(target_efl->l_pid);
6586 unlock_user_struct(target_efl, arg3, 0);
6590 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6592 fl.l_type = tswap16(target_fl->l_type);
6593 fl.l_whence = tswap16(target_fl->l_whence);
6594 fl.l_start = tswap64(target_fl->l_start);
6595 fl.l_len = tswap64(target_fl->l_len);
6596 fl.l_pid = tswapl(target_fl->l_pid);
6597 unlock_user_struct(target_fl, arg3, 0);
6599 ret = get_errno(fcntl(arg1, cmd, &fl));
6602 ret = do_fcntl(arg1, arg2, arg3);
6608 #ifdef TARGET_NR_cacheflush
6609 case TARGET_NR_cacheflush:
6610 /* self-modifying code is handled automatically, so nothing needed */
6614 #ifdef TARGET_NR_security
6615 case TARGET_NR_security:
6618 #ifdef TARGET_NR_getpagesize
6619 case TARGET_NR_getpagesize:
6620 ret = TARGET_PAGE_SIZE;
6623 case TARGET_NR_gettid:
6624 ret = get_errno(gettid());
6626 #ifdef TARGET_NR_readahead
6627 case TARGET_NR_readahead:
6628 #if TARGET_ABI_BITS == 32
6630 if (((CPUARMState *)cpu_env)->eabi)
6637 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
6639 ret = get_errno(readahead(arg1, arg2, arg3));
6643 #ifdef TARGET_NR_setxattr
6644 case TARGET_NR_setxattr:
6645 case TARGET_NR_lsetxattr:
6646 case TARGET_NR_fsetxattr:
6647 case TARGET_NR_getxattr:
6648 case TARGET_NR_lgetxattr:
6649 case TARGET_NR_fgetxattr:
6650 case TARGET_NR_listxattr:
6651 case TARGET_NR_llistxattr:
6652 case TARGET_NR_flistxattr:
6653 case TARGET_NR_removexattr:
6654 case TARGET_NR_lremovexattr:
6655 case TARGET_NR_fremovexattr:
6656 ret = -TARGET_EOPNOTSUPP;
6659 #ifdef TARGET_NR_set_thread_area
6660 case TARGET_NR_set_thread_area:
6661 #if defined(TARGET_MIPS)
6662 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
6665 #elif defined(TARGET_CRIS)
6667 ret = -TARGET_EINVAL;
6669 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
6673 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6674 ret = do_set_thread_area(cpu_env, arg1);
6677 goto unimplemented_nowarn;
6680 #ifdef TARGET_NR_get_thread_area
    /* NOTE(review): tail of the do_syscall() dispatch switch.  The enclosing
     * function, plus several #else/#endif lines, braces, guards and break
     * statements, fall outside this excerpt, so the text below is a fragment,
     * not a standalone compilation unit. */
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        /* Only emulated for 32-bit x86 guests; other targets take the
         * quiet-ENOSYS path below. */
        ret = do_get_thread_area(cpu_env, arg1);
        goto unimplemented_nowarn;
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* Deliberately unimplemented: report ENOSYS without logging a
         * warning. */
        goto unimplemented_nowarn;
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
        /* Run the host syscall, then rewrite the host struct timespec into
         * the guest layout at arg2 on success.  The clock id in arg1 is
         * passed through untranslated. */
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
        /* Same host-call-then-convert pattern as clock_gettime above. */
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
        /* ts doubles as the request and the remaining-time buffer; the
         * kernel-side remainder is only requested when the guest passed a
         * non-NULL rem pointer in arg4.  (The elided lines presumably guard
         * the write-back below on arg4 != 0 -- confirm against upstream.) */
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        host_to_target_timespec(arg4, &ts);
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* g2h(): the kernel writes the clear-child-tid word directly into
         * guest memory, so hand it the translated host address. */
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        /* Guest signal numbers may differ from the host's; translate
         * before delivering. */
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        /* Thread-group variant of tkill; same signal translation. */
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        /* Robust futex lists are not emulated; quiet ENOSYS. */
        goto unimplemented_nowarn;
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        struct timespec *tsp, ts[2];
        /* Convert the pair of guest timespecs (atime, mtime) at arg3.
         * (The excerpt elides the arg3 == 0 => tsp = NULL branch.) */
        target_to_host_timespec(ts, arg3);
        target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
        /* arg2 == NULL: operate on the dirfd itself (futimens-style). */
        ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
        if (!(p = lock_user_string(arg2))) {
            ret = -TARGET_EFAULT;
        ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
        unlock_user(p, arg2, 0);
#if defined(USE_NPTL)
    case TARGET_NR_futex:
        /* do_futex() handles the guest-side FUTEX_* operations; only
         * compiled in when NPTL thread emulation is enabled. */
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        /* Thin passthrough; no argument translation needed. */
        ret = get_errno(sys_inotify_init());
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* Only the watched pathname (arg2) needs guest->host mapping. */
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        /* fd and watch descriptor are plain integers; pass straight
         * through. */
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
#ifdef TARGET_NR_mq_open
    case TARGET_NR_mq_open:
        struct mq_attr posix_mq_attr;
        /* NOTE(review): 'arg1 - 1' when locking the queue-name string looks
         * like an off-by-one -- the matching unlock_user below uses plain
         * arg1.  Confirm against upstream before relying on it. */
        p = lock_user_string(arg1 - 1);
        copy_from_user_mq_attr (&posix_mq_attr, arg4);
        ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
        unlock_user (p, arg1, 0);
    case TARGET_NR_mq_unlink:
        /* NOTE(review): same suspicious 'arg1 - 1' as mq_open above. */
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
    case TARGET_NR_mq_timedsend:
        /* Lock the guest message buffer for reading.  (The elided lines
         * presumably select the timed vs. untimed branch on arg5.) */
        p = lock_user (VERIFY_READ, arg2, arg3, 1);
        target_to_host_timespec(&ts, arg5);
        ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
        host_to_target_timespec(arg5, &ts);
        /* arg5 == 0: plain (blocking) send with no timeout. */
        ret = get_errno(mq_send(arg1, p, arg3, arg4));
        unlock_user (p, arg2, arg3);
    case TARGET_NR_mq_timedreceive:
        /* Mirror of mq_timedsend: timed receive when arg5 is set,
         * plain mq_receive otherwise. */
        p = lock_user (VERIFY_READ, arg2, arg3, 1);
        target_to_host_timespec(&ts, arg5);
        ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
        host_to_target_timespec(arg5, &ts);
        ret = get_errno(mq_receive(arg1, p, arg3, &prio));
        unlock_user (p, arg2, arg3);
        /* Copy the received message priority back to the guest.
         * (Presumably guarded on arg4 != 0 in an elided line -- confirm.) */
        put_user_u32(prio, arg4);
    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    case TARGET_NR_mq_getsetattr:
        struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
        /* arg3 != 0: fetch the queue's current attributes to the guest. */
        ret = mq_getattr(arg1, &posix_mq_attr_out);
        copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
        /* arg2 != 0: apply new attributes from the guest.  Note the raw
         * '|=' combination of return codes rather than get_errno(). */
        copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
        ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
        /* default: unknown syscall -- log loudly, then fall into the
         * shared ENOSYS exit below. */
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
        /* Quiet ENOSYS exit used by the stubbed cases above (no log). */
        ret = -TARGET_ENOSYS;
        gemu_log(" = %ld\n", ret);      /* strace-style result trace */
        print_syscall_ret(num, ret);
        /* efault exit: a guest pointer failed lock_user()/translation. */
        ret = -TARGET_EFAULT;