4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
33 #include <sys/types.h>
39 #include <sys/mount.h>
40 #include <sys/prctl.h>
41 #include <sys/resource.h>
46 #include <sys/socket.h>
50 #include <sys/times.h>
53 #include <sys/statfs.h>
55 #include <sys/sysinfo.h>
56 #include <sys/utsname.h>
57 //#include <sys/user.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 #include <qemu-common.h>
65 #define termios host_termios
66 #define winsize host_winsize
67 #define termio host_termio
68 #define sgttyb host_sgttyb /* same as target */
69 #define tchars host_tchars /* same as target */
70 #define ltchars host_ltchars /* same as target */
72 #include <linux/termios.h>
73 #include <linux/unistd.h>
74 #include <linux/utsname.h>
75 #include <linux/cdrom.h>
76 #include <linux/hdreg.h>
77 #include <linux/soundcard.h>
79 #include <linux/mtio.h>
81 #include "linux_loop.h"
84 #include "qemu-common.h"
87 #include <linux/futex.h>
88 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
89 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
91 /* XXX: Hardcode the above values. */
92 #define CLONE_NPTL_FLAGS2 0
97 //#include <linux/msdos_fs.h>
98 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
99 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
110 #define _syscall0(type,name) \
111 static type name (void) \
113 return syscall(__NR_##name); \
116 #define _syscall1(type,name,type1,arg1) \
117 static type name (type1 arg1) \
119 return syscall(__NR_##name, arg1); \
122 #define _syscall2(type,name,type1,arg1,type2,arg2) \
123 static type name (type1 arg1,type2 arg2) \
125 return syscall(__NR_##name, arg1, arg2); \
128 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
129 static type name (type1 arg1,type2 arg2,type3 arg3) \
131 return syscall(__NR_##name, arg1, arg2, arg3); \
134 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
135 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
137 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
140 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
142 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
144 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
148 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
149 type5,arg5,type6,arg6) \
150 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
153 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
157 #define __NR_sys_uname __NR_uname
158 #define __NR_sys_faccessat __NR_faccessat
159 #define __NR_sys_fchmodat __NR_fchmodat
160 #define __NR_sys_fchownat __NR_fchownat
161 #define __NR_sys_fstatat64 __NR_fstatat64
162 #define __NR_sys_futimesat __NR_futimesat
163 #define __NR_sys_getcwd1 __NR_getcwd
164 #define __NR_sys_getdents __NR_getdents
165 #define __NR_sys_getdents64 __NR_getdents64
166 #define __NR_sys_getpriority __NR_getpriority
167 #define __NR_sys_linkat __NR_linkat
168 #define __NR_sys_mkdirat __NR_mkdirat
169 #define __NR_sys_mknodat __NR_mknodat
170 #define __NR_sys_newfstatat __NR_newfstatat
171 #define __NR_sys_openat __NR_openat
172 #define __NR_sys_readlinkat __NR_readlinkat
173 #define __NR_sys_renameat __NR_renameat
174 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
175 #define __NR_sys_symlinkat __NR_symlinkat
176 #define __NR_sys_syslog __NR_syslog
177 #define __NR_sys_tgkill __NR_tgkill
178 #define __NR_sys_tkill __NR_tkill
179 #define __NR_sys_unlinkat __NR_unlinkat
180 #define __NR_sys_utimensat __NR_utimensat
181 #define __NR_sys_futex __NR_futex
182 #define __NR_sys_inotify_init __NR_inotify_init
183 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
184 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
186 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
187 #define __NR__llseek __NR_lseek
191 _syscall0(int, gettid)
193 /* This is a replacement for the host gettid() and must return a host
195 static int gettid(void) {
199 #if TARGET_ABI_BITS == 32
200 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
202 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
203 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
205 _syscall2(int, sys_getpriority, int, which, int, who);
206 #if !defined (__x86_64__)
207 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
208 loff_t *, res, uint, wh);
210 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
211 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
212 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
213 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
215 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
216 _syscall2(int,sys_tkill,int,tid,int,sig)
218 #ifdef __NR_exit_group
219 _syscall1(int,exit_group,int,error_code)
221 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
222 _syscall1(int,set_tid_address,int *,tidptr)
224 #if defined(USE_NPTL)
225 #if defined(TARGET_NR_futex) && defined(__NR_futex)
226 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
227 const struct timespec *,timeout,int *,uaddr2,int,val3)
/* Bit-translation table for open(2)/fcntl(2) file flags.  Each row is
 * { target_mask, target_bits, host_mask, host_bits }, as consumed by
 * target_to_host_bitmask()/host_to_target_bitmask(): a flag whose
 * masked value matches on one side is rewritten to the other side's
 * encoding.  Needed because targets may number O_* bits differently
 * from the host. */
static bitmask_transtbl fcntl_flags_tbl[] = {
  /* Access mode is a 2-bit field, hence the O_ACCMODE mask. */
  { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
  { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
  { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
  { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
  { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
  { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
  { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
  { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
  { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
  { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
  /* O_DIRECT is not defined on every host */
#if defined(O_DIRECT)
  { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
/* Copy one host utsname field into a target new_utsname field,
 * guaranteeing NUL termination (strncpy() alone does not). */
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

/* Fill 'buf' (struct new_utsname, the layout the Linux kernel uses)
 * from the host's uname(2) data.  Returns 0 on success, -1 on failure
 * with errno set by uname(). */
static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  /* memset() instead of bzero(): bzero() is deprecated and was
   * removed in POSIX.1-2008. */
  memset(buf, 0, sizeof (*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  /* struct utsname's domainname member is a GNU extension */
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif

  return (0);
}

#undef COPY_UTSNAME_FIELD
/* Return the current working directory in 'buf'.  On success the
 * result is the string length *including* the trailing NUL, matching
 * the getcwd syscall's return convention; on failure -1 is returned
 * with errno left as set by getcwd(3) (e.g. ERANGE when 'size' is
 * too small). */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }

    return strlen(cwd) + 1;
}
296 * Host system seems to have atfile syscall stubs available. We
297 * now enable them one by one as specified by target syscall_nr.h.
300 #ifdef TARGET_NR_faccessat
301 static int sys_faccessat(int dirfd, const char *pathname, int mode)
303 return (faccessat(dirfd, pathname, mode, 0));
306 #ifdef TARGET_NR_fchmodat
307 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
309 return (fchmodat(dirfd, pathname, mode, 0));
312 #if defined(TARGET_NR_fchownat) && defined(USE_UID16)
313 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
314 gid_t group, int flags)
316 return (fchownat(dirfd, pathname, owner, group, flags));
319 #ifdef __NR_fstatat64
320 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
323 return (fstatat(dirfd, pathname, buf, flags));
326 #ifdef __NR_newfstatat
327 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
330 return (fstatat(dirfd, pathname, buf, flags));
333 #ifdef TARGET_NR_futimesat
334 static int sys_futimesat(int dirfd, const char *pathname,
335 const struct timeval times[2])
337 return (futimesat(dirfd, pathname, times));
340 #ifdef TARGET_NR_linkat
341 static int sys_linkat(int olddirfd, const char *oldpath,
342 int newdirfd, const char *newpath, int flags)
344 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
347 #ifdef TARGET_NR_mkdirat
348 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
350 return (mkdirat(dirfd, pathname, mode));
353 #ifdef TARGET_NR_mknodat
354 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
357 return (mknodat(dirfd, pathname, mode, dev));
360 #ifdef TARGET_NR_openat
361 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
364 * open(2) has extra parameter 'mode' when called with
367 if ((flags & O_CREAT) != 0) {
372 * Get the 'mode' parameter and translate it to
376 mode = va_arg(ap, mode_t);
377 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
380 return (openat(dirfd, pathname, flags, mode));
382 return (openat(dirfd, pathname, flags));
385 #ifdef TARGET_NR_readlinkat
386 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
388 return (readlinkat(dirfd, pathname, buf, bufsiz));
391 #ifdef TARGET_NR_renameat
392 static int sys_renameat(int olddirfd, const char *oldpath,
393 int newdirfd, const char *newpath)
395 return (renameat(olddirfd, oldpath, newdirfd, newpath));
398 #ifdef TARGET_NR_symlinkat
399 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
401 return (symlinkat(oldpath, newdirfd, newpath));
404 #ifdef TARGET_NR_unlinkat
405 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
407 return (unlinkat(dirfd, pathname, flags));
410 #else /* !CONFIG_ATFILE */
413 * Try direct syscalls instead
415 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
416 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
418 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
419 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
421 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
422 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
423 uid_t,owner,gid_t,group,int,flags)
425 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
426 defined(__NR_fstatat64)
427 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
428 struct stat *,buf,int,flags)
430 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
431 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
432 const struct timeval *,times)
434 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
435 defined(__NR_newfstatat)
436 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
437 struct stat *,buf,int,flags)
439 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
440 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
441 int,newdirfd,const char *,newpath,int,flags)
443 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
444 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
446 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
447 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
448 mode_t,mode,dev_t,dev)
450 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
451 defined(__NR_newfstatat)
452 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
453 struct stat *,buf,int,flags)
455 #if defined(TARGET_NR_openat) && defined(__NR_openat)
456 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
458 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
459 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
460 char *,buf,size_t,bufsize)
462 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
463 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
464 int,newdirfd,const char *,newpath)
466 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
467 _syscall3(int,sys_symlinkat,const char *,oldpath,
468 int,newdirfd,const char *,newpath)
470 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
471 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
474 #endif /* CONFIG_ATFILE */
476 #ifdef CONFIG_UTIMENSAT
477 static int sys_utimensat(int dirfd, const char *pathname,
478 const struct timespec times[2], int flags)
480 if (pathname == NULL)
481 return futimens(dirfd, times);
483 return utimensat(dirfd, pathname, times, flags);
486 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
487 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
488 const struct timespec *,tsp,int,flags)
490 #endif /* CONFIG_UTIMENSAT */
492 #ifdef CONFIG_INOTIFY
493 #include <sys/inotify.h>
495 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
496 static int sys_inotify_init(void)
498 return (inotify_init());
501 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
502 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
504 return (inotify_add_watch(fd, pathname, mask));
507 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
508 static int sys_inotify_rm_watch(int fd, int32_t wd)
510 return (inotify_rm_watch(fd, wd));
514 /* Userspace can usually survive runtime without inotify */
515 #undef TARGET_NR_inotify_init
516 #undef TARGET_NR_inotify_add_watch
517 #undef TARGET_NR_inotify_rm_watch
518 #endif /* CONFIG_INOTIFY */
521 extern int personality(int);
522 extern int flock(int, int);
523 extern int setfsuid(int);
524 extern int setfsgid(int);
525 extern int setgroups(int, gid_t *);
527 #define ERRNO_TABLE_SIZE 1200
529 /* target_to_host_errno_table[] is initialized from
530 * host_to_target_errno_table[] in syscall_init(). */
531 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
535 * This list is the union of errno values overridden in asm-<arch>/errno.h
536 * minus the errnos that are not actually generic to all archs.
538 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
539 [EIDRM] = TARGET_EIDRM,
540 [ECHRNG] = TARGET_ECHRNG,
541 [EL2NSYNC] = TARGET_EL2NSYNC,
542 [EL3HLT] = TARGET_EL3HLT,
543 [EL3RST] = TARGET_EL3RST,
544 [ELNRNG] = TARGET_ELNRNG,
545 [EUNATCH] = TARGET_EUNATCH,
546 [ENOCSI] = TARGET_ENOCSI,
547 [EL2HLT] = TARGET_EL2HLT,
548 [EDEADLK] = TARGET_EDEADLK,
549 [ENOLCK] = TARGET_ENOLCK,
550 [EBADE] = TARGET_EBADE,
551 [EBADR] = TARGET_EBADR,
552 [EXFULL] = TARGET_EXFULL,
553 [ENOANO] = TARGET_ENOANO,
554 [EBADRQC] = TARGET_EBADRQC,
555 [EBADSLT] = TARGET_EBADSLT,
556 [EBFONT] = TARGET_EBFONT,
557 [ENOSTR] = TARGET_ENOSTR,
558 [ENODATA] = TARGET_ENODATA,
559 [ETIME] = TARGET_ETIME,
560 [ENOSR] = TARGET_ENOSR,
561 [ENONET] = TARGET_ENONET,
562 [ENOPKG] = TARGET_ENOPKG,
563 [EREMOTE] = TARGET_EREMOTE,
564 [ENOLINK] = TARGET_ENOLINK,
565 [EADV] = TARGET_EADV,
566 [ESRMNT] = TARGET_ESRMNT,
567 [ECOMM] = TARGET_ECOMM,
568 [EPROTO] = TARGET_EPROTO,
569 [EDOTDOT] = TARGET_EDOTDOT,
570 [EMULTIHOP] = TARGET_EMULTIHOP,
571 [EBADMSG] = TARGET_EBADMSG,
572 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
573 [EOVERFLOW] = TARGET_EOVERFLOW,
574 [ENOTUNIQ] = TARGET_ENOTUNIQ,
575 [EBADFD] = TARGET_EBADFD,
576 [EREMCHG] = TARGET_EREMCHG,
577 [ELIBACC] = TARGET_ELIBACC,
578 [ELIBBAD] = TARGET_ELIBBAD,
579 [ELIBSCN] = TARGET_ELIBSCN,
580 [ELIBMAX] = TARGET_ELIBMAX,
581 [ELIBEXEC] = TARGET_ELIBEXEC,
582 [EILSEQ] = TARGET_EILSEQ,
583 [ENOSYS] = TARGET_ENOSYS,
584 [ELOOP] = TARGET_ELOOP,
585 [ERESTART] = TARGET_ERESTART,
586 [ESTRPIPE] = TARGET_ESTRPIPE,
587 [ENOTEMPTY] = TARGET_ENOTEMPTY,
588 [EUSERS] = TARGET_EUSERS,
589 [ENOTSOCK] = TARGET_ENOTSOCK,
590 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
591 [EMSGSIZE] = TARGET_EMSGSIZE,
592 [EPROTOTYPE] = TARGET_EPROTOTYPE,
593 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
594 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
595 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
596 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
597 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
598 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
599 [EADDRINUSE] = TARGET_EADDRINUSE,
600 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
601 [ENETDOWN] = TARGET_ENETDOWN,
602 [ENETUNREACH] = TARGET_ENETUNREACH,
603 [ENETRESET] = TARGET_ENETRESET,
604 [ECONNABORTED] = TARGET_ECONNABORTED,
605 [ECONNRESET] = TARGET_ECONNRESET,
606 [ENOBUFS] = TARGET_ENOBUFS,
607 [EISCONN] = TARGET_EISCONN,
608 [ENOTCONN] = TARGET_ENOTCONN,
609 [EUCLEAN] = TARGET_EUCLEAN,
610 [ENOTNAM] = TARGET_ENOTNAM,
611 [ENAVAIL] = TARGET_ENAVAIL,
612 [EISNAM] = TARGET_EISNAM,
613 [EREMOTEIO] = TARGET_EREMOTEIO,
614 [ESHUTDOWN] = TARGET_ESHUTDOWN,
615 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
616 [ETIMEDOUT] = TARGET_ETIMEDOUT,
617 [ECONNREFUSED] = TARGET_ECONNREFUSED,
618 [EHOSTDOWN] = TARGET_EHOSTDOWN,
619 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
620 [EALREADY] = TARGET_EALREADY,
621 [EINPROGRESS] = TARGET_EINPROGRESS,
622 [ESTALE] = TARGET_ESTALE,
623 [ECANCELED] = TARGET_ECANCELED,
624 [ENOMEDIUM] = TARGET_ENOMEDIUM,
625 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
627 [ENOKEY] = TARGET_ENOKEY,
630 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
633 [EKEYREVOKED] = TARGET_EKEYREVOKED,
636 [EKEYREJECTED] = TARGET_EKEYREJECTED,
639 [EOWNERDEAD] = TARGET_EOWNERDEAD,
641 #ifdef ENOTRECOVERABLE
642 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
646 static inline int host_to_target_errno(int err)
648 if(host_to_target_errno_table[err])
649 return host_to_target_errno_table[err];
653 static inline int target_to_host_errno(int err)
655 if (target_to_host_errno_table[err])
656 return target_to_host_errno_table[err];
660 static inline abi_long get_errno(abi_long ret)
663 return -host_to_target_errno(errno);
668 static inline int is_error(abi_long ret)
670 return (abi_ulong)ret >= (abi_ulong)(-4096);
673 char *target_strerror(int err)
675 return strerror(target_to_host_errno(err));
678 static abi_ulong target_brk;
679 static abi_ulong target_original_brk;
681 void target_set_brk(abi_ulong new_brk)
683 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
686 /* do_brk() must return target values and target errnos. */
687 abi_long do_brk(abi_ulong new_brk)
690 abi_long mapped_addr;
695 if (new_brk < target_original_brk)
698 brk_page = HOST_PAGE_ALIGN(target_brk);
700 /* If the new brk is less than this, set it and we're done... */
701 if (new_brk < brk_page) {
702 target_brk = new_brk;
706 /* We need to allocate more memory after the brk... */
707 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
708 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
709 PROT_READ|PROT_WRITE,
710 MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
712 if (!is_error(mapped_addr))
713 target_brk = new_brk;
718 static inline abi_long copy_from_user_fdset(fd_set *fds,
719 abi_ulong target_fds_addr,
723 abi_ulong b, *target_fds;
725 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
726 if (!(target_fds = lock_user(VERIFY_READ,
728 sizeof(abi_ulong) * nw,
730 return -TARGET_EFAULT;
734 for (i = 0; i < nw; i++) {
735 /* grab the abi_ulong */
736 __get_user(b, &target_fds[i]);
737 for (j = 0; j < TARGET_ABI_BITS; j++) {
738 /* check the bit inside the abi_ulong */
745 unlock_user(target_fds, target_fds_addr, 0);
750 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
756 abi_ulong *target_fds;
758 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
759 if (!(target_fds = lock_user(VERIFY_WRITE,
761 sizeof(abi_ulong) * nw,
763 return -TARGET_EFAULT;
766 for (i = 0; i < nw; i++) {
768 for (j = 0; j < TARGET_ABI_BITS; j++) {
769 v |= ((FD_ISSET(k, fds) != 0) << j);
772 __put_user(v, &target_fds[i]);
775 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
780 #if defined(__alpha__)
786 static inline abi_long host_to_target_clock_t(long ticks)
788 #if HOST_HZ == TARGET_HZ
791 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
795 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
796 const struct rusage *rusage)
798 struct target_rusage *target_rusage;
800 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
801 return -TARGET_EFAULT;
802 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
803 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
804 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
805 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
806 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
807 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
808 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
809 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
810 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
811 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
812 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
813 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
814 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
815 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
816 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
817 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
818 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
819 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
820 unlock_user_struct(target_rusage, target_addr, 1);
825 static inline abi_long copy_from_user_timeval(struct timeval *tv,
826 abi_ulong target_tv_addr)
828 struct target_timeval *target_tv;
830 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
831 return -TARGET_EFAULT;
833 __get_user(tv->tv_sec, &target_tv->tv_sec);
834 __get_user(tv->tv_usec, &target_tv->tv_usec);
836 unlock_user_struct(target_tv, target_tv_addr, 0);
841 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
842 const struct timeval *tv)
844 struct target_timeval *target_tv;
846 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
847 return -TARGET_EFAULT;
849 __put_user(tv->tv_sec, &target_tv->tv_sec);
850 __put_user(tv->tv_usec, &target_tv->tv_usec);
852 unlock_user_struct(target_tv, target_tv_addr, 1);
857 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
858 abi_ulong target_mq_attr_addr)
860 struct target_mq_attr *target_mq_attr;
862 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
863 target_mq_attr_addr, 1))
864 return -TARGET_EFAULT;
866 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
867 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
868 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
869 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
871 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
876 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
877 const struct mq_attr *attr)
879 struct target_mq_attr *target_mq_attr;
881 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
882 target_mq_attr_addr, 0))
883 return -TARGET_EFAULT;
885 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
886 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
887 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
888 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
890 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
895 /* do_select() must return target values and target errnos. */
896 static abi_long do_select(int n,
897 abi_ulong rfd_addr, abi_ulong wfd_addr,
898 abi_ulong efd_addr, abi_ulong target_tv_addr)
900 fd_set rfds, wfds, efds;
901 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
902 struct timeval tv, *tv_ptr;
906 if (copy_from_user_fdset(&rfds, rfd_addr, n))
907 return -TARGET_EFAULT;
913 if (copy_from_user_fdset(&wfds, wfd_addr, n))
914 return -TARGET_EFAULT;
920 if (copy_from_user_fdset(&efds, efd_addr, n))
921 return -TARGET_EFAULT;
927 if (target_tv_addr) {
928 if (copy_from_user_timeval(&tv, target_tv_addr))
929 return -TARGET_EFAULT;
935 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
937 if (!is_error(ret)) {
938 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
939 return -TARGET_EFAULT;
940 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
941 return -TARGET_EFAULT;
942 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
943 return -TARGET_EFAULT;
945 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
946 return -TARGET_EFAULT;
952 static abi_long do_pipe2(int host_pipe[], int flags)
955 return pipe2(host_pipe, flags);
961 static abi_long do_pipe(void *cpu_env, int pipedes, int flags)
965 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
968 return get_errno(ret);
969 #if defined(TARGET_MIPS)
970 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
972 #elif defined(TARGET_SH4)
973 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
976 if (put_user_s32(host_pipe[0], pipedes)
977 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
978 return -TARGET_EFAULT;
980 return get_errno(ret);
983 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
984 abi_ulong target_addr,
987 struct target_ip_mreqn *target_smreqn;
989 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
991 return -TARGET_EFAULT;
992 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
993 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
994 if (len == sizeof(struct target_ip_mreqn))
995 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
996 unlock_user(target_smreqn, target_addr, 0);
1001 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1002 abi_ulong target_addr,
1005 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1006 sa_family_t sa_family;
1007 struct target_sockaddr *target_saddr;
1009 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1011 return -TARGET_EFAULT;
1013 sa_family = tswap16(target_saddr->sa_family);
1015 /* Oops. The caller might send a incomplete sun_path; sun_path
1016 * must be terminated by \0 (see the manual page), but
1017 * unfortunately it is quite common to specify sockaddr_un
1018 * length as "strlen(x->sun_path)" while it should be
1019 * "strlen(...) + 1". We'll fix that here if needed.
1020 * Linux kernel has a similar feature.
1023 if (sa_family == AF_UNIX) {
1024 if (len < unix_maxlen && len > 0) {
1025 char *cp = (char*)target_saddr;
1027 if ( cp[len-1] && !cp[len] )
1030 if (len > unix_maxlen)
1034 memcpy(addr, target_saddr, len);
1035 addr->sa_family = sa_family;
1036 unlock_user(target_saddr, target_addr, 0);
1041 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1042 struct sockaddr *addr,
1045 struct target_sockaddr *target_saddr;
1047 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1049 return -TARGET_EFAULT;
1050 memcpy(target_saddr, addr, len);
1051 target_saddr->sa_family = tswap16(addr->sa_family);
1052 unlock_user(target_saddr, target_addr, len);
1057 /* ??? Should this also swap msgh->name? */
1058 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1059 struct target_msghdr *target_msgh)
1061 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1062 abi_long msg_controllen;
1063 abi_ulong target_cmsg_addr;
1064 struct target_cmsghdr *target_cmsg;
1065 socklen_t space = 0;
1067 msg_controllen = tswapl(target_msgh->msg_controllen);
1068 if (msg_controllen < sizeof (struct target_cmsghdr))
1070 target_cmsg_addr = tswapl(target_msgh->msg_control);
1071 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1073 return -TARGET_EFAULT;
1075 while (cmsg && target_cmsg) {
1076 void *data = CMSG_DATA(cmsg);
1077 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1079 int len = tswapl(target_cmsg->cmsg_len)
1080 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1082 space += CMSG_SPACE(len);
1083 if (space > msgh->msg_controllen) {
1084 space -= CMSG_SPACE(len);
1085 gemu_log("Host cmsg overflow\n");
1089 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1090 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1091 cmsg->cmsg_len = CMSG_LEN(len);
1093 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1094 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1095 memcpy(data, target_data, len);
1097 int *fd = (int *)data;
1098 int *target_fd = (int *)target_data;
1099 int i, numfds = len / sizeof(int);
1101 for (i = 0; i < numfds; i++)
1102 fd[i] = tswap32(target_fd[i]);
1105 cmsg = CMSG_NXTHDR(msgh, cmsg);
1106 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1108 unlock_user(target_cmsg, target_cmsg_addr, 0);
1110 msgh->msg_controllen = space;
1114 /* ??? Should this also swap msgh->name? */
1115 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1116 struct msghdr *msgh)
1118 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1119 abi_long msg_controllen;
1120 abi_ulong target_cmsg_addr;
1121 struct target_cmsghdr *target_cmsg;
1122 socklen_t space = 0;
1124 msg_controllen = tswapl(target_msgh->msg_controllen);
1125 if (msg_controllen < sizeof (struct target_cmsghdr))
1127 target_cmsg_addr = tswapl(target_msgh->msg_control);
1128 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1130 return -TARGET_EFAULT;
1132 while (cmsg && target_cmsg) {
1133 void *data = CMSG_DATA(cmsg);
1134 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1136 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1138 space += TARGET_CMSG_SPACE(len);
1139 if (space > msg_controllen) {
1140 space -= TARGET_CMSG_SPACE(len);
1141 gemu_log("Target cmsg overflow\n");
1145 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1146 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1147 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1149 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1150 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1151 memcpy(target_data, data, len);
1153 int *fd = (int *)data;
1154 int *target_fd = (int *)target_data;
1155 int i, numfds = len / sizeof(int);
1157 for (i = 0; i < numfds; i++)
1158 target_fd[i] = tswap32(fd[i]);
1161 cmsg = CMSG_NXTHDR(msgh, cmsg);
1162 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1164 unlock_user(target_cmsg, target_cmsg_addr, space);
1166 target_msgh->msg_controllen = tswapl(space);
1170 /* do_setsockopt() Must return target values and target errnos. */
/*
 * Forward a guest setsockopt(2) to the host, converting the option value
 * from guest to host representation where the layouts differ.
 * NOTE(review): this extract is missing interior lines (switch headers,
 * case labels, breaks, declarations); comments describe only visible code.
 */
1171 static abi_long do_setsockopt(int sockfd, int level, int optname,
1172 abi_ulong optval_addr, socklen_t optlen)
1176 struct ip_mreqn *ip_mreq;
1177 struct ip_mreq_source *ip_mreq_source;
1181 /* TCP options all take an 'int' value. */
1182 if (optlen < sizeof(uint32_t))
1183 return -TARGET_EINVAL;
1185 if (get_user_u32(val, optval_addr))
1186 return -TARGET_EFAULT;
/* get_user_u32 already byte-swapped the value; pass it straight through. */
1187 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1194 case IP_ROUTER_ALERT:
1198 case IP_MTU_DISCOVER:
1204 case IP_MULTICAST_TTL:
1205 case IP_MULTICAST_LOOP:
/* These IP options accept either a full int or a single byte. */
1207 if (optlen >= sizeof(uint32_t)) {
1208 if (get_user_u32(val, optval_addr))
1209 return -TARGET_EFAULT;
1210 } else if (optlen >= 1) {
1211 if (get_user_u8(val, optval_addr))
1212 return -TARGET_EFAULT;
1214 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1216 case IP_ADD_MEMBERSHIP:
1217 case IP_DROP_MEMBERSHIP:
/* Membership requests may be an ip_mreq or the larger ip_mreqn;
   accept any length between the two. */
1218 if (optlen < sizeof (struct target_ip_mreq) ||
1219 optlen > sizeof (struct target_ip_mreqn))
1220 return -TARGET_EINVAL;
1222 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1223 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1224 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1227 case IP_BLOCK_SOURCE:
1228 case IP_UNBLOCK_SOURCE:
1229 case IP_ADD_SOURCE_MEMBERSHIP:
1230 case IP_DROP_SOURCE_MEMBERSHIP:
1231 if (optlen != sizeof (struct target_ip_mreq_source))
1232 return -TARGET_EINVAL;
/* The guest buffer is passed to the host call unconverted — presumably
   ip_mreq_source is layout-compatible between target and host; confirm. */
1234 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1235 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1236 unlock_user (ip_mreq_source, optval_addr, 0);
1243 case TARGET_SOL_SOCKET:
1245 /* Options with 'int' argument. */
/* Map each TARGET_SO_* constant onto the host's SO_* value before the
   shared "read an int and call setsockopt" tail below. */
1246 case TARGET_SO_DEBUG:
1249 case TARGET_SO_REUSEADDR:
1250 optname = SO_REUSEADDR;
1252 case TARGET_SO_TYPE:
1255 case TARGET_SO_ERROR:
1258 case TARGET_SO_DONTROUTE:
1259 optname = SO_DONTROUTE;
1261 case TARGET_SO_BROADCAST:
1262 optname = SO_BROADCAST;
1264 case TARGET_SO_SNDBUF:
1265 optname = SO_SNDBUF;
1267 case TARGET_SO_RCVBUF:
1268 optname = SO_RCVBUF;
1270 case TARGET_SO_KEEPALIVE:
1271 optname = SO_KEEPALIVE;
1273 case TARGET_SO_OOBINLINE:
1274 optname = SO_OOBINLINE;
1276 case TARGET_SO_NO_CHECK:
1277 optname = SO_NO_CHECK;
1279 case TARGET_SO_PRIORITY:
1280 optname = SO_PRIORITY;
1283 case TARGET_SO_BSDCOMPAT:
1284 optname = SO_BSDCOMPAT;
1287 case TARGET_SO_PASSCRED:
1288 optname = SO_PASSCRED;
1290 case TARGET_SO_TIMESTAMP:
1291 optname = SO_TIMESTAMP;
1293 case TARGET_SO_RCVLOWAT:
1294 optname = SO_RCVLOWAT;
1296 case TARGET_SO_RCVTIMEO:
1297 optname = SO_RCVTIMEO;
1299 case TARGET_SO_SNDTIMEO:
1300 optname = SO_SNDTIMEO;
/* Common tail for the int-valued SOL_SOCKET options above. */
1306 if (optlen < sizeof(uint32_t))
1307 return -TARGET_EINVAL;
1309 if (get_user_u32(val, optval_addr))
1310 return -TARGET_EFAULT;
1311 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
/* Anything not handled above is logged, then refused. */
1315 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1316 ret = -TARGET_ENOPROTOOPT;
1321 /* do_getsockopt() Must return target values and target errnos. */
/*
 * Forward a guest getsockopt(2) to the host and write the result back
 * into guest memory, honouring the guest-supplied length.
 * NOTE(review): interior lines (switch headers, length clamping, the
 * 'lv' setup) are missing from this extract; comments cover visible code.
 */
1322 static abi_long do_getsockopt(int sockfd, int level, int optname,
1323 abi_ulong optval_addr, abi_ulong optlen)
1330 case TARGET_SOL_SOCKET:
1333 case TARGET_SO_LINGER:
1334 case TARGET_SO_RCVTIMEO:
1335 case TARGET_SO_SNDTIMEO:
1336 case TARGET_SO_PEERCRED:
1337 case TARGET_SO_PEERNAME:
1338 /* These don't just return a single integer */
1345 /* TCP options all take an 'int' value. */
/* 'optlen' is a guest pointer to the in/out length word. */
1347 if (get_user_u32(len, optlen))
1348 return -TARGET_EFAULT;
1350 return -TARGET_EINVAL;
1352 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1359 if (put_user_u32(val, optval_addr))
1360 return -TARGET_EFAULT;
/* Guest asked for fewer than 4 bytes: store just one byte. */
1362 if (put_user_u8(val, optval_addr))
1363 return -TARGET_EFAULT;
1365 if (put_user_u32(len, optlen))
1366 return -TARGET_EFAULT;
1373 case IP_ROUTER_ALERT:
1377 case IP_MTU_DISCOVER:
1383 case IP_MULTICAST_TTL:
1384 case IP_MULTICAST_LOOP:
1385 if (get_user_u32(len, optlen))
1386 return -TARGET_EFAULT;
1388 return -TARGET_EINVAL;
1390 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv))
1393 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
/* Small-buffer case: value fits in a byte, write value + length. */
1395 if (put_user_u32(len, optlen)
1396 || put_user_u8(val, optval_addr))
1397 return -TARGET_EFAULT;
/* Clamp the reported length to sizeof(int) before writing back. */
1399 if (len > sizeof(int))
1401 if (put_user_u32(len, optlen)
1402 || put_user_u32(val, optval_addr))
1403 return -TARGET_EFAULT;
1407 ret = -TARGET_ENOPROTOOPT;
1413 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1415 ret = -TARGET_EOPNOTSUPP;
1422 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1423 * other lock functions have a return code of 0 for failure.
/*
 * Build a host iovec array from a guest iovec array, locking each
 * element's buffer into host-accessible memory.
 * 'type' is VERIFY_READ/VERIFY_WRITE; 'copy' controls whether lock_user
 * copies guest data in.  Returns 0 on success, -TARGET_EFAULT otherwise.
 */
1425 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1426 int count, int copy)
1428 struct target_iovec *target_vec;
1432 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1434 return -TARGET_EFAULT;
1435 for(i = 0;i < count; i++) {
1436 base = tswapl(target_vec[i].iov_base);
1437 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1438 if (vec[i].iov_len != 0) {
1439 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1440 /* Don't check lock_user return value. We must call writev even
1441 if an element has an invalid base address. */
1443 /* zero length pointer is ignored */
1444 vec[i].iov_base = NULL;
1447 unlock_user (target_vec, target_addr, 0);
/*
 * Undo lock_iovec(): unlock every element buffer, copying data back to
 * the guest when 'copy' is non-zero (i.e. after a read-style syscall).
 */
1451 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1452 int count, int copy)
1454 struct target_iovec *target_vec;
1458 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1460 return -TARGET_EFAULT;
1461 for(i = 0;i < count; i++) {
/* NULL guest base means lock_iovec stored NULL; nothing to unlock. */
1462 if (target_vec[i].iov_base) {
1463 base = tswapl(target_vec[i].iov_base);
1464 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1467 unlock_user (target_vec, target_addr, 0);
1472 /* do_socket() Must return target values and target errnos. */
/*
 * Emulate socket(2).  On MIPS the SOCK_* constants differ from the
 * host's, so the type is remapped first.
 */
1473 static abi_long do_socket(int domain, int type, int protocol)
1475 #if defined(TARGET_MIPS)
1477 case TARGET_SOCK_DGRAM:
1480 case TARGET_SOCK_STREAM:
1483 case TARGET_SOCK_RAW:
1486 case TARGET_SOCK_RDM:
1489 case TARGET_SOCK_SEQPACKET:
1490 type = SOCK_SEQPACKET;
1492 case TARGET_SOCK_PACKET:
1497 if (domain == PF_NETLINK)
/* NOTE(review): returns a bare host errno (-EAFNOSUPPORT, not
   -TARGET_EAFNOSUPPORT) — looks inconsistent with the contract above;
   confirm whether the constants happen to coincide for this target. */
1498 return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
1499 return get_errno(socket(domain, type, protocol));
1502 /* do_bind() Must return target values and target errnos. */
/*
 * Emulate bind(2): convert the guest sockaddr to host form on the
 * stack, then call the host bind().
 */
1503 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1509 return -TARGET_EINVAL;
/* NOTE(review): the +1 (absent in do_connect's alloca) is not explained
   by anything visible here — possibly headroom for zero-length AF_UNIX
   names; confirm before changing. */
1511 addr = alloca(addrlen+1);
1513 target_to_host_sockaddr(addr, target_addr, addrlen);
1514 return get_errno(bind(sockfd, addr, addrlen));
1517 /* do_connect() Must return target values and target errnos. */
/*
 * Emulate connect(2): convert the guest sockaddr to host form on the
 * stack, then call the host connect().
 */
1518 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1524 return -TARGET_EINVAL;
1526 addr = alloca(addrlen);
1528 target_to_host_sockaddr(addr, target_addr, addrlen);
1529 return get_errno(connect(sockfd, addr, addrlen));
1532 /* do_sendrecvmsg() Must return target values and target errnos. */
/*
 * Shared implementation of sendmsg(2)/recvmsg(2); 'send' selects the
 * direction.  Converts the guest msghdr (name, iovec array, control
 * data) to host form, performs the call, and converts results back.
 */
1533 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1534 int flags, int send)
1537 struct target_msghdr *msgp;
1541 abi_ulong target_vec;
1544 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1548 return -TARGET_EFAULT;
1549 if (msgp->msg_name) {
1550 msg.msg_namelen = tswap32(msgp->msg_namelen);
1551 msg.msg_name = alloca(msg.msg_namelen);
1552 target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1555 msg.msg_name = NULL;
1556 msg.msg_namelen = 0;
/* Double the control buffer: host cmsg headers/payloads can be larger
   than the guest's encoding of the same data. */
1558 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1559 msg.msg_control = alloca(msg.msg_controllen);
1560 msg.msg_flags = tswap32(msgp->msg_flags);
1562 count = tswapl(msgp->msg_iovlen);
1563 vec = alloca(count * sizeof(struct iovec));
1564 target_vec = tswapl(msgp->msg_iov);
/* NOTE(review): lock_iovec's return value is not checked here. */
1565 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1566 msg.msg_iovlen = count;
1570 ret = target_to_host_cmsg(&msg, msgp);
1572 ret = get_errno(sendmsg(fd, &msg, flags));
1574 ret = get_errno(recvmsg(fd, &msg, flags));
1575 if (!is_error(ret)) {
/* On a successful receive, translate any ancillary data back. */
1577 ret = host_to_target_cmsg(msgp, &msg);
/* Copy data back to the guest only on the receive path (!send). */
1582 unlock_iovec(vec, target_vec, count, !send);
1583 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1587 /* do_accept() Must return target values and target errnos. */
/*
 * Emulate accept(2).  A NULL guest address means the caller does not
 * want the peer address, which maps directly onto the host call.
 */
1588 static abi_long do_accept(int fd, abi_ulong target_addr,
1589 abi_ulong target_addrlen_addr)
1595 if (target_addr == 0)
1596 return get_errno(accept(fd, NULL, NULL));
1598 if (get_user_u32(addrlen, target_addrlen_addr))
1599 return -TARGET_EFAULT;
1602 return -TARGET_EINVAL;
1604 addr = alloca(addrlen);
1606 ret = get_errno(accept(fd, addr, &addrlen));
1607 if (!is_error(ret)) {
/* Write the peer address and its (possibly shrunk) length back. */
1608 host_to_target_sockaddr(target_addr, addr, addrlen);
1609 if (put_user_u32(addrlen, target_addrlen_addr))
1610 ret = -TARGET_EFAULT;
1615 /* do_getpeername() Must return target values and target errnos. */
/*
 * Emulate getpeername(2): read the guest's length word, call the host,
 * and copy the peer address plus updated length back to the guest.
 */
1616 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1617 abi_ulong target_addrlen_addr)
1623 if (get_user_u32(addrlen, target_addrlen_addr))
1624 return -TARGET_EFAULT;
1627 return -TARGET_EINVAL;
1629 addr = alloca(addrlen);
1631 ret = get_errno(getpeername(fd, addr, &addrlen));
1632 if (!is_error(ret)) {
1633 host_to_target_sockaddr(target_addr, addr, addrlen);
1634 if (put_user_u32(addrlen, target_addrlen_addr))
1635 ret = -TARGET_EFAULT;
1640 /* do_getsockname() Must return target values and target errnos. */
1641 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1642 abi_ulong target_addrlen_addr)
1648 if (target_addr == 0)
1649 return get_errno(accept(fd, NULL, NULL));
1651 if (get_user_u32(addrlen, target_addrlen_addr))
1652 return -TARGET_EFAULT;
1655 return -TARGET_EINVAL;
1657 addr = alloca(addrlen);
1659 ret = get_errno(getsockname(fd, addr, &addrlen));
1660 if (!is_error(ret)) {
1661 host_to_target_sockaddr(target_addr, addr, addrlen);
1662 if (put_user_u32(addrlen, target_addrlen_addr))
1663 ret = -TARGET_EFAULT;
1668 /* do_socketpair() Must return target values and target errnos. */
/*
 * Emulate socketpair(2): call the host, then store the two new fds as
 * consecutive 32-bit ints at the guest address.
 */
1669 static abi_long do_socketpair(int domain, int type, int protocol,
1670 abi_ulong target_tab_addr)
1675 ret = get_errno(socketpair(domain, type, protocol, tab));
1676 if (!is_error(ret)) {
1677 if (put_user_s32(tab[0], target_tab_addr)
1678 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1679 ret = -TARGET_EFAULT;
1684 /* do_sendto() Must return target values and target errnos. */
/*
 * Shared implementation of sendto(2)/send(2): a zero target_addr means
 * no destination address was supplied, so plain send() is used.
 */
1685 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1686 abi_ulong target_addr, socklen_t addrlen)
1693 return -TARGET_EINVAL;
1695 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1697 return -TARGET_EFAULT;
1699 addr = alloca(addrlen);
1700 target_to_host_sockaddr(addr, target_addr, addrlen);
1701 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1703 ret = get_errno(send(fd, host_msg, len, flags));
/* Read-only buffer: nothing to copy back on unlock. */
1705 unlock_user(host_msg, msg, 0);
1709 /* do_recvfrom() Must return target values and target errnos. */
/*
 * Shared implementation of recvfrom(2)/recv(2): a zero target_addr means
 * the caller does not want the source address, so plain recv() is used.
 */
1710 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1711 abi_ulong target_addr,
1712 abi_ulong target_addrlen)
1719 host_msg = lock_user(VERIFY_WRITE, msg, len, 0)
1721 return -TARGET_EFAULT;
1723 if (get_user_u32(addrlen, target_addrlen)) {
1724 ret = -TARGET_EFAULT;
1728 ret = -TARGET_EINVAL;
1731 addr = alloca(addrlen);
1732 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1734 addr = NULL; /* To keep compiler quiet. */
1735 ret = get_errno(recv(fd, host_msg, len, flags));
1737 if (!is_error(ret)) {
/* Only the recvfrom path writes the source address back. */
1739 host_to_target_sockaddr(target_addr, addr, addrlen);
1740 if (put_user_u32(addrlen, target_addrlen)) {
1741 ret = -TARGET_EFAULT;
/* Success: copy the received bytes out to the guest buffer. */
1745 unlock_user(host_msg, msg, len);
/* Failure path: unlock without copying anything back. */
1748 unlock_user(host_msg, msg, 0);
1753 #ifdef TARGET_NR_socketcall
1754 /* do_socketcall() Must return target values and target errnos. */
/*
 * Demultiplex the socketcall(2) super-syscall: 'num' selects the
 * operation and 'vptr' points at an array of abi_ulong-sized arguments
 * in guest memory.  Each case reads its argument words and dispatches
 * to the matching do_*() helper above.
 * NOTE(review): case labels and break statements are partly elided in
 * this extract.
 */
1755 static abi_long do_socketcall(int num, abi_ulong vptr)
/* n = size of one guest argument slot. */
1758 const int n = sizeof(abi_ulong);
1763 int domain, type, protocol;
1765 if (get_user_s32(domain, vptr)
1766 || get_user_s32(type, vptr + n)
1767 || get_user_s32(protocol, vptr + 2 * n))
1768 return -TARGET_EFAULT;
1770 ret = do_socket(domain, type, protocol);
1776 abi_ulong target_addr;
1779 if (get_user_s32(sockfd, vptr)
1780 || get_user_ual(target_addr, vptr + n)
1781 || get_user_u32(addrlen, vptr + 2 * n))
1782 return -TARGET_EFAULT;
1784 ret = do_bind(sockfd, target_addr, addrlen);
1787 case SOCKOP_connect:
1790 abi_ulong target_addr;
1793 if (get_user_s32(sockfd, vptr)
1794 || get_user_ual(target_addr, vptr + n)
1795 || get_user_u32(addrlen, vptr + 2 * n))
1796 return -TARGET_EFAULT;
1798 ret = do_connect(sockfd, target_addr, addrlen);
1803 int sockfd, backlog;
1805 if (get_user_s32(sockfd, vptr)
1806 || get_user_s32(backlog, vptr + n))
1807 return -TARGET_EFAULT;
/* listen() needs no argument conversion; call the host directly. */
1809 ret = get_errno(listen(sockfd, backlog));
1815 abi_ulong target_addr, target_addrlen;
1817 if (get_user_s32(sockfd, vptr)
1818 || get_user_ual(target_addr, vptr + n)
1819 || get_user_u32(target_addrlen, vptr + 2 * n))
1820 return -TARGET_EFAULT;
1822 ret = do_accept(sockfd, target_addr, target_addrlen);
1825 case SOCKOP_getsockname:
1828 abi_ulong target_addr, target_addrlen;
1830 if (get_user_s32(sockfd, vptr)
1831 || get_user_ual(target_addr, vptr + n)
1832 || get_user_u32(target_addrlen, vptr + 2 * n))
1833 return -TARGET_EFAULT;
1835 ret = do_getsockname(sockfd, target_addr, target_addrlen);
1838 case SOCKOP_getpeername:
1841 abi_ulong target_addr, target_addrlen;
1843 if (get_user_s32(sockfd, vptr)
1844 || get_user_ual(target_addr, vptr + n)
1845 || get_user_u32(target_addrlen, vptr + 2 * n))
1846 return -TARGET_EFAULT;
1848 ret = do_getpeername(sockfd, target_addr, target_addrlen);
1851 case SOCKOP_socketpair:
1853 int domain, type, protocol;
1856 if (get_user_s32(domain, vptr)
1857 || get_user_s32(type, vptr + n)
1858 || get_user_s32(protocol, vptr + 2 * n)
1859 || get_user_ual(tab, vptr + 3 * n))
1860 return -TARGET_EFAULT;
1862 ret = do_socketpair(domain, type, protocol, tab);
/* send/recv: no address, so pass 0/0 for addr and addrlen. */
1872 if (get_user_s32(sockfd, vptr)
1873 || get_user_ual(msg, vptr + n)
1874 || get_user_ual(len, vptr + 2 * n)
1875 || get_user_s32(flags, vptr + 3 * n))
1876 return -TARGET_EFAULT;
1878 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
1888 if (get_user_s32(sockfd, vptr)
1889 || get_user_ual(msg, vptr + n)
1890 || get_user_ual(len, vptr + 2 * n)
1891 || get_user_s32(flags, vptr + 3 * n))
1892 return -TARGET_EFAULT;
1894 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
/* sendto: six argument slots including address and length. */
1906 if (get_user_s32(sockfd, vptr)
1907 || get_user_ual(msg, vptr + n)
1908 || get_user_ual(len, vptr + 2 * n)
1909 || get_user_s32(flags, vptr + 3 * n)
1910 || get_user_ual(addr, vptr + 4 * n)
1911 || get_user_u32(addrlen, vptr + 5 * n))
1912 return -TARGET_EFAULT;
1914 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
1917 case SOCKOP_recvfrom:
1926 if (get_user_s32(sockfd, vptr)
1927 || get_user_ual(msg, vptr + n)
1928 || get_user_ual(len, vptr + 2 * n)
1929 || get_user_s32(flags, vptr + 3 * n)
1930 || get_user_ual(addr, vptr + 4 * n)
1931 || get_user_u32(addrlen, vptr + 5 * n))
1932 return -TARGET_EFAULT;
1934 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
1937 case SOCKOP_shutdown:
1941 if (get_user_s32(sockfd, vptr)
1942 || get_user_s32(how, vptr + n))
1943 return -TARGET_EFAULT;
1945 ret = get_errno(shutdown(sockfd, how));
1948 case SOCKOP_sendmsg:
1949 case SOCKOP_recvmsg:
1952 abi_ulong target_msg;
1955 if (get_user_s32(fd, vptr)
1956 || get_user_ual(target_msg, vptr + n)
1957 || get_user_s32(flags, vptr + 2 * n))
1958 return -TARGET_EFAULT;
1960 ret = do_sendrecvmsg(fd, target_msg, flags,
1961 (num == SOCKOP_sendmsg));
1964 case SOCKOP_setsockopt:
1972 if (get_user_s32(sockfd, vptr)
1973 || get_user_s32(level, vptr + n)
1974 || get_user_s32(optname, vptr + 2 * n)
1975 || get_user_ual(optval, vptr + 3 * n)
1976 || get_user_u32(optlen, vptr + 4 * n))
1977 return -TARGET_EFAULT;
1979 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
1982 case SOCKOP_getsockopt:
1990 if (get_user_s32(sockfd, vptr)
1991 || get_user_s32(level, vptr + n)
1992 || get_user_s32(optname, vptr + 2 * n)
1993 || get_user_ual(optval, vptr + 3 * n)
1994 || get_user_u32(optlen, vptr + 4 * n))
1995 return -TARGET_EFAULT;
1997 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2001 gemu_log("Unsupported socketcall: %d\n", num);
2002 ret = -TARGET_ENOSYS;
2009 #define N_SHM_REGIONS 32
/* Fixed-size table recording guest shmat() mappings so do_shmdt() can
   later clear the page flags for the detached region.  An entry with
   start == 0 is free (see do_shmat/do_shmdt below).
   NOTE(review): the struct's fields are elided from this extract. */
2011 static struct shm_region {
2014 } shm_regions[N_SHM_REGIONS];
/* Guest-layout mirror of the host's struct ipc_perm (SysV IPC
   permissions).  NOTE(review): the opening brace and leading fields
   (key/uid/gid/...) are elided from this extract. */
2016 struct target_ipc_perm
2023 unsigned short int mode;
2024 unsigned short int __pad1;
2025 unsigned short int __seq;
2026 unsigned short int __pad2;
2027 abi_ulong __unused1;
2028 abi_ulong __unused2;
/* Guest-layout mirror of the host's struct semid_ds (semaphore set
   state), converted by the helpers below. */
2031 struct target_semid_ds
2033 struct target_ipc_perm sem_perm;
2034 abi_ulong sem_otime;
2035 abi_ulong __unused1;
2036 abi_ulong sem_ctime;
2037 abi_ulong __unused2;
2038 abi_ulong sem_nsems;
2039 abi_ulong __unused3;
2040 abi_ulong __unused4;
/*
 * Copy the ipc_perm embedded at the head of a guest semid_ds into a
 * host struct ipc_perm.  target_addr points at the whole guest
 * semid_ds; sem_perm is its first member.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is bad.
 */
2043 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2044 abi_ulong target_addr)
2046 struct target_ipc_perm *target_ip;
2047 struct target_semid_ds *target_sd;
2049 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2050 return -TARGET_EFAULT;
2051 target_ip=&(target_sd->sem_perm);
2052 host_ip->__key = tswapl(target_ip->__key);
2053 host_ip->uid = tswapl(target_ip->uid);
2054 host_ip->gid = tswapl(target_ip->gid);
2055 host_ip->cuid = tswapl(target_ip->cuid);
2056 host_ip->cgid = tswapl(target_ip->cgid);
2057 host_ip->mode = tswapl(target_ip->mode);
2058 unlock_user_struct(target_sd, target_addr, 0);
/*
 * Inverse of target_to_host_ipc_perm(): write a host struct ipc_perm
 * into the sem_perm member of the guest semid_ds at target_addr.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is bad.
 */
2062 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2063 struct ipc_perm *host_ip)
2065 struct target_ipc_perm *target_ip;
2066 struct target_semid_ds *target_sd;
2068 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2069 return -TARGET_EFAULT;
2070 target_ip = &(target_sd->sem_perm);
2071 target_ip->__key = tswapl(host_ip->__key);
2072 target_ip->uid = tswapl(host_ip->uid);
2073 target_ip->gid = tswapl(host_ip->gid);
2074 target_ip->cuid = tswapl(host_ip->cuid);
2075 target_ip->cgid = tswapl(host_ip->cgid);
2076 target_ip->mode = tswapl(host_ip->mode);
2077 unlock_user_struct(target_sd, target_addr, 1);
/*
 * Convert a guest semid_ds at target_addr into a host struct semid_ds
 * (permissions via target_to_host_ipc_perm, then the scalar fields).
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
2081 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2082 abi_ulong target_addr)
2084 struct target_semid_ds *target_sd;
2086 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2087 return -TARGET_EFAULT;
2088 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2089 return -TARGET_EFAULT;
2090 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2091 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2092 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2093 unlock_user_struct(target_sd, target_addr, 0);
2097 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2098 struct semid_ds *host_sd)
2100 struct target_semid_ds *target_sd;
2102 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2103 return -TARGET_EFAULT;
2104 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2105 return -TARGET_EFAULT;;
2106 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2107 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2108 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2109 unlock_user_struct(target_sd, target_addr, 1);
2113 struct target_seminfo {
/*
 * Copy a host struct seminfo (IPC_INFO/SEM_INFO result) field by field
 * into the guest structure at target_addr, byte-swapping via __put_user.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
2126 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2127 struct seminfo *host_seminfo)
2129 struct target_seminfo *target_seminfo;
2130 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2131 return -TARGET_EFAULT;
2132 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2133 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2134 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2135 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2136 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2137 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2138 __put_user(host_seminfo->semume, &target_seminfo->semume);
2139 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2140 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2141 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2142 unlock_user_struct(target_seminfo, target_addr, 1);
2148 struct semid_ds *buf;
2149 unsigned short *array;
2150 struct seminfo *__buf;
2153 union target_semun {
/*
 * Allocate and fill a host array of semaphore values (for semctl
 * SETALL) from the guest array at target_addr.  The element count is
 * obtained with an IPC_STAT on the semaphore set itself.
 * On success *host_array owns a malloc'd buffer the caller must free.
 * NOTE(review): the malloc() result is not checked, and on the
 * lock_user failure path the freshly malloc'd buffer leaks.
 */
2160 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2161 abi_ulong target_addr)
2164 unsigned short *array;
2166 struct semid_ds semid_ds;
2169 semun.buf = &semid_ds;
2171 ret = semctl(semid, 0, IPC_STAT, semun);
2173 return get_errno(ret);
2175 nsems = semid_ds.sem_nsems;
2177 *host_array = malloc(nsems*sizeof(unsigned short));
2178 array = lock_user(VERIFY_READ, target_addr,
2179 nsems*sizeof(unsigned short), 1);
2181 return -TARGET_EFAULT;
2183 for(i=0; i<nsems; i++) {
2184 __get_user((*host_array)[i], &array[i]);
2186 unlock_user(array, target_addr, 0);
/*
 * Inverse of target_to_host_semarray(): copy the host semaphore-value
 * array (after semctl GETALL) back into the guest array at target_addr.
 * The element count again comes from an IPC_STAT on the set.
 */
2191 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2192 unsigned short **host_array)
2195 unsigned short *array;
2197 struct semid_ds semid_ds;
2200 semun.buf = &semid_ds;
2202 ret = semctl(semid, 0, IPC_STAT, semun);
2204 return get_errno(ret);
2206 nsems = semid_ds.sem_nsems;
2208 array = lock_user(VERIFY_WRITE, target_addr,
2209 nsems*sizeof(unsigned short), 0);
2211 return -TARGET_EFAULT;
2213 for(i=0; i<nsems; i++) {
2214 __put_user((*host_array)[i], &array[i]);
2217 unlock_user(array, target_addr, 1);
/*
 * Emulate semctl(2).  Dispatches on 'cmd': the semun argument is
 * converted according to which union member the command uses (val,
 * array, buf, or __buf).
 * NOTE(review): the switch header, case labels and break statements
 * are elided from this extract.
 */
2222 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2223 union target_semun target_su)
2226 struct semid_ds dsarg;
2227 unsigned short *array;
2228 struct seminfo seminfo;
2229 abi_long ret = -TARGET_EINVAL;
/* GETVAL/SETVAL-style commands: plain integer in semun.val. */
2236 arg.val = tswapl(target_su.val);
2237 ret = get_errno(semctl(semid, semnum, cmd, arg));
2238 target_su.val = tswapl(arg.val);
/* GETALL/SETALL: whole array of semaphore values. */
2242 err = target_to_host_semarray(semid, &array, target_su.array);
2246 ret = get_errno(semctl(semid, semnum, cmd, arg));
2247 err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_STAT/IPC_SET: semid_ds buffer. */
2254 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2258 ret = get_errno(semctl(semid, semnum, cmd, arg));
2259 err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* IPC_INFO/SEM_INFO: seminfo buffer. */
2265 arg.__buf = &seminfo;
2266 ret = get_errno(semctl(semid, semnum, cmd, arg));
2267 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Commands with no argument (e.g. IPC_RMID). */
2275 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2282 struct target_sembuf {
2283 unsigned short sem_num;
/*
 * Convert an array of 'nsops' guest sembuf records at target_addr into
 * the host sembuf array for semop(2).
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
2288 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2289 abi_ulong target_addr,
2292 struct target_sembuf *target_sembuf;
2295 target_sembuf = lock_user(VERIFY_READ, target_addr,
2296 nsops*sizeof(struct target_sembuf), 1);
2298 return -TARGET_EFAULT;
2300 for(i=0; i<nsops; i++) {
2301 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2302 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2303 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2306 unlock_user(target_sembuf, target_addr, 0);
2311 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2313 struct sembuf sops[nsops];
2315 if (target_to_host_sembuf(sops, ptr, nsops))
2316 return -TARGET_EFAULT;
2318 return semop(semid, sops, nsops);
/* Guest-layout mirror of the host's struct msqid_ds (message queue
   state).  On 32-bit targets each 64-bit time field is padded with an
   extra abi_ulong to match the kernel ABI.
   NOTE(review): some fields and the #endif lines are elided here. */
2321 struct target_msqid_ds
2323 struct target_ipc_perm msg_perm;
2324 abi_ulong msg_stime;
2325 #if TARGET_ABI_BITS == 32
2326 abi_ulong __unused1;
2328 abi_ulong msg_rtime;
2329 #if TARGET_ABI_BITS == 32
2330 abi_ulong __unused2;
2332 abi_ulong msg_ctime;
2333 #if TARGET_ABI_BITS == 32
2334 abi_ulong __unused3;
2336 abi_ulong __msg_cbytes;
2338 abi_ulong msg_qbytes;
2339 abi_ulong msg_lspid;
2340 abi_ulong msg_lrpid;
2341 abi_ulong __unused4;
2342 abi_ulong __unused5;
/*
 * Convert a guest msqid_ds at target_addr into a host struct msqid_ds
 * (permissions first, then the scalar fields, byte-swapped).
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
2345 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2346 abi_ulong target_addr)
2348 struct target_msqid_ds *target_md;
2350 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2351 return -TARGET_EFAULT;
2352 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2353 return -TARGET_EFAULT;
2354 host_md->msg_stime = tswapl(target_md->msg_stime);
2355 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2356 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2357 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2358 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2359 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2360 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2361 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2362 unlock_user_struct(target_md, target_addr, 0);
/*
 * Inverse of target_to_host_msqid_ds(): write a host struct msqid_ds
 * back into the guest structure at target_addr.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
2366 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2367 struct msqid_ds *host_md)
2369 struct target_msqid_ds *target_md;
2371 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2372 return -TARGET_EFAULT;
2373 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2374 return -TARGET_EFAULT;
2375 target_md->msg_stime = tswapl(host_md->msg_stime);
2376 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2377 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2378 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2379 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2380 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2381 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2382 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2383 unlock_user_struct(target_md, target_addr, 1);
2387 struct target_msginfo {
2395 unsigned short int msgseg;
/*
 * Copy a host struct msginfo (IPC_INFO/MSG_INFO result) field by field
 * into the guest structure at target_addr, byte-swapping via __put_user.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
2398 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2399 struct msginfo *host_msginfo)
2401 struct target_msginfo *target_msginfo;
2402 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2403 return -TARGET_EFAULT;
2404 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2405 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2406 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2407 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2408 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2409 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2410 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2411 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2412 unlock_user_struct(target_msginfo, target_addr, 1);
/*
 * Emulate msgctl(2), converting the command's buffer between guest and
 * host representations as needed.
 * NOTE(review): the switch header and case labels are elided here.
 */
2416 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2418 struct msqid_ds dsarg;
2419 struct msginfo msginfo;
2420 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET: convert in, call, convert result back out. */
2428 if (target_to_host_msqid_ds(&dsarg,ptr))
2429 return -TARGET_EFAULT;
2430 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2431 if (host_to_target_msqid_ds(ptr,&dsarg))
2432 return -TARGET_EFAULT;
/* IPC_RMID: no buffer. */
2435 ret = get_errno(msgctl(msgid, cmd, NULL));
/* IPC_INFO/MSG_INFO: kernel writes a msginfo through the msqid_ds
   pointer, hence the cast. */
2439 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2440 if (host_to_target_msginfo(ptr, &msginfo))
2441 return -TARGET_EFAULT;
2448 struct target_msgbuf {
/*
 * Emulate msgsnd(2): build a host msgbuf (swapped mtype + raw mtext
 * copy) from the guest message and send it.
 * NOTE(review): malloc() is unchecked, and no free(host_mb) is visible
 * in this extract — possibly on an elided line; verify against the
 * full source.
 */
2453 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2454 unsigned int msgsz, int msgflg)
2456 struct target_msgbuf *target_mb;
2457 struct msgbuf *host_mb;
2460 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2461 return -TARGET_EFAULT;
/* sizeof(long) covers the host mtype preceding the payload. */
2462 host_mb = malloc(msgsz+sizeof(long));
2463 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2464 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2465 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2467 unlock_user_struct(target_mb, msgp, 0);
/*
 * Emulate msgrcv(2): receive into a host msgbuf, then copy the
 * payload and the (swapped) mtype back into the guest msgbuf.
 * A non-negative 'ret' from msgrcv is the number of payload bytes.
 * NOTE(review): malloc() is unchecked here as well.
 */
2472 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2473 unsigned int msgsz, abi_long msgtyp,
2476 struct target_msgbuf *target_mb;
2478 struct msgbuf *host_mb;
2481 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2482 return -TARGET_EFAULT;
2484 host_mb = malloc(msgsz+sizeof(long));
2485 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
/* mtext lives right after the abi_ulong mtype in the guest layout. */
2488 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2489 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2490 if (!target_mtext) {
2491 ret = -TARGET_EFAULT;
2494 memcpy(target_mb->mtext, host_mb->mtext, ret);
2495 unlock_user(target_mtext, target_mtext_addr, ret);
2498 target_mb->mtype = tswapl(host_mb->mtype);
2503 unlock_user_struct(target_mb, msgp, 1);
/* Guest-layout mirror of the host's struct shmid_ds (shared-memory
   segment state).  As with target_msqid_ds, 32-bit targets pad each
   time field with an extra abi_ulong.
   NOTE(review): some fields and the #endif lines are elided here. */
2507 struct target_shmid_ds
2509 struct target_ipc_perm shm_perm;
2510 abi_ulong shm_segsz;
2511 abi_ulong shm_atime;
2512 #if TARGET_ABI_BITS == 32
2513 abi_ulong __unused1;
2515 abi_ulong shm_dtime;
2516 #if TARGET_ABI_BITS == 32
2517 abi_ulong __unused2;
2519 abi_ulong shm_ctime;
2520 #if TARGET_ABI_BITS == 32
2521 abi_ulong __unused3;
2525 abi_ulong shm_nattch;
2526 unsigned long int __unused4;
2527 unsigned long int __unused5;
/*
 * Convert a guest shmid_ds at target_addr into a host struct shmid_ds.
 * NOTE(review): __put_user is used for the target->host direction here
 * (the other converters use __get_user for reads); since tswap is its
 * own inverse the stored values come out in host order, but the usage
 * is backwards by convention — verify against the full source.
 */
2530 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2531 abi_ulong target_addr)
2533 struct target_shmid_ds *target_sd;
2535 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2536 return -TARGET_EFAULT;
2537 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2538 return -TARGET_EFAULT;
2539 __put_user(target_sd->shm_segsz, &host_sd->shm_segsz);
2540 __put_user(target_sd->shm_atime, &host_sd->shm_atime);
2541 __put_user(target_sd->shm_dtime, &host_sd->shm_dtime);
2542 __put_user(target_sd->shm_ctime, &host_sd->shm_ctime);
2543 __put_user(target_sd->shm_cpid, &host_sd->shm_cpid);
2544 __put_user(target_sd->shm_lpid, &host_sd->shm_lpid);
2545 __put_user(target_sd->shm_nattch, &host_sd->shm_nattch);
2546 unlock_user_struct(target_sd, target_addr, 0);
/*
 * Inverse of target_to_host_shmid_ds(): write a host struct shmid_ds
 * back into the guest structure at target_addr.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
2550 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2551 struct shmid_ds *host_sd)
2553 struct target_shmid_ds *target_sd;
2555 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2556 return -TARGET_EFAULT;
2557 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2558 return -TARGET_EFAULT;
2559 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2560 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2561 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2562 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2563 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2564 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2565 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2566 unlock_user_struct(target_sd, target_addr, 1);
2570 struct target_shminfo {
/*
 * Copy a host struct shminfo (IPC_INFO result) into the guest
 * structure at target_addr, byte-swapping via __put_user.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
2578 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2579 struct shminfo *host_shminfo)
2581 struct target_shminfo *target_shminfo;
2582 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2583 return -TARGET_EFAULT;
2584 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2585 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2586 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2587 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2588 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2589 unlock_user_struct(target_shminfo, target_addr, 1);
2593 struct target_shm_info {
2598 abi_ulong swap_attempts;
2599 abi_ulong swap_successes;
/*
 * Copy a host struct shm_info (SHM_INFO result) into the guest
 * structure at target_addr, byte-swapping via __put_user.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
2602 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2603 struct shm_info *host_shm_info)
2605 struct target_shm_info *target_shm_info;
2606 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2607 return -TARGET_EFAULT;
2608 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2609 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2610 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2611 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2612 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2613 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2614 unlock_user_struct(target_shm_info, target_addr, 1);
/*
 * Emulate shmctl(2), converting the command's buffer between guest and
 * host representations as needed.
 * NOTE(review): the switch header and case labels are elided here.
 */
2618 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2620 struct shmid_ds dsarg;
2621 struct shminfo shminfo;
2622 struct shm_info shm_info;
2623 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET: convert in, call, convert result back out. */
2631 if (target_to_host_shmid_ds(&dsarg, buf))
2632 return -TARGET_EFAULT;
2633 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2634 if (host_to_target_shmid_ds(buf, &dsarg))
2635 return -TARGET_EFAULT;
/* IPC_INFO: kernel writes a shminfo through the shmid_ds pointer. */
2638 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2639 if (host_to_target_shminfo(buf, &shminfo))
2640 return -TARGET_EFAULT;
/* SHM_INFO: likewise, but a shm_info. */
2643 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2644 if (host_to_target_shm_info(buf, &shm_info))
2645 return -TARGET_EFAULT;
/* Remaining commands (e.g. IPC_RMID) take no buffer. */
2650 ret = get_errno(shmctl(shmid, cmd, NULL));
/*
 * Emulate shmat(2).  Attaches the segment at the guest-requested
 * address (translated with g2h) or, when no address was given, at a
 * free spot found via mmap_find_vma(); then records the mapping in
 * shm_regions[] and marks the guest pages valid/readable (+writable
 * unless SHM_RDONLY).
 */
2657 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2661 struct shmid_ds shm_info;
2664 /* find out the length of the shared memory segment */
2665 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2666 if (is_error(ret)) {
2667 /* can't get length, bail out */
2674 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2676 abi_ulong mmap_start;
/* No guest address supplied: pick a free guest VA range ourselves. */
2678 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2680 if (mmap_start == -1) {
2682 host_raddr = (void *)-1;
/* SHM_REMAP: the chosen range was only reserved, force the attach. */
2684 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2687 if (host_raddr == (void *)-1) {
2689 return get_errno((long)host_raddr);
2691 raddr=h2g((unsigned long)host_raddr);
2693 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2694 PAGE_VALID | PAGE_READ |
2695 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Remember the mapping so do_shmdt() can clear the flags later. */
2697 for (i = 0; i < N_SHM_REGIONS; i++) {
2698 if (shm_regions[i].start == 0) {
2699 shm_regions[i].start = raddr;
2700 shm_regions[i].size = shm_info.shm_segsz;
2710 static inline abi_long do_shmdt(abi_ulong shmaddr)
2714 for (i = 0; i < N_SHM_REGIONS; ++i) {
2715 if (shm_regions[i].start == shmaddr) {
2716 shm_regions[i].start = 0;
2717 page_set_flags(shmaddr, shm_regions[i].size, 0);
2722 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of 'call'
 * select the operation, the high 16 bits carry the interface version. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old interface: ptr points at a kludge struct holding the
                 * real message pointer and type. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */
/* First expansion of syscall_types.h: produce one STRUCT_<name>
   enumerator per kernel structure known to the thunk layer. */
#define STRUCT(name, list...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
#include "syscall_types.h"
#undef STRUCT_SPECIAL
/* Second expansion: produce a TYPE_NULL-terminated argtype descriptor
   array struct_<name>_def for each ordinary structure; special ones are
   hand-written elsewhere, so STRUCT_SPECIAL expands to nothing here. */
#define STRUCT(name, list...) static const argtype struct_ ## name ## _def[] = { list, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT_SPECIAL
/* One row per emulated ioctl: the guest request number, the host request
   number to issue, and a thunk type description of the argument so it can
   be converted between guest and host layouts. */
typedef struct IOCTLEntry {
    unsigned int target_cmd;   /* request value as seen by the guest */
    unsigned int host_cmd;     /* request value passed to the host ioctl() */
    const argtype arg_type[5]; /* thunk description of the third argument */
#define IOC_R 0x0001           /* data is read back to the guest */
#define IOC_W 0x0002           /* guest data is written to the host */
#define IOC_RW (IOC_R | IOC_W)
/* Upper bound on a thunk-converted ioctl argument (scratch buffer size). */
#define MAX_STRUCT_SIZE 4096
/* The table itself; rows come from an IOCTL() expansion of ioctls.h. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, types...) \
    { TARGET_ ## cmd, cmd, #cmd, access, { types } },
2864 /* ??? Implement proper locking for ioctls. */
2865 /* do_ioctl() Must return target values and target errnos. */
2866 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
2868 const IOCTLEntry *ie;
2869 const argtype *arg_type;
2871 uint8_t buf_temp[MAX_STRUCT_SIZE];
2877 if (ie->target_cmd == 0) {
2878 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
2879 return -TARGET_ENOSYS;
2881 if (ie->target_cmd == cmd)
2885 arg_type = ie->arg_type;
2887 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
2889 switch(arg_type[0]) {
2892 ret = get_errno(ioctl(fd, ie->host_cmd));
2897 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
2901 target_size = thunk_type_size(arg_type, 0);
2902 switch(ie->access) {
2904 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2905 if (!is_error(ret)) {
2906 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2908 return -TARGET_EFAULT;
2909 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2910 unlock_user(argptr, arg, target_size);
2914 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2916 return -TARGET_EFAULT;
2917 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2918 unlock_user(argptr, arg, 0);
2919 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2923 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2925 return -TARGET_EFAULT;
2926 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2927 unlock_user(argptr, arg, 0);
2928 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2929 if (!is_error(ret)) {
2930 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2932 return -TARGET_EFAULT;
2933 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2934 unlock_user(argptr, arg, target_size);
2940 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
2941 (long)cmd, arg_type[0]);
2942 ret = -TARGET_ENOSYS;
2948 static const bitmask_transtbl iflag_tbl[] = {
2949 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
2950 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
2951 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
2952 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
2953 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
2954 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
2955 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
2956 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
2957 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
2958 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
2959 { TARGET_IXON, TARGET_IXON, IXON, IXON },
2960 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
2961 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
2962 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
2966 static const bitmask_transtbl oflag_tbl[] = {
2967 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
2968 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
2969 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
2970 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
2971 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
2972 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
2973 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
2974 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
2975 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
2976 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
2977 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
2978 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
2979 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
2980 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
2981 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
2982 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
2983 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
2984 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
2985 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
2986 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
2987 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
2988 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
2989 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
2990 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
2994 static const bitmask_transtbl cflag_tbl[] = {
2995 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
2996 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
2997 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
2998 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
2999 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3000 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3001 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3002 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3003 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3004 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3005 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3006 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3007 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3008 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3009 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3010 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3011 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3012 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3013 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3014 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3015 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3016 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3017 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3018 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3019 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3020 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3021 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3022 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3023 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3024 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3025 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3029 static const bitmask_transtbl lflag_tbl[] = {
3030 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3031 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3032 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3033 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3034 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3035 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3036 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3037 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3038 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3039 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3040 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3041 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3042 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3043 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3044 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3048 static void target_to_host_termios (void *dst, const void *src)
3050 struct host_termios *host = dst;
3051 const struct target_termios *target = src;
3054 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3056 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3058 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3060 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3061 host->c_line = target->c_line;
3063 memset(host->c_cc, 0, sizeof(host->c_cc));
3064 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3065 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3066 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3067 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3068 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3069 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3070 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3071 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3072 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3073 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3074 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3075 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3076 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3077 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3078 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3079 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3080 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3083 static void host_to_target_termios (void *dst, const void *src)
3085 struct target_termios *target = dst;
3086 const struct host_termios *host = src;
3089 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3091 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3093 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3095 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3096 target->c_line = host->c_line;
3098 memset(target->c_cc, 0, sizeof(target->c_cc));
3099 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3100 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3101 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3102 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3103 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3104 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3105 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3106 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3107 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3108 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3109 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3110 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3111 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3112 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3113 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3114 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3115 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3118 static const StructEntry struct_termios_def = {
3119 .convert = { host_to_target_termios, target_to_host_termios },
3120 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3121 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3124 static bitmask_transtbl mmap_flags_tbl[] = {
3125 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3126 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3127 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3128 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3129 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3130 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3131 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3132 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3136 #if defined(TARGET_I386)
3138 /* NOTE: there is really one LDT for all the threads */
3139 static uint8_t *ldt_table;
3141 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3148 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3149 if (size > bytecount)
3151 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3153 return -TARGET_EFAULT;
3154 /* ??? Should this by byteswapped? */
3155 memcpy(p, ldt_table, size);
3156 unlock_user(p, ptr, size);
3160 /* XXX: add locking support */
3161 static abi_long write_ldt(CPUX86State *env,
3162 abi_ulong ptr, unsigned long bytecount, int oldmode)
3164 struct target_modify_ldt_ldt_s ldt_info;
3165 struct target_modify_ldt_ldt_s *target_ldt_info;
3166 int seg_32bit, contents, read_exec_only, limit_in_pages;
3167 int seg_not_present, useable, lm;
3168 uint32_t *lp, entry_1, entry_2;
3170 if (bytecount != sizeof(ldt_info))
3171 return -TARGET_EINVAL;
3172 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3173 return -TARGET_EFAULT;
3174 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3175 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3176 ldt_info.limit = tswap32(target_ldt_info->limit);
3177 ldt_info.flags = tswap32(target_ldt_info->flags);
3178 unlock_user_struct(target_ldt_info, ptr, 0);
3180 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3181 return -TARGET_EINVAL;
3182 seg_32bit = ldt_info.flags & 1;
3183 contents = (ldt_info.flags >> 1) & 3;
3184 read_exec_only = (ldt_info.flags >> 3) & 1;
3185 limit_in_pages = (ldt_info.flags >> 4) & 1;
3186 seg_not_present = (ldt_info.flags >> 5) & 1;
3187 useable = (ldt_info.flags >> 6) & 1;
3191 lm = (ldt_info.flags >> 7) & 1;
3193 if (contents == 3) {
3195 return -TARGET_EINVAL;
3196 if (seg_not_present == 0)
3197 return -TARGET_EINVAL;
3199 /* allocate the LDT */
3201 env->ldt.base = target_mmap(0,
3202 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3203 PROT_READ|PROT_WRITE,
3204 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3205 if (env->ldt.base == -1)
3206 return -TARGET_ENOMEM;
3207 memset(g2h(env->ldt.base), 0,
3208 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3209 env->ldt.limit = 0xffff;
3210 ldt_table = g2h(env->ldt.base);
3213 /* NOTE: same code as Linux kernel */
3214 /* Allow LDTs to be cleared by the user. */
3215 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3218 read_exec_only == 1 &&
3220 limit_in_pages == 0 &&
3221 seg_not_present == 1 &&
3229 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3230 (ldt_info.limit & 0x0ffff);
3231 entry_2 = (ldt_info.base_addr & 0xff000000) |
3232 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3233 (ldt_info.limit & 0xf0000) |
3234 ((read_exec_only ^ 1) << 9) |
3236 ((seg_not_present ^ 1) << 15) |
3238 (limit_in_pages << 23) |
3242 entry_2 |= (useable << 20);
3244 /* Install the new entry ... */
3246 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3247 lp[0] = tswap32(entry_1);
3248 lp[1] = tswap32(entry_2);
3252 /* specific and weird i386 syscalls */
3253 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3254 unsigned long bytecount)
3260 ret = read_ldt(ptr, bytecount);
3263 ret = write_ldt(env, ptr, bytecount, 1);
3266 ret = write_ldt(env, ptr, bytecount, 0);
3269 ret = -TARGET_ENOSYS;
3275 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3276 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3278 uint64_t *gdt_table = g2h(env->gdt.base);
3279 struct target_modify_ldt_ldt_s ldt_info;
3280 struct target_modify_ldt_ldt_s *target_ldt_info;
3281 int seg_32bit, contents, read_exec_only, limit_in_pages;
3282 int seg_not_present, useable, lm;
3283 uint32_t *lp, entry_1, entry_2;
3286 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3287 if (!target_ldt_info)
3288 return -TARGET_EFAULT;
3289 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3290 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3291 ldt_info.limit = tswap32(target_ldt_info->limit);
3292 ldt_info.flags = tswap32(target_ldt_info->flags);
3293 if (ldt_info.entry_number == -1) {
3294 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3295 if (gdt_table[i] == 0) {
3296 ldt_info.entry_number = i;
3297 target_ldt_info->entry_number = tswap32(i);
3302 unlock_user_struct(target_ldt_info, ptr, 1);
3304 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3305 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3306 return -TARGET_EINVAL;
3307 seg_32bit = ldt_info.flags & 1;
3308 contents = (ldt_info.flags >> 1) & 3;
3309 read_exec_only = (ldt_info.flags >> 3) & 1;
3310 limit_in_pages = (ldt_info.flags >> 4) & 1;
3311 seg_not_present = (ldt_info.flags >> 5) & 1;
3312 useable = (ldt_info.flags >> 6) & 1;
3316 lm = (ldt_info.flags >> 7) & 1;
3319 if (contents == 3) {
3320 if (seg_not_present == 0)
3321 return -TARGET_EINVAL;
3324 /* NOTE: same code as Linux kernel */
3325 /* Allow LDTs to be cleared by the user. */
3326 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3327 if ((contents == 0 &&
3328 read_exec_only == 1 &&
3330 limit_in_pages == 0 &&
3331 seg_not_present == 1 &&
3339 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3340 (ldt_info.limit & 0x0ffff);
3341 entry_2 = (ldt_info.base_addr & 0xff000000) |
3342 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3343 (ldt_info.limit & 0xf0000) |
3344 ((read_exec_only ^ 1) << 9) |
3346 ((seg_not_present ^ 1) << 15) |
3348 (limit_in_pages << 23) |
3353 /* Install the new entry ... */
3355 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3356 lp[0] = tswap32(entry_1);
3357 lp[1] = tswap32(entry_2);
3361 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3363 struct target_modify_ldt_ldt_s *target_ldt_info;
3364 uint64_t *gdt_table = g2h(env->gdt.base);
3365 uint32_t base_addr, limit, flags;
3366 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3367 int seg_not_present, useable, lm;
3368 uint32_t *lp, entry_1, entry_2;
3370 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3371 if (!target_ldt_info)
3372 return -TARGET_EFAULT;
3373 idx = tswap32(target_ldt_info->entry_number);
3374 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3375 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3376 unlock_user_struct(target_ldt_info, ptr, 1);
3377 return -TARGET_EINVAL;
3379 lp = (uint32_t *)(gdt_table + idx);
3380 entry_1 = tswap32(lp[0]);
3381 entry_2 = tswap32(lp[1]);
3383 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3384 contents = (entry_2 >> 10) & 3;
3385 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3386 seg_32bit = (entry_2 >> 22) & 1;
3387 limit_in_pages = (entry_2 >> 23) & 1;
3388 useable = (entry_2 >> 20) & 1;
3392 lm = (entry_2 >> 21) & 1;
3394 flags = (seg_32bit << 0) | (contents << 1) |
3395 (read_exec_only << 3) | (limit_in_pages << 4) |
3396 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3397 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3398 base_addr = (entry_1 >> 16) |
3399 (entry_2 & 0xff000000) |
3400 ((entry_2 & 0xff) << 16);
3401 target_ldt_info->base_addr = tswapl(base_addr);
3402 target_ldt_info->limit = tswap32(limit);
3403 target_ldt_info->flags = tswap32(flags);
3404 unlock_user_struct(target_ldt_info, ptr, 1);
3407 #endif /* TARGET_I386 && TARGET_ABI32 */
3409 #ifndef TARGET_ABI32
3410 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3417 case TARGET_ARCH_SET_GS:
3418 case TARGET_ARCH_SET_FS:
3419 if (code == TARGET_ARCH_SET_GS)
3423 cpu_x86_load_seg(env, idx, 0);
3424 env->segs[idx].base = addr;
3426 case TARGET_ARCH_GET_GS:
3427 case TARGET_ARCH_GET_FS:
3428 if (code == TARGET_ARCH_GET_GS)
3432 val = env->segs[idx].base;
3433 if (put_user(val, addr, abi_ulong))
3434 return -TARGET_EFAULT;
3437 ret = -TARGET_EINVAL;
3444 #endif /* defined(TARGET_I386) */
#if defined(USE_NPTL)

/* Stack size handed to pthread_create() for emulated guest threads. */
#define NEW_STACK_SIZE PTHREAD_STACK_MIN

/* Held by the parent in do_fork() while it finishes initializing the new
   thread's TLS state; the child blocks on it before running guest code. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Parent/child handshake used during thread creation. */
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    /* Guest addresses to store the new thread id into, when requested via
       CLONE_CHILD_SETTID / CLONE_PARENT_SETTID. */
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;

/* Thread entry point for a CLONE_VM clone: publish the tid, restore the
   signal mask, signal readiness to the parent, then wait for the parent
   to drop clone_lock before entering the guest CPU loop. */
static void *clone_func(void *arg)
    new_thread_info *info = arg;

    ts = (TaskState *)thread_env->opaque;
    info->tid = gettid();
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);

/* this stack is the equivalent of the kernel stack associated with a
   thread */
#define NEW_STACK_SIZE 8192

/* Non-NPTL path: entry point passed directly to host clone(2). */
static int clone_func(void *arg)
    CPUState *env = arg;
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/* Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests a thread sharing
   this address space (pthread on NPTL builds, host clone() otherwise);
   anything else is treated as a fork. */
static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
#if defined(USE_NPTL)
    unsigned int nptl_flags;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)env->opaque;
#if defined(USE_NPTL)
        new_thread_info info;
        pthread_attr_t attr;
        /* TaskState and the thread's "kernel" stack share one allocation. */
        ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
        init_task_state(ts);
        new_stack = ts->stack;
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_env->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
#if defined(USE_NPTL)
        /* The NPTL-only flags are handled here, not by the host clone. */
        flags &= ~CLONE_NPTL_FLAGS2;
        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);
        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);

        /* Wait for the child to initialize.  */
        pthread_cond_wait(&info.cond, &info.mutex);
        /* NOTE(review): 'ret' here is the pthread_create result, not the
           child tid — the child also stores the real tid itself; confirm. */
        if (flags & CLONE_PARENT_SETTID)
            put_user_u32(ret, parent_tidptr);

        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
        if (flags & CLONE_NPTL_FLAGS2)
        /* This is probably going to die very quickly, but do it anyway.  */
        ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
        ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
#if defined(USE_NPTL)
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)env->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
3633 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
3636 struct target_flock *target_fl;
3637 struct flock64 fl64;
3638 struct target_flock64 *target_fl64;
3642 case TARGET_F_GETLK:
3643 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3644 return -TARGET_EFAULT;
3645 fl.l_type = tswap16(target_fl->l_type);
3646 fl.l_whence = tswap16(target_fl->l_whence);
3647 fl.l_start = tswapl(target_fl->l_start);
3648 fl.l_len = tswapl(target_fl->l_len);
3649 fl.l_pid = tswapl(target_fl->l_pid);
3650 unlock_user_struct(target_fl, arg, 0);
3651 ret = get_errno(fcntl(fd, F_GETLK, &fl));
3653 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
3654 return -TARGET_EFAULT;
3655 target_fl->l_type = tswap16(fl.l_type);
3656 target_fl->l_whence = tswap16(fl.l_whence);
3657 target_fl->l_start = tswapl(fl.l_start);
3658 target_fl->l_len = tswapl(fl.l_len);
3659 target_fl->l_pid = tswapl(fl.l_pid);
3660 unlock_user_struct(target_fl, arg, 1);
3664 case TARGET_F_SETLK:
3665 case TARGET_F_SETLKW:
3666 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3667 return -TARGET_EFAULT;
3668 fl.l_type = tswap16(target_fl->l_type);
3669 fl.l_whence = tswap16(target_fl->l_whence);
3670 fl.l_start = tswapl(target_fl->l_start);
3671 fl.l_len = tswapl(target_fl->l_len);
3672 fl.l_pid = tswapl(target_fl->l_pid);
3673 unlock_user_struct(target_fl, arg, 0);
3674 ret = get_errno(fcntl(fd, F_SETLK+(cmd-TARGET_F_SETLK), &fl));
3677 case TARGET_F_GETLK64:
3678 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3679 return -TARGET_EFAULT;
3680 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3681 fl64.l_whence = tswap16(target_fl64->l_whence);
3682 fl64.l_start = tswapl(target_fl64->l_start);
3683 fl64.l_len = tswapl(target_fl64->l_len);
3684 fl64.l_pid = tswap16(target_fl64->l_pid);
3685 unlock_user_struct(target_fl64, arg, 0);
3686 ret = get_errno(fcntl(fd, F_GETLK64, &fl64));
3688 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
3689 return -TARGET_EFAULT;
3690 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
3691 target_fl64->l_whence = tswap16(fl64.l_whence);
3692 target_fl64->l_start = tswapl(fl64.l_start);
3693 target_fl64->l_len = tswapl(fl64.l_len);
3694 target_fl64->l_pid = tswapl(fl64.l_pid);
3695 unlock_user_struct(target_fl64, arg, 1);
3698 case TARGET_F_SETLK64:
3699 case TARGET_F_SETLKW64:
3700 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3701 return -TARGET_EFAULT;
3702 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3703 fl64.l_whence = tswap16(target_fl64->l_whence);
3704 fl64.l_start = tswapl(target_fl64->l_start);
3705 fl64.l_len = tswapl(target_fl64->l_len);
3706 fl64.l_pid = tswap16(target_fl64->l_pid);
3707 unlock_user_struct(target_fl64, arg, 0);
3708 ret = get_errno(fcntl(fd, F_SETLK64+(cmd-TARGET_F_SETLK64), &fl64));
3712 ret = get_errno(fcntl(fd, cmd, arg));
3714 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
3719 ret = get_errno(fcntl(fd, cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
3723 ret = get_errno(fcntl(fd, cmd, arg));
/* 32-bit uid -> 16-bit uid: ids that do not fit collapse to the
 * conventional overflow id 65534 ("nobody"). */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

/* 32-bit gid -> 16-bit gid, same overflow convention as high2lowuid(). */
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* 16-bit uid -> 32-bit uid: 0xffff is the "no change" sentinel (-1) and
 * must stay -1 rather than become 65535. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

/* 16-bit gid -> 32-bit gid, same -1 sentinel handling as low2highuid(). */
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
3763 #endif /* USE_UID16 */
3765 void syscall_init(void)
3768 const argtype *arg_type;
3772 #define STRUCT(name, list...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3773 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3774 #include "syscall_types.h"
3776 #undef STRUCT_SPECIAL
3778 /* we patch the ioctl size if necessary. We rely on the fact that
3779 no ioctl has all the bits at '1' in the size field */
3781 while (ie->target_cmd != 0) {
3782 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
3783 TARGET_IOC_SIZEMASK) {
3784 arg_type = ie->arg_type;
3785 if (arg_type[0] != TYPE_PTR) {
3786 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
3791 size = thunk_type_size(arg_type, 0);
3792 ie->target_cmd = (ie->target_cmd &
3793 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
3794 (size << TARGET_IOC_SIZESHIFT);
3797 /* Build target_to_host_errno_table[] table from
3798 * host_to_target_errno_table[]. */
3799 for (i=0; i < ERRNO_TABLE_SIZE; i++)
3800 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
3802 /* automatic consistency check if same arch */
3803 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3804 (defined(__x86_64__) && defined(TARGET_X86_64))
3805 if (unlikely(ie->target_cmd != ie->host_cmd)) {
3806 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3807 ie->name, ie->target_cmd, ie->host_cmd);
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit offset from the two 32-bit syscall arguments a
 * 32-bit guest ABI uses; which argument carries the high word depends on
 * the guest's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in the first argument; the second is
 * ignored. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* Emulate truncate64(2): reassemble the 64-bit length from its two
 * 32-bit halves and call the host.  Returns a target errno. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
#ifdef TARGET_ARM
    /* ARM EABI aligns 64-bit arguments to an even register pair, which
       inserts a padding argument; skip over it. */
    if (((CPUARMState *)cpu_env)->eabi)
      {
        arg2 = arg3;
        arg3 = arg4;
      }
#endif
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64(2): reassemble the 64-bit length from its two
 * 32-bit halves and call the host.  Returns a target errno. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
#ifdef TARGET_ARM
    /* ARM EABI aligns 64-bit arguments to an even register pair, which
       inserts a padding argument; skip over it. */
    if (((CPUARMState *)cpu_env)->eabi)
      {
        arg2 = arg3;
        arg3 = arg4;
      }
#endif
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
3864 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
3865 abi_ulong target_addr)
3867 struct target_timespec *target_ts;
3869 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
3870 return -TARGET_EFAULT;
3871 host_ts->tv_sec = tswapl(target_ts->tv_sec);
3872 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
3873 unlock_user_struct(target_ts, target_addr, 0);
3877 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
3878 struct timespec *host_ts)
3880 struct target_timespec *target_ts;
3882 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
3883 return -TARGET_EFAULT;
3884 target_ts->tv_sec = tswapl(host_ts->tv_sec);
3885 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
3886 unlock_user_struct(target_ts, target_addr, 1);
3890 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
3891 static inline abi_long host_to_target_stat64(void *cpu_env,
3892 abi_ulong target_addr,
3893 struct stat *host_st)
3896 if (((CPUARMState *)cpu_env)->eabi) {
3897 struct target_eabi_stat64 *target_st;
3899 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3900 return -TARGET_EFAULT;
3901 memset(target_st, 0, sizeof(struct target_eabi_stat64));
3902 __put_user(host_st->st_dev, &target_st->st_dev);
3903 __put_user(host_st->st_ino, &target_st->st_ino);
3904 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3905 __put_user(host_st->st_ino, &target_st->__st_ino);
3907 __put_user(host_st->st_mode, &target_st->st_mode);
3908 __put_user(host_st->st_nlink, &target_st->st_nlink);
3909 __put_user(host_st->st_uid, &target_st->st_uid);
3910 __put_user(host_st->st_gid, &target_st->st_gid);
3911 __put_user(host_st->st_rdev, &target_st->st_rdev);
3912 __put_user(host_st->st_size, &target_st->st_size);
3913 __put_user(host_st->st_blksize, &target_st->st_blksize);
3914 __put_user(host_st->st_blocks, &target_st->st_blocks);
3915 __put_user(host_st->st_atime, &target_st->target_st_atime);
3916 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3917 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3918 unlock_user_struct(target_st, target_addr, 1);
3922 #if TARGET_LONG_BITS == 64
3923 struct target_stat *target_st;
3925 struct target_stat64 *target_st;
3928 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3929 return -TARGET_EFAULT;
3930 memset(target_st, 0, sizeof(*target_st));
3931 __put_user(host_st->st_dev, &target_st->st_dev);
3932 __put_user(host_st->st_ino, &target_st->st_ino);
3933 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3934 __put_user(host_st->st_ino, &target_st->__st_ino);
3936 __put_user(host_st->st_mode, &target_st->st_mode);
3937 __put_user(host_st->st_nlink, &target_st->st_nlink);
3938 __put_user(host_st->st_uid, &target_st->st_uid);
3939 __put_user(host_st->st_gid, &target_st->st_gid);
3940 __put_user(host_st->st_rdev, &target_st->st_rdev);
3941 /* XXX: better use of kernel struct */
3942 __put_user(host_st->st_size, &target_st->st_size);
3943 __put_user(host_st->st_blksize, &target_st->st_blksize);
3944 __put_user(host_st->st_blocks, &target_st->st_blocks);
3945 __put_user(host_st->st_atime, &target_st->target_st_atime);
3946 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3947 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3948 unlock_user_struct(target_st, target_addr, 1);
3955 #if defined(USE_NPTL)
3956 /* ??? Using host futex calls even when target atomic operations
3957 are not really atomic probably breaks things. However implementing
3958 futexes locally would make futexes shared between multiple processes
3959 tricky. However they're probably useless because guest atomic
3960 operations won't work either. */
3961 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
3962 target_ulong uaddr2, int val3)
3964 struct timespec ts, *pts;
3966 /* ??? We assume FUTEX_* constants are the same on both host
3968 #ifdef FUTEX_CMD_MASK
3969 switch ((op&FUTEX_CMD_MASK)) {
3976 target_to_host_timespec(pts, timeout);
3980 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
3983 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
3985 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, g2h(uaddr2), val3 ));
3987 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
3989 return get_errno(sys_futex(g2h(uaddr), op, val,
3990 NULL, g2h(uaddr2), 0));
3991 case FUTEX_CMP_REQUEUE:
3992 return get_errno(sys_futex(g2h(uaddr), op, val,
3993 NULL, g2h(uaddr2), tswap32(val3)));
3995 return -TARGET_ENOSYS;
/* Map host to target signal numbers for the wait family of syscalls.
 * Assume all other status bits are the same.
 *
 * A wait status encodes the terminating signal in the low 7 bits
 * (WIFSIGNALED) or the stopping signal in bits 8-15 (WIFSTOPPED);
 * only the signal number needs translating, the remaining bits are
 * passed through unchanged.  (The elided dump was missing the
 * WIFSTOPPED continuation and the pass-through return; restored.) */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
4014 int get_osversion(void)
4016 static int osversion;
4017 struct new_utsname buf;
4022 if (qemu_uname_release && *qemu_uname_release) {
4023 s = qemu_uname_release;
4025 if (sys_uname(&buf))
4030 for (i = 0; i < 3; i++) {
4032 while (*s >= '0' && *s <= '9') {
4037 tmp = (tmp << 8) + n;
4045 /* do_syscall() should always have a single exit point at the end so
4046 that actions, such as logging of syscall results, can be performed.
4047 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4048 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4049 abi_long arg2, abi_long arg3, abi_long arg4,
4050 abi_long arg5, abi_long arg6)
4058 gemu_log("syscall %d", num);
4061 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4064 case TARGET_NR_exit:
4066 /* In old applications this may be used to implement _exit(2).
4067 However in threaded applictions it is used for thread termination,
4068 and _exit_group is used for application termination.
4069 Do thread termination if we have more then one thread. */
4070 /* FIXME: This probably breaks if a signal arrives. We should probably
4071 be disabling signals. */
4072 if (first_cpu->next_cpu) {
4079 while (p && p != (CPUState *)cpu_env) {
4080 lastp = &p->next_cpu;
4083 /* If we didn't find the CPU for this thread then something is
4087 /* Remove the CPU from the list. */
4088 *lastp = p->next_cpu;
4090 TaskState *ts = ((CPUState *)cpu_env)->opaque;
4091 if (ts->child_tidptr) {
4092 put_user_u32(0, ts->child_tidptr);
4093 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4096 /* TODO: Free CPU state. */
4103 gdb_exit(cpu_env, arg1);
4105 ret = 0; /* avoid warning */
4107 case TARGET_NR_read:
4111 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4113 ret = get_errno(read(arg1, p, arg3));
4114 unlock_user(p, arg2, ret);
4117 case TARGET_NR_write:
4118 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4120 ret = get_errno(write(arg1, p, arg3));
4121 unlock_user(p, arg2, 0);
4123 case TARGET_NR_open:
4124 if (!(p = lock_user_string(arg1)))
4126 ret = get_errno(open(path(p),
4127 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4129 unlock_user(p, arg1, 0);
4131 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4132 case TARGET_NR_openat:
4133 if (!(p = lock_user_string(arg2)))
4135 ret = get_errno(sys_openat(arg1,
4137 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4139 unlock_user(p, arg2, 0);
4142 case TARGET_NR_close:
4143 ret = get_errno(close(arg1));
4148 case TARGET_NR_fork:
4149 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4151 #ifdef TARGET_NR_waitpid
4152 case TARGET_NR_waitpid:
4155 ret = get_errno(waitpid(arg1, &status, arg3));
4156 if (!is_error(ret) && arg2
4157 && put_user_s32(host_to_target_waitstatus(status), arg2))
4162 #ifdef TARGET_NR_waitid
4163 case TARGET_NR_waitid:
4167 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4168 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4169 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4171 host_to_target_siginfo(p, &info);
4172 unlock_user(p, arg3, sizeof(target_siginfo_t));
4177 #ifdef TARGET_NR_creat /* not on alpha */
4178 case TARGET_NR_creat:
4179 if (!(p = lock_user_string(arg1)))
4181 ret = get_errno(creat(p, arg2));
4182 unlock_user(p, arg1, 0);
4185 case TARGET_NR_link:
4188 p = lock_user_string(arg1);
4189 p2 = lock_user_string(arg2);
4191 ret = -TARGET_EFAULT;
4193 ret = get_errno(link(p, p2));
4194 unlock_user(p2, arg2, 0);
4195 unlock_user(p, arg1, 0);
4198 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4199 case TARGET_NR_linkat:
4204 p = lock_user_string(arg2);
4205 p2 = lock_user_string(arg4);
4207 ret = -TARGET_EFAULT;
4209 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4210 unlock_user(p, arg2, 0);
4211 unlock_user(p2, arg4, 0);
4215 case TARGET_NR_unlink:
4216 if (!(p = lock_user_string(arg1)))
4218 ret = get_errno(unlink(p));
4219 unlock_user(p, arg1, 0);
4221 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4222 case TARGET_NR_unlinkat:
4223 if (!(p = lock_user_string(arg2)))
4225 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4226 unlock_user(p, arg2, 0);
4229 case TARGET_NR_execve:
4231 char **argp, **envp;
4234 abi_ulong guest_argp;
4235 abi_ulong guest_envp;
4241 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4242 if (get_user_ual(addr, gp))
4250 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4251 if (get_user_ual(addr, gp))
4258 argp = alloca((argc + 1) * sizeof(void *));
4259 envp = alloca((envc + 1) * sizeof(void *));
4261 for (gp = guest_argp, q = argp; gp;
4262 gp += sizeof(abi_ulong), q++) {
4263 if (get_user_ual(addr, gp))
4267 if (!(*q = lock_user_string(addr)))
4272 for (gp = guest_envp, q = envp; gp;
4273 gp += sizeof(abi_ulong), q++) {
4274 if (get_user_ual(addr, gp))
4278 if (!(*q = lock_user_string(addr)))
4283 if (!(p = lock_user_string(arg1)))
4285 ret = get_errno(execve(p, argp, envp));
4286 unlock_user(p, arg1, 0);
4291 ret = -TARGET_EFAULT;
4294 for (gp = guest_argp, q = argp; *q;
4295 gp += sizeof(abi_ulong), q++) {
4296 if (get_user_ual(addr, gp)
4299 unlock_user(*q, addr, 0);
4301 for (gp = guest_envp, q = envp; *q;
4302 gp += sizeof(abi_ulong), q++) {
4303 if (get_user_ual(addr, gp)
4306 unlock_user(*q, addr, 0);
4310 case TARGET_NR_chdir:
4311 if (!(p = lock_user_string(arg1)))
4313 ret = get_errno(chdir(p));
4314 unlock_user(p, arg1, 0);
4316 #ifdef TARGET_NR_time
4317 case TARGET_NR_time:
4320 ret = get_errno(time(&host_time));
4323 && put_user_sal(host_time, arg1))
4328 case TARGET_NR_mknod:
4329 if (!(p = lock_user_string(arg1)))
4331 ret = get_errno(mknod(p, arg2, arg3));
4332 unlock_user(p, arg1, 0);
4334 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4335 case TARGET_NR_mknodat:
4336 if (!(p = lock_user_string(arg2)))
4338 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4339 unlock_user(p, arg2, 0);
4342 case TARGET_NR_chmod:
4343 if (!(p = lock_user_string(arg1)))
4345 ret = get_errno(chmod(p, arg2));
4346 unlock_user(p, arg1, 0);
4348 #ifdef TARGET_NR_break
4349 case TARGET_NR_break:
4352 #ifdef TARGET_NR_oldstat
4353 case TARGET_NR_oldstat:
4356 case TARGET_NR_lseek:
4357 ret = get_errno(lseek(arg1, arg2, arg3));
4359 #ifdef TARGET_NR_getxpid
4360 case TARGET_NR_getxpid:
4362 case TARGET_NR_getpid:
4364 ret = get_errno(getpid());
4366 case TARGET_NR_mount:
4368 /* need to look at the data field */
4370 p = lock_user_string(arg1);
4371 p2 = lock_user_string(arg2);
4372 p3 = lock_user_string(arg3);
4373 if (!p || !p2 || !p3)
4374 ret = -TARGET_EFAULT;
4376 /* FIXME - arg5 should be locked, but it isn't clear how to
4377 * do that since it's not guaranteed to be a NULL-terminated
4380 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4381 unlock_user(p, arg1, 0);
4382 unlock_user(p2, arg2, 0);
4383 unlock_user(p3, arg3, 0);
4386 #ifdef TARGET_NR_umount
4387 case TARGET_NR_umount:
4388 if (!(p = lock_user_string(arg1)))
4390 ret = get_errno(umount(p));
4391 unlock_user(p, arg1, 0);
4394 #ifdef TARGET_NR_stime /* not on alpha */
4395 case TARGET_NR_stime:
4398 if (get_user_sal(host_time, arg1))
4400 ret = get_errno(stime(&host_time));
4404 case TARGET_NR_ptrace:
4406 #ifdef TARGET_NR_alarm /* not on alpha */
4407 case TARGET_NR_alarm:
4411 #ifdef TARGET_NR_oldfstat
4412 case TARGET_NR_oldfstat:
4415 #ifdef TARGET_NR_pause /* not on alpha */
4416 case TARGET_NR_pause:
4417 ret = get_errno(pause());
4420 #ifdef TARGET_NR_utime
4421 case TARGET_NR_utime:
4423 struct utimbuf tbuf, *host_tbuf;
4424 struct target_utimbuf *target_tbuf;
4426 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4428 tbuf.actime = tswapl(target_tbuf->actime);
4429 tbuf.modtime = tswapl(target_tbuf->modtime);
4430 unlock_user_struct(target_tbuf, arg2, 0);
4435 if (!(p = lock_user_string(arg1)))
4437 ret = get_errno(utime(p, host_tbuf));
4438 unlock_user(p, arg1, 0);
4442 case TARGET_NR_utimes:
4444 struct timeval *tvp, tv[2];
4446 if (copy_from_user_timeval(&tv[0], arg2)
4447 || copy_from_user_timeval(&tv[1],
4448 arg2 + sizeof(struct target_timeval)))
4454 if (!(p = lock_user_string(arg1)))
4456 ret = get_errno(utimes(p, tvp));
4457 unlock_user(p, arg1, 0);
4460 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4461 case TARGET_NR_futimesat:
4463 struct timeval *tvp, tv[2];
4465 if (copy_from_user_timeval(&tv[0], arg3)
4466 || copy_from_user_timeval(&tv[1],
4467 arg3 + sizeof(struct target_timeval)))
4473 if (!(p = lock_user_string(arg2)))
4475 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4476 unlock_user(p, arg2, 0);
4480 #ifdef TARGET_NR_stty
4481 case TARGET_NR_stty:
4484 #ifdef TARGET_NR_gtty
4485 case TARGET_NR_gtty:
4488 case TARGET_NR_access:
4489 if (!(p = lock_user_string(arg1)))
4491 ret = get_errno(access(p, arg2));
4492 unlock_user(p, arg1, 0);
4494 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4495 case TARGET_NR_faccessat:
4496 if (!(p = lock_user_string(arg2)))
4498 ret = get_errno(sys_faccessat(arg1, p, arg3));
4499 unlock_user(p, arg2, 0);
4502 #ifdef TARGET_NR_nice /* not on alpha */
4503 case TARGET_NR_nice:
4504 ret = get_errno(nice(arg1));
4507 #ifdef TARGET_NR_ftime
4508 case TARGET_NR_ftime:
4511 case TARGET_NR_sync:
4515 case TARGET_NR_kill:
4516 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
4518 case TARGET_NR_rename:
4521 p = lock_user_string(arg1);
4522 p2 = lock_user_string(arg2);
4524 ret = -TARGET_EFAULT;
4526 ret = get_errno(rename(p, p2));
4527 unlock_user(p2, arg2, 0);
4528 unlock_user(p, arg1, 0);
4531 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4532 case TARGET_NR_renameat:
4535 p = lock_user_string(arg2);
4536 p2 = lock_user_string(arg4);
4538 ret = -TARGET_EFAULT;
4540 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
4541 unlock_user(p2, arg4, 0);
4542 unlock_user(p, arg2, 0);
4546 case TARGET_NR_mkdir:
4547 if (!(p = lock_user_string(arg1)))
4549 ret = get_errno(mkdir(p, arg2));
4550 unlock_user(p, arg1, 0);
4552 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4553 case TARGET_NR_mkdirat:
4554 if (!(p = lock_user_string(arg2)))
4556 ret = get_errno(sys_mkdirat(arg1, p, arg3));
4557 unlock_user(p, arg2, 0);
4560 case TARGET_NR_rmdir:
4561 if (!(p = lock_user_string(arg1)))
4563 ret = get_errno(rmdir(p));
4564 unlock_user(p, arg1, 0);
4567 ret = get_errno(dup(arg1));
4569 case TARGET_NR_pipe:
4570 ret = do_pipe(cpu_env, arg1, 0);
4572 #ifdef TARGET_NR_pipe2
4573 case TARGET_NR_pipe2:
4574 ret = do_pipe(cpu_env, arg1, arg2);
4577 case TARGET_NR_times:
4579 struct target_tms *tmsp;
4581 ret = get_errno(times(&tms));
4583 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
4586 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
4587 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
4588 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
4589 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
4592 ret = host_to_target_clock_t(ret);
4595 #ifdef TARGET_NR_prof
4596 case TARGET_NR_prof:
4599 #ifdef TARGET_NR_signal
4600 case TARGET_NR_signal:
4603 case TARGET_NR_acct:
4605 ret = get_errno(acct(NULL));
4607 if (!(p = lock_user_string(arg1)))
4609 ret = get_errno(acct(path(p)));
4610 unlock_user(p, arg1, 0);
4613 #ifdef TARGET_NR_umount2 /* not on alpha */
4614 case TARGET_NR_umount2:
4615 if (!(p = lock_user_string(arg1)))
4617 ret = get_errno(umount2(p, arg2));
4618 unlock_user(p, arg1, 0);
4621 #ifdef TARGET_NR_lock
4622 case TARGET_NR_lock:
4625 case TARGET_NR_ioctl:
4626 ret = do_ioctl(arg1, arg2, arg3);
4628 case TARGET_NR_fcntl:
4629 ret = do_fcntl(arg1, arg2, arg3);
4631 #ifdef TARGET_NR_mpx
4635 case TARGET_NR_setpgid:
4636 ret = get_errno(setpgid(arg1, arg2));
4638 #ifdef TARGET_NR_ulimit
4639 case TARGET_NR_ulimit:
4642 #ifdef TARGET_NR_oldolduname
4643 case TARGET_NR_oldolduname:
4646 case TARGET_NR_umask:
4647 ret = get_errno(umask(arg1));
4649 case TARGET_NR_chroot:
4650 if (!(p = lock_user_string(arg1)))
4652 ret = get_errno(chroot(p));
4653 unlock_user(p, arg1, 0);
4655 case TARGET_NR_ustat:
4657 case TARGET_NR_dup2:
4658 ret = get_errno(dup2(arg1, arg2));
4660 #ifdef TARGET_NR_getppid /* not on alpha */
4661 case TARGET_NR_getppid:
4662 ret = get_errno(getppid());
4665 case TARGET_NR_getpgrp:
4666 ret = get_errno(getpgrp());
4668 case TARGET_NR_setsid:
4669 ret = get_errno(setsid());
4671 #ifdef TARGET_NR_sigaction
4672 case TARGET_NR_sigaction:
4674 #if !defined(TARGET_MIPS)
4675 struct target_old_sigaction *old_act;
4676 struct target_sigaction act, oact, *pact;
4678 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4680 act._sa_handler = old_act->_sa_handler;
4681 target_siginitset(&act.sa_mask, old_act->sa_mask);
4682 act.sa_flags = old_act->sa_flags;
4683 act.sa_restorer = old_act->sa_restorer;
4684 unlock_user_struct(old_act, arg2, 0);
4689 ret = get_errno(do_sigaction(arg1, pact, &oact));
4690 if (!is_error(ret) && arg3) {
4691 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4693 old_act->_sa_handler = oact._sa_handler;
4694 old_act->sa_mask = oact.sa_mask.sig[0];
4695 old_act->sa_flags = oact.sa_flags;
4696 old_act->sa_restorer = oact.sa_restorer;
4697 unlock_user_struct(old_act, arg3, 1);
4700 struct target_sigaction act, oact, *pact, *old_act;
4703 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4705 act._sa_handler = old_act->_sa_handler;
4706 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
4707 act.sa_flags = old_act->sa_flags;
4708 unlock_user_struct(old_act, arg2, 0);
4714 ret = get_errno(do_sigaction(arg1, pact, &oact));
4716 if (!is_error(ret) && arg3) {
4717 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4719 old_act->_sa_handler = oact._sa_handler;
4720 old_act->sa_flags = oact.sa_flags;
4721 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
4722 old_act->sa_mask.sig[1] = 0;
4723 old_act->sa_mask.sig[2] = 0;
4724 old_act->sa_mask.sig[3] = 0;
4725 unlock_user_struct(old_act, arg3, 1);
4731 case TARGET_NR_rt_sigaction:
4733 struct target_sigaction *act;
4734 struct target_sigaction *oact;
4737 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
4742 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
4743 ret = -TARGET_EFAULT;
4744 goto rt_sigaction_fail;
4748 ret = get_errno(do_sigaction(arg1, act, oact));
4751 unlock_user_struct(act, arg2, 0);
4753 unlock_user_struct(oact, arg3, 1);
4756 #ifdef TARGET_NR_sgetmask /* not on alpha */
4757 case TARGET_NR_sgetmask:
4760 abi_ulong target_set;
4761 sigprocmask(0, NULL, &cur_set);
4762 host_to_target_old_sigset(&target_set, &cur_set);
4767 #ifdef TARGET_NR_ssetmask /* not on alpha */
4768 case TARGET_NR_ssetmask:
4770 sigset_t set, oset, cur_set;
4771 abi_ulong target_set = arg1;
4772 sigprocmask(0, NULL, &cur_set);
4773 target_to_host_old_sigset(&set, &target_set);
4774 sigorset(&set, &set, &cur_set);
4775 sigprocmask(SIG_SETMASK, &set, &oset);
4776 host_to_target_old_sigset(&target_set, &oset);
4781 #ifdef TARGET_NR_sigprocmask
4782 case TARGET_NR_sigprocmask:
4785 sigset_t set, oldset, *set_ptr;
4789 case TARGET_SIG_BLOCK:
4792 case TARGET_SIG_UNBLOCK:
4795 case TARGET_SIG_SETMASK:
4799 ret = -TARGET_EINVAL;
4802 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4804 target_to_host_old_sigset(&set, p);
4805 unlock_user(p, arg2, 0);
4811 ret = get_errno(sigprocmask(arg1, set_ptr, &oldset));
4812 if (!is_error(ret) && arg3) {
4813 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4815 host_to_target_old_sigset(p, &oldset);
4816 unlock_user(p, arg3, sizeof(target_sigset_t));
4821 case TARGET_NR_rt_sigprocmask:
4824 sigset_t set, oldset, *set_ptr;
4828 case TARGET_SIG_BLOCK:
4831 case TARGET_SIG_UNBLOCK:
4834 case TARGET_SIG_SETMASK:
4838 ret = -TARGET_EINVAL;
4841 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4843 target_to_host_sigset(&set, p);
4844 unlock_user(p, arg2, 0);
4850 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
4851 if (!is_error(ret) && arg3) {
4852 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4854 host_to_target_sigset(p, &oldset);
4855 unlock_user(p, arg3, sizeof(target_sigset_t));
4859 #ifdef TARGET_NR_sigpending
4860 case TARGET_NR_sigpending:
4863 ret = get_errno(sigpending(&set));
4864 if (!is_error(ret)) {
4865 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4867 host_to_target_old_sigset(p, &set);
4868 unlock_user(p, arg1, sizeof(target_sigset_t));
4873 case TARGET_NR_rt_sigpending:
4876 ret = get_errno(sigpending(&set));
4877 if (!is_error(ret)) {
4878 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4880 host_to_target_sigset(p, &set);
4881 unlock_user(p, arg1, sizeof(target_sigset_t));
4885 #ifdef TARGET_NR_sigsuspend
4886 case TARGET_NR_sigsuspend:
4889 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4891 target_to_host_old_sigset(&set, p);
4892 unlock_user(p, arg1, 0);
4893 ret = get_errno(sigsuspend(&set));
4897 case TARGET_NR_rt_sigsuspend:
4900 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4902 target_to_host_sigset(&set, p);
4903 unlock_user(p, arg1, 0);
4904 ret = get_errno(sigsuspend(&set));
4907 case TARGET_NR_rt_sigtimedwait:
4910 struct timespec uts, *puts;
4913 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4915 target_to_host_sigset(&set, p);
4916 unlock_user(p, arg1, 0);
4919 target_to_host_timespec(puts, arg3);
4923 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
4924 if (!is_error(ret) && arg2) {
4925 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
4927 host_to_target_siginfo(p, &uinfo);
4928 unlock_user(p, arg2, sizeof(target_siginfo_t));
4932 case TARGET_NR_rt_sigqueueinfo:
4935 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
4937 target_to_host_siginfo(&uinfo, p);
4938 unlock_user(p, arg1, 0);
4939 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
4942 #ifdef TARGET_NR_sigreturn
4943 case TARGET_NR_sigreturn:
4944 /* NOTE: ret is eax, so not transcoding must be done */
4945 ret = do_sigreturn(cpu_env);
4948 case TARGET_NR_rt_sigreturn:
4949 /* NOTE: ret is eax, so not transcoding must be done */
4950 ret = do_rt_sigreturn(cpu_env);
4952 case TARGET_NR_sethostname:
4953 if (!(p = lock_user_string(arg1)))
4955 ret = get_errno(sethostname(p, arg2));
4956 unlock_user(p, arg1, 0);
4958 case TARGET_NR_setrlimit:
4960 /* XXX: convert resource ? */
4961 int resource = arg1;
4962 struct target_rlimit *target_rlim;
4964 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
4966 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4967 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4968 unlock_user_struct(target_rlim, arg2, 0);
4969 ret = get_errno(setrlimit(resource, &rlim));
4972 case TARGET_NR_getrlimit:
4974 /* XXX: convert resource ? */
4975 int resource = arg1;
4976 struct target_rlimit *target_rlim;
4979 ret = get_errno(getrlimit(resource, &rlim));
4980 if (!is_error(ret)) {
4981 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
4983 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4984 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4985 unlock_user_struct(target_rlim, arg2, 1);
4989 case TARGET_NR_getrusage:
4991 struct rusage rusage;
4992 ret = get_errno(getrusage(arg1, &rusage));
4993 if (!is_error(ret)) {
4994 host_to_target_rusage(arg2, &rusage);
4998 case TARGET_NR_gettimeofday:
5001 ret = get_errno(gettimeofday(&tv, NULL));
5002 if (!is_error(ret)) {
5003 if (copy_to_user_timeval(arg1, &tv))
5008 case TARGET_NR_settimeofday:
5011 if (copy_from_user_timeval(&tv, arg1))
5013 ret = get_errno(settimeofday(&tv, NULL));
5016 #ifdef TARGET_NR_select
5017 case TARGET_NR_select:
5019 struct target_sel_arg_struct *sel;
5020 abi_ulong inp, outp, exp, tvp;
5023 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5025 nsel = tswapl(sel->n);
5026 inp = tswapl(sel->inp);
5027 outp = tswapl(sel->outp);
5028 exp = tswapl(sel->exp);
5029 tvp = tswapl(sel->tvp);
5030 unlock_user_struct(sel, arg1, 0);
5031 ret = do_select(nsel, inp, outp, exp, tvp);
5035 case TARGET_NR_symlink:
5038 p = lock_user_string(arg1);
5039 p2 = lock_user_string(arg2);
5041 ret = -TARGET_EFAULT;
5043 ret = get_errno(symlink(p, p2));
5044 unlock_user(p2, arg2, 0);
5045 unlock_user(p, arg1, 0);
5048 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5049 case TARGET_NR_symlinkat:
5052 p = lock_user_string(arg1);
5053 p2 = lock_user_string(arg3);
5055 ret = -TARGET_EFAULT;
5057 ret = get_errno(sys_symlinkat(p, arg2, p2));
5058 unlock_user(p2, arg3, 0);
5059 unlock_user(p, arg1, 0);
5063 #ifdef TARGET_NR_oldlstat
5064 case TARGET_NR_oldlstat:
5067 case TARGET_NR_readlink:
5070 p = lock_user_string(arg1);
5071 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5073 ret = -TARGET_EFAULT;
5075 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5076 char real[PATH_MAX];
5077 temp = realpath(exec_path,real);
5078 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
5079 snprintf((char *)p2, arg3, "%s", real);
5082 ret = get_errno(readlink(path(p), p2, arg3));
5084 unlock_user(p2, arg2, ret);
5085 unlock_user(p, arg1, 0);
5088 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5089 case TARGET_NR_readlinkat:
5092 p = lock_user_string(arg2);
5093 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5095 ret = -TARGET_EFAULT;
5097 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5098 unlock_user(p2, arg3, ret);
5099 unlock_user(p, arg2, 0);
5103 #ifdef TARGET_NR_uselib
5104 case TARGET_NR_uselib:
5107 #ifdef TARGET_NR_swapon
5108 case TARGET_NR_swapon:
5109 if (!(p = lock_user_string(arg1)))
5111 ret = get_errno(swapon(p, arg2));
5112 unlock_user(p, arg1, 0);
5115 case TARGET_NR_reboot:
5117 #ifdef TARGET_NR_readdir
5118 case TARGET_NR_readdir:
5121 #ifdef TARGET_NR_mmap
5122 case TARGET_NR_mmap:
5123 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS)
5126 abi_ulong v1, v2, v3, v4, v5, v6;
5127 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5135 unlock_user(v, arg1, 0);
5136 ret = get_errno(target_mmap(v1, v2, v3,
5137 target_to_host_bitmask(v4, mmap_flags_tbl),
5141 ret = get_errno(target_mmap(arg1, arg2, arg3,
5142 target_to_host_bitmask(arg4, mmap_flags_tbl),
5148 #ifdef TARGET_NR_mmap2
5149 case TARGET_NR_mmap2:
5151 #define MMAP_SHIFT 12
5153 ret = get_errno(target_mmap(arg1, arg2, arg3,
5154 target_to_host_bitmask(arg4, mmap_flags_tbl),
5156 arg6 << MMAP_SHIFT));
5159 case TARGET_NR_munmap:
5160 ret = get_errno(target_munmap(arg1, arg2));
5162 case TARGET_NR_mprotect:
5163 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5165 #ifdef TARGET_NR_mremap
5166 case TARGET_NR_mremap:
5167 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5170 /* ??? msync/mlock/munlock are broken for softmmu. */
5171 #ifdef TARGET_NR_msync
5172 case TARGET_NR_msync:
5173 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5176 #ifdef TARGET_NR_mlock
5177 case TARGET_NR_mlock:
5178 ret = get_errno(mlock(g2h(arg1), arg2));
5181 #ifdef TARGET_NR_munlock
5182 case TARGET_NR_munlock:
5183 ret = get_errno(munlock(g2h(arg1), arg2));
5186 #ifdef TARGET_NR_mlockall
5187 case TARGET_NR_mlockall:
5188 ret = get_errno(mlockall(arg1));
5191 #ifdef TARGET_NR_munlockall
5192 case TARGET_NR_munlockall:
5193 ret = get_errno(munlockall());
5196 case TARGET_NR_truncate:
5197 if (!(p = lock_user_string(arg1)))
5199 ret = get_errno(truncate(p, arg2));
5200 unlock_user(p, arg1, 0);
5202 case TARGET_NR_ftruncate:
5203 ret = get_errno(ftruncate(arg1, arg2));
5205 case TARGET_NR_fchmod:
5206 ret = get_errno(fchmod(arg1, arg2));
5208 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5209 case TARGET_NR_fchmodat:
5210 if (!(p = lock_user_string(arg2)))
5212 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5213 unlock_user(p, arg2, 0);
5216 case TARGET_NR_getpriority:
5217 /* libc does special remapping of the return value of
5218 * sys_getpriority() so it's just easiest to call
5219 * sys_getpriority() directly rather than through libc. */
5220 ret = sys_getpriority(arg1, arg2);
5222 case TARGET_NR_setpriority:
5223 ret = get_errno(setpriority(arg1, arg2, arg3));
5225 #ifdef TARGET_NR_profil
5226 case TARGET_NR_profil:
5229 case TARGET_NR_statfs:
5230 if (!(p = lock_user_string(arg1)))
5232 ret = get_errno(statfs(path(p), &stfs));
5233 unlock_user(p, arg1, 0);
5235 if (!is_error(ret)) {
5236 struct target_statfs *target_stfs;
5238 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5240 __put_user(stfs.f_type, &target_stfs->f_type);
5241 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5242 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5243 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5244 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5245 __put_user(stfs.f_files, &target_stfs->f_files);
5246 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5247 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5248 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5249 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5250 unlock_user_struct(target_stfs, arg2, 1);
5253 case TARGET_NR_fstatfs:
5254 ret = get_errno(fstatfs(arg1, &stfs));
5255 goto convert_statfs;
5256 #ifdef TARGET_NR_statfs64
5257 case TARGET_NR_statfs64:
5258 if (!(p = lock_user_string(arg1)))
5260 ret = get_errno(statfs(path(p), &stfs));
5261 unlock_user(p, arg1, 0);
5263 if (!is_error(ret)) {
5264 struct target_statfs64 *target_stfs;
5266 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5268 __put_user(stfs.f_type, &target_stfs->f_type);
5269 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5270 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5271 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5272 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5273 __put_user(stfs.f_files, &target_stfs->f_files);
5274 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5275 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5276 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5277 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5278 unlock_user_struct(target_stfs, arg3, 1);
5281 case TARGET_NR_fstatfs64:
5282 ret = get_errno(fstatfs(arg1, &stfs));
5283 goto convert_statfs64;
5285 #ifdef TARGET_NR_ioperm
5286 case TARGET_NR_ioperm:
5289 #ifdef TARGET_NR_socketcall
5290 case TARGET_NR_socketcall:
5291 ret = do_socketcall(arg1, arg2);
5294 #ifdef TARGET_NR_accept
5295 case TARGET_NR_accept:
5296 ret = do_accept(arg1, arg2, arg3);
5299 #ifdef TARGET_NR_bind
5300 case TARGET_NR_bind:
5301 ret = do_bind(arg1, arg2, arg3);
5304 #ifdef TARGET_NR_connect
5305 case TARGET_NR_connect:
5306 ret = do_connect(arg1, arg2, arg3);
5309 #ifdef TARGET_NR_getpeername
5310 case TARGET_NR_getpeername:
5311 ret = do_getpeername(arg1, arg2, arg3);
5314 #ifdef TARGET_NR_getsockname
5315 case TARGET_NR_getsockname:
5316 ret = do_getsockname(arg1, arg2, arg3);
5319 #ifdef TARGET_NR_getsockopt
5320 case TARGET_NR_getsockopt:
5321 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
5324 #ifdef TARGET_NR_listen
5325 case TARGET_NR_listen:
5326 ret = get_errno(listen(arg1, arg2));
5329 #ifdef TARGET_NR_recv
5330 case TARGET_NR_recv:
5331 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
5334 #ifdef TARGET_NR_recvfrom
5335 case TARGET_NR_recvfrom:
5336 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
5339 #ifdef TARGET_NR_recvmsg
5340 case TARGET_NR_recvmsg:
5341 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
5344 #ifdef TARGET_NR_send
5345 case TARGET_NR_send:
5346 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
5349 #ifdef TARGET_NR_sendmsg
5350 case TARGET_NR_sendmsg:
5351 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
5354 #ifdef TARGET_NR_sendto
5355 case TARGET_NR_sendto:
5356 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
5359 #ifdef TARGET_NR_shutdown
5360 case TARGET_NR_shutdown:
5361 ret = get_errno(shutdown(arg1, arg2));
5364 #ifdef TARGET_NR_socket
5365 case TARGET_NR_socket:
5366 ret = do_socket(arg1, arg2, arg3);
5369 #ifdef TARGET_NR_socketpair
5370 case TARGET_NR_socketpair:
5371 ret = do_socketpair(arg1, arg2, arg3, arg4);
5374 #ifdef TARGET_NR_setsockopt
5375 case TARGET_NR_setsockopt:
5376 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
5380 case TARGET_NR_syslog:
5381 if (!(p = lock_user_string(arg2)))
5383 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
5384 unlock_user(p, arg2, 0);
5387 case TARGET_NR_setitimer:
5389 struct itimerval value, ovalue, *pvalue;
5393 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
5394 || copy_from_user_timeval(&pvalue->it_value,
5395 arg2 + sizeof(struct target_timeval)))
5400 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
5401 if (!is_error(ret) && arg3) {
5402 if (copy_to_user_timeval(arg3,
5403 &ovalue.it_interval)
5404 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
5410 case TARGET_NR_getitimer:
5412 struct itimerval value;
5414 ret = get_errno(getitimer(arg1, &value));
5415 if (!is_error(ret) && arg2) {
5416 if (copy_to_user_timeval(arg2,
5418 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
5424 case TARGET_NR_stat:
5425 if (!(p = lock_user_string(arg1)))
5427 ret = get_errno(stat(path(p), &st));
5428 unlock_user(p, arg1, 0);
5430 case TARGET_NR_lstat:
5431 if (!(p = lock_user_string(arg1)))
5433 ret = get_errno(lstat(path(p), &st));
5434 unlock_user(p, arg1, 0);
5436 case TARGET_NR_fstat:
5438 ret = get_errno(fstat(arg1, &st));
5440 if (!is_error(ret)) {
5441 struct target_stat *target_st;
5443 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
5445 __put_user(st.st_dev, &target_st->st_dev);
5446 __put_user(st.st_ino, &target_st->st_ino);
5447 __put_user(st.st_mode, &target_st->st_mode);
5448 __put_user(st.st_uid, &target_st->st_uid);
5449 __put_user(st.st_gid, &target_st->st_gid);
5450 __put_user(st.st_nlink, &target_st->st_nlink);
5451 __put_user(st.st_rdev, &target_st->st_rdev);
5452 __put_user(st.st_size, &target_st->st_size);
5453 __put_user(st.st_blksize, &target_st->st_blksize);
5454 __put_user(st.st_blocks, &target_st->st_blocks);
5455 __put_user(st.st_atime, &target_st->target_st_atime);
5456 __put_user(st.st_mtime, &target_st->target_st_mtime);
5457 __put_user(st.st_ctime, &target_st->target_st_ctime);
5458 unlock_user_struct(target_st, arg2, 1);
5462 #ifdef TARGET_NR_olduname
5463 case TARGET_NR_olduname:
5466 #ifdef TARGET_NR_iopl
5467 case TARGET_NR_iopl:
5470 case TARGET_NR_vhangup:
5471 ret = get_errno(vhangup());
5473 #ifdef TARGET_NR_idle
5474 case TARGET_NR_idle:
5477 #ifdef TARGET_NR_syscall
5478 case TARGET_NR_syscall:
5479 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
5482 case TARGET_NR_wait4:
5485 abi_long status_ptr = arg2;
5486 struct rusage rusage, *rusage_ptr;
5487 abi_ulong target_rusage = arg4;
5489 rusage_ptr = &rusage;
5492 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
5493 if (!is_error(ret)) {
5495 status = host_to_target_waitstatus(status);
5496 if (put_user_s32(status, status_ptr))
5500 host_to_target_rusage(target_rusage, &rusage);
5504 #ifdef TARGET_NR_swapoff
5505 case TARGET_NR_swapoff:
5506 if (!(p = lock_user_string(arg1)))
5508 ret = get_errno(swapoff(p));
5509 unlock_user(p, arg1, 0);
5512 case TARGET_NR_sysinfo:
5514 struct target_sysinfo *target_value;
5515 struct sysinfo value;
5516 ret = get_errno(sysinfo(&value));
5517 if (!is_error(ret) && arg1)
5519 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
5521 __put_user(value.uptime, &target_value->uptime);
5522 __put_user(value.loads[0], &target_value->loads[0]);
5523 __put_user(value.loads[1], &target_value->loads[1]);
5524 __put_user(value.loads[2], &target_value->loads[2]);
5525 __put_user(value.totalram, &target_value->totalram);
5526 __put_user(value.freeram, &target_value->freeram);
5527 __put_user(value.sharedram, &target_value->sharedram);
5528 __put_user(value.bufferram, &target_value->bufferram);
5529 __put_user(value.totalswap, &target_value->totalswap);
5530 __put_user(value.freeswap, &target_value->freeswap);
5531 __put_user(value.procs, &target_value->procs);
5532 __put_user(value.totalhigh, &target_value->totalhigh);
5533 __put_user(value.freehigh, &target_value->freehigh);
5534 __put_user(value.mem_unit, &target_value->mem_unit);
5535 unlock_user_struct(target_value, arg1, 1);
5539 #ifdef TARGET_NR_ipc
5541 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
5544 #ifdef TARGET_NR_semget
5545 case TARGET_NR_semget:
5546 ret = get_errno(semget(arg1, arg2, arg3));
5549 #ifdef TARGET_NR_semop
5550 case TARGET_NR_semop:
5551 ret = get_errno(do_semop(arg1, arg2, arg3));
5554 #ifdef TARGET_NR_semctl
5555 case TARGET_NR_semctl:
5556 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
5559 #ifdef TARGET_NR_msgctl
5560 case TARGET_NR_msgctl:
5561 ret = do_msgctl(arg1, arg2, arg3);
5564 #ifdef TARGET_NR_msgget
5565 case TARGET_NR_msgget:
5566 ret = get_errno(msgget(arg1, arg2));
5569 #ifdef TARGET_NR_msgrcv
5570 case TARGET_NR_msgrcv:
5571 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
5574 #ifdef TARGET_NR_msgsnd
5575 case TARGET_NR_msgsnd:
5576 ret = do_msgsnd(arg1, arg2, arg3, arg4);
5579 #ifdef TARGET_NR_shmget
5580 case TARGET_NR_shmget:
5581 ret = get_errno(shmget(arg1, arg2, arg3));
5584 #ifdef TARGET_NR_shmctl
5585 case TARGET_NR_shmctl:
5586 ret = do_shmctl(arg1, arg2, arg3);
5589 #ifdef TARGET_NR_shmat
5590 case TARGET_NR_shmat:
5591 ret = do_shmat(arg1, arg2, arg3);
5594 #ifdef TARGET_NR_shmdt
5595 case TARGET_NR_shmdt:
5596 ret = do_shmdt(arg1);
5599 case TARGET_NR_fsync:
5600 ret = get_errno(fsync(arg1));
5602 case TARGET_NR_clone:
5603 #if defined(TARGET_SH4)
5604 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
5605 #elif defined(TARGET_CRIS)
5606 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
5608 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
5611 #ifdef __NR_exit_group
5612 /* new thread calls */
5613 case TARGET_NR_exit_group:
5617 gdb_exit(cpu_env, arg1);
5618 ret = get_errno(exit_group(arg1));
5621 case TARGET_NR_setdomainname:
5622 if (!(p = lock_user_string(arg1)))
5624 ret = get_errno(setdomainname(p, arg2));
5625 unlock_user(p, arg1, 0);
5627 case TARGET_NR_uname:
5628 /* no need to transcode because we use the linux syscall */
5630 struct new_utsname * buf;
5632 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
5634 ret = get_errno(sys_uname(buf));
5635 if (!is_error(ret)) {
5636 /* Overwrite the native machine name with whatever is being
5638 strcpy (buf->machine, UNAME_MACHINE);
5639 /* Allow the user to override the reported release. */
5640 if (qemu_uname_release && *qemu_uname_release)
5641 strcpy (buf->release, qemu_uname_release);
5643 unlock_user_struct(buf, arg1, 1);
5647 case TARGET_NR_modify_ldt:
5648 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
5650 #if !defined(TARGET_X86_64)
5651 case TARGET_NR_vm86old:
5653 case TARGET_NR_vm86:
5654 ret = do_vm86(cpu_env, arg1, arg2);
5658 case TARGET_NR_adjtimex:
5660 #ifdef TARGET_NR_create_module
5661 case TARGET_NR_create_module:
5663 case TARGET_NR_init_module:
5664 case TARGET_NR_delete_module:
5665 #ifdef TARGET_NR_get_kernel_syms
5666 case TARGET_NR_get_kernel_syms:
5669 case TARGET_NR_quotactl:
5671 case TARGET_NR_getpgid:
5672 ret = get_errno(getpgid(arg1));
5674 case TARGET_NR_fchdir:
5675 ret = get_errno(fchdir(arg1));
5677 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5678 case TARGET_NR_bdflush:
5681 #ifdef TARGET_NR_sysfs
5682 case TARGET_NR_sysfs:
5685 case TARGET_NR_personality:
5686 ret = get_errno(personality(arg1));
5688 #ifdef TARGET_NR_afs_syscall
5689 case TARGET_NR_afs_syscall:
5692 #ifdef TARGET_NR__llseek /* Not on alpha */
5693 case TARGET_NR__llseek:
5695 #if defined (__x86_64__)
5696 ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5));
5697 if (put_user_s64(ret, arg4))
5701 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
5702 if (put_user_s64(res, arg4))
5708 case TARGET_NR_getdents:
5709 #if TARGET_ABI_BITS != 32
5711 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5713 struct target_dirent *target_dirp;
5714 struct linux_dirent *dirp;
5715 abi_long count = arg3;
5717 dirp = malloc(count);
5719 ret = -TARGET_ENOMEM;
5723 ret = get_errno(sys_getdents(arg1, dirp, count));
5724 if (!is_error(ret)) {
5725 struct linux_dirent *de;
5726 struct target_dirent *tde;
5728 int reclen, treclen;
5729 int count1, tnamelen;
5733 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5737 reclen = de->d_reclen;
5738 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
5739 tde->d_reclen = tswap16(treclen);
5740 tde->d_ino = tswapl(de->d_ino);
5741 tde->d_off = tswapl(de->d_off);
5742 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
5745 /* XXX: may not be correct */
5746 pstrcpy(tde->d_name, tnamelen, de->d_name);
5747 de = (struct linux_dirent *)((char *)de + reclen);
5749 tde = (struct target_dirent *)((char *)tde + treclen);
5753 unlock_user(target_dirp, arg2, ret);
5759 struct linux_dirent *dirp;
5760 abi_long count = arg3;
5762 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5764 ret = get_errno(sys_getdents(arg1, dirp, count));
5765 if (!is_error(ret)) {
5766 struct linux_dirent *de;
5771 reclen = de->d_reclen;
5774 de->d_reclen = tswap16(reclen);
5775 tswapls(&de->d_ino);
5776 tswapls(&de->d_off);
5777 de = (struct linux_dirent *)((char *)de + reclen);
5781 unlock_user(dirp, arg2, ret);
5785 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5786 case TARGET_NR_getdents64:
5788 struct linux_dirent64 *dirp;
5789 abi_long count = arg3;
5790 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5792 ret = get_errno(sys_getdents64(arg1, dirp, count));
5793 if (!is_error(ret)) {
5794 struct linux_dirent64 *de;
5799 reclen = de->d_reclen;
5802 de->d_reclen = tswap16(reclen);
5803 tswap64s((uint64_t *)&de->d_ino);
5804 tswap64s((uint64_t *)&de->d_off);
5805 de = (struct linux_dirent64 *)((char *)de + reclen);
5809 unlock_user(dirp, arg2, ret);
5812 #endif /* TARGET_NR_getdents64 */
5813 #ifdef TARGET_NR__newselect
5814 case TARGET_NR__newselect:
5815 ret = do_select(arg1, arg2, arg3, arg4, arg5);
5818 #ifdef TARGET_NR_poll
5819 case TARGET_NR_poll:
5821 struct target_pollfd *target_pfd;
5822 unsigned int nfds = arg2;
5827 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
5830 pfd = alloca(sizeof(struct pollfd) * nfds);
5831 for(i = 0; i < nfds; i++) {
5832 pfd[i].fd = tswap32(target_pfd[i].fd);
5833 pfd[i].events = tswap16(target_pfd[i].events);
5835 ret = get_errno(poll(pfd, nfds, timeout));
5836 if (!is_error(ret)) {
5837 for(i = 0; i < nfds; i++) {
5838 target_pfd[i].revents = tswap16(pfd[i].revents);
5840 ret += nfds * (sizeof(struct target_pollfd)
5841 - sizeof(struct pollfd));
5843 unlock_user(target_pfd, arg1, ret);
5847 case TARGET_NR_flock:
5848 /* NOTE: the flock constant seems to be the same for every
5850 ret = get_errno(flock(arg1, arg2));
5852 case TARGET_NR_readv:
5857 vec = alloca(count * sizeof(struct iovec));
5858 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
5860 ret = get_errno(readv(arg1, vec, count));
5861 unlock_iovec(vec, arg2, count, 1);
5864 case TARGET_NR_writev:
5869 vec = alloca(count * sizeof(struct iovec));
5870 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
5872 ret = get_errno(writev(arg1, vec, count));
5873 unlock_iovec(vec, arg2, count, 0);
5876 case TARGET_NR_getsid:
5877 ret = get_errno(getsid(arg1));
5879 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5880 case TARGET_NR_fdatasync:
5881 ret = get_errno(fdatasync(arg1));
5884 case TARGET_NR__sysctl:
5885 /* We don't implement this, but ENOTDIR is always a safe
5887 ret = -TARGET_ENOTDIR;
5889 case TARGET_NR_sched_setparam:
5891 struct sched_param *target_schp;
5892 struct sched_param schp;
5894 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
5896 schp.sched_priority = tswap32(target_schp->sched_priority);
5897 unlock_user_struct(target_schp, arg2, 0);
5898 ret = get_errno(sched_setparam(arg1, &schp));
5901 case TARGET_NR_sched_getparam:
5903 struct sched_param *target_schp;
5904 struct sched_param schp;
5905 ret = get_errno(sched_getparam(arg1, &schp));
5906 if (!is_error(ret)) {
5907 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
5909 target_schp->sched_priority = tswap32(schp.sched_priority);
5910 unlock_user_struct(target_schp, arg2, 1);
5914 case TARGET_NR_sched_setscheduler:
5916 struct sched_param *target_schp;
5917 struct sched_param schp;
5918 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
5920 schp.sched_priority = tswap32(target_schp->sched_priority);
5921 unlock_user_struct(target_schp, arg3, 0);
5922 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
5925 case TARGET_NR_sched_getscheduler:
5926 ret = get_errno(sched_getscheduler(arg1));
5928 case TARGET_NR_sched_yield:
5929 ret = get_errno(sched_yield());
5931 case TARGET_NR_sched_get_priority_max:
5932 ret = get_errno(sched_get_priority_max(arg1));
5934 case TARGET_NR_sched_get_priority_min:
5935 ret = get_errno(sched_get_priority_min(arg1));
5937 case TARGET_NR_sched_rr_get_interval:
5940 ret = get_errno(sched_rr_get_interval(arg1, &ts));
5941 if (!is_error(ret)) {
5942 host_to_target_timespec(arg2, &ts);
5946 case TARGET_NR_nanosleep:
5948 struct timespec req, rem;
5949 target_to_host_timespec(&req, arg1);
5950 ret = get_errno(nanosleep(&req, &rem));
5951 if (is_error(ret) && arg2) {
5952 host_to_target_timespec(arg2, &rem);
5956 #ifdef TARGET_NR_query_module
5957 case TARGET_NR_query_module:
5960 #ifdef TARGET_NR_nfsservctl
5961 case TARGET_NR_nfsservctl:
5964 case TARGET_NR_prctl:
5967 case PR_GET_PDEATHSIG:
5970 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
5971 if (!is_error(ret) && arg2
5972 && put_user_ual(deathsig, arg2))
5977 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
5981 #ifdef TARGET_NR_arch_prctl
5982 case TARGET_NR_arch_prctl:
5983 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
5984 ret = do_arch_prctl(cpu_env, arg1, arg2);
5990 #ifdef TARGET_NR_pread
5991 case TARGET_NR_pread:
5993 if (((CPUARMState *)cpu_env)->eabi)
5996 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5998 ret = get_errno(pread(arg1, p, arg3, arg4));
5999 unlock_user(p, arg2, ret);
6001 case TARGET_NR_pwrite:
6003 if (((CPUARMState *)cpu_env)->eabi)
6006 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6008 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6009 unlock_user(p, arg2, 0);
6012 #ifdef TARGET_NR_pread64
6013 case TARGET_NR_pread64:
6014 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6016 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6017 unlock_user(p, arg2, ret);
6019 case TARGET_NR_pwrite64:
6020 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6022 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6023 unlock_user(p, arg2, 0);
6026 case TARGET_NR_getcwd:
6027 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6029 ret = get_errno(sys_getcwd1(p, arg2));
6030 unlock_user(p, arg1, ret);
6032 case TARGET_NR_capget:
6034 case TARGET_NR_capset:
6036 case TARGET_NR_sigaltstack:
6037 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6038 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
6039 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6044 case TARGET_NR_sendfile:
6046 #ifdef TARGET_NR_getpmsg
6047 case TARGET_NR_getpmsg:
6050 #ifdef TARGET_NR_putpmsg
6051 case TARGET_NR_putpmsg:
6054 #ifdef TARGET_NR_vfork
6055 case TARGET_NR_vfork:
6056 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6060 #ifdef TARGET_NR_ugetrlimit
6061 case TARGET_NR_ugetrlimit:
6064 ret = get_errno(getrlimit(arg1, &rlim));
6065 if (!is_error(ret)) {
6066 struct target_rlimit *target_rlim;
6067 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6069 target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
6070 target_rlim->rlim_max = tswapl(rlim.rlim_max);
6071 unlock_user_struct(target_rlim, arg2, 1);
6076 #ifdef TARGET_NR_truncate64
6077 case TARGET_NR_truncate64:
6078 if (!(p = lock_user_string(arg1)))
6080 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6081 unlock_user(p, arg1, 0);
6084 #ifdef TARGET_NR_ftruncate64
6085 case TARGET_NR_ftruncate64:
6086 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6089 #ifdef TARGET_NR_stat64
6090 case TARGET_NR_stat64:
6091 if (!(p = lock_user_string(arg1)))
6093 ret = get_errno(stat(path(p), &st));
6094 unlock_user(p, arg1, 0);
6096 ret = host_to_target_stat64(cpu_env, arg2, &st);
6099 #ifdef TARGET_NR_lstat64
6100 case TARGET_NR_lstat64:
6101 if (!(p = lock_user_string(arg1)))
6103 ret = get_errno(lstat(path(p), &st));
6104 unlock_user(p, arg1, 0);
6106 ret = host_to_target_stat64(cpu_env, arg2, &st);
6109 #ifdef TARGET_NR_fstat64
6110 case TARGET_NR_fstat64:
6111 ret = get_errno(fstat(arg1, &st));
6113 ret = host_to_target_stat64(cpu_env, arg2, &st);
6116 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6117 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6118 #ifdef TARGET_NR_fstatat64
6119 case TARGET_NR_fstatat64:
6121 #ifdef TARGET_NR_newfstatat
6122 case TARGET_NR_newfstatat:
6124 if (!(p = lock_user_string(arg2)))
6126 #ifdef __NR_fstatat64
6127 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6129 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6132 ret = host_to_target_stat64(cpu_env, arg3, &st);
6136 case TARGET_NR_lchown:
6137 if (!(p = lock_user_string(arg1)))
6139 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6140 unlock_user(p, arg1, 0);
6142 case TARGET_NR_getuid:
6143 ret = get_errno(high2lowuid(getuid()));
6145 case TARGET_NR_getgid:
6146 ret = get_errno(high2lowgid(getgid()));
6148 case TARGET_NR_geteuid:
6149 ret = get_errno(high2lowuid(geteuid()));
6151 case TARGET_NR_getegid:
6152 ret = get_errno(high2lowgid(getegid()));
6154 case TARGET_NR_setreuid:
6155 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6157 case TARGET_NR_setregid:
6158 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6160 case TARGET_NR_getgroups:
6162 int gidsetsize = arg1;
6163 uint16_t *target_grouplist;
6167 grouplist = alloca(gidsetsize * sizeof(gid_t));
6168 ret = get_errno(getgroups(gidsetsize, grouplist));
6169 if (gidsetsize == 0)
6171 if (!is_error(ret)) {
6172 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6173 if (!target_grouplist)
6175 for(i = 0;i < ret; i++)
6176 target_grouplist[i] = tswap16(grouplist[i]);
6177 unlock_user(target_grouplist, arg2, gidsetsize * 2);
6181 case TARGET_NR_setgroups:
6183 int gidsetsize = arg1;
6184 uint16_t *target_grouplist;
6188 grouplist = alloca(gidsetsize * sizeof(gid_t));
6189 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
6190 if (!target_grouplist) {
6191 ret = -TARGET_EFAULT;
6194 for(i = 0;i < gidsetsize; i++)
6195 grouplist[i] = tswap16(target_grouplist[i]);
6196 unlock_user(target_grouplist, arg2, 0);
6197 ret = get_errno(setgroups(gidsetsize, grouplist));
6200 case TARGET_NR_fchown:
6201 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
6203 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6204 case TARGET_NR_fchownat:
6205 if (!(p = lock_user_string(arg2)))
6207 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
6208 unlock_user(p, arg2, 0);
6211 #ifdef TARGET_NR_setresuid
6212 case TARGET_NR_setresuid:
6213 ret = get_errno(setresuid(low2highuid(arg1),
6215 low2highuid(arg3)));
6218 #ifdef TARGET_NR_getresuid
6219 case TARGET_NR_getresuid:
6221 uid_t ruid, euid, suid;
6222 ret = get_errno(getresuid(&ruid, &euid, &suid));
6223 if (!is_error(ret)) {
6224 if (put_user_u16(high2lowuid(ruid), arg1)
6225 || put_user_u16(high2lowuid(euid), arg2)
6226 || put_user_u16(high2lowuid(suid), arg3))
6232 #ifdef TARGET_NR_getresgid
6233 case TARGET_NR_setresgid:
6234 ret = get_errno(setresgid(low2highgid(arg1),
6236 low2highgid(arg3)));
6239 #ifdef TARGET_NR_getresgid
6240 case TARGET_NR_getresgid:
6242 gid_t rgid, egid, sgid;
6243 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6244 if (!is_error(ret)) {
6245 if (put_user_u16(high2lowgid(rgid), arg1)
6246 || put_user_u16(high2lowgid(egid), arg2)
6247 || put_user_u16(high2lowgid(sgid), arg3))
6253 case TARGET_NR_chown:
6254 if (!(p = lock_user_string(arg1)))
6256 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
6257 unlock_user(p, arg1, 0);
6259 case TARGET_NR_setuid:
6260 ret = get_errno(setuid(low2highuid(arg1)));
6262 case TARGET_NR_setgid:
6263 ret = get_errno(setgid(low2highgid(arg1)));
6265 case TARGET_NR_setfsuid:
6266 ret = get_errno(setfsuid(arg1));
6268 case TARGET_NR_setfsgid:
6269 ret = get_errno(setfsgid(arg1));
6271 #endif /* USE_UID16 */
6273 #ifdef TARGET_NR_lchown32
6274 case TARGET_NR_lchown32:
6275 if (!(p = lock_user_string(arg1)))
6277 ret = get_errno(lchown(p, arg2, arg3));
6278 unlock_user(p, arg1, 0);
6281 #ifdef TARGET_NR_getuid32
6282 case TARGET_NR_getuid32:
6283 ret = get_errno(getuid());
6287 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6288 /* Alpha specific */
6289 case TARGET_NR_getxuid:
6293 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
6295 ret = get_errno(getuid());
6298 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6299 /* Alpha specific */
6300 case TARGET_NR_getxgid:
6304 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
6306 ret = get_errno(getgid());
6310 #ifdef TARGET_NR_getgid32
6311 case TARGET_NR_getgid32:
6312 ret = get_errno(getgid());
6315 #ifdef TARGET_NR_geteuid32
6316 case TARGET_NR_geteuid32:
6317 ret = get_errno(geteuid());
6320 #ifdef TARGET_NR_getegid32
6321 case TARGET_NR_getegid32:
6322 ret = get_errno(getegid());
6325 #ifdef TARGET_NR_setreuid32
6326 case TARGET_NR_setreuid32:
6327 ret = get_errno(setreuid(arg1, arg2));
6330 #ifdef TARGET_NR_setregid32
6331 case TARGET_NR_setregid32:
6332 ret = get_errno(setregid(arg1, arg2));
6335 #ifdef TARGET_NR_getgroups32
6336 case TARGET_NR_getgroups32:
6338 int gidsetsize = arg1;
6339 uint32_t *target_grouplist;
6343 grouplist = alloca(gidsetsize * sizeof(gid_t));
6344 ret = get_errno(getgroups(gidsetsize, grouplist));
6345 if (gidsetsize == 0)
6347 if (!is_error(ret)) {
6348 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
6349 if (!target_grouplist) {
6350 ret = -TARGET_EFAULT;
6353 for(i = 0;i < ret; i++)
6354 target_grouplist[i] = tswap32(grouplist[i]);
6355 unlock_user(target_grouplist, arg2, gidsetsize * 4);
6360 #ifdef TARGET_NR_setgroups32
6361 case TARGET_NR_setgroups32:
6363 int gidsetsize = arg1;
6364 uint32_t *target_grouplist;
6368 grouplist = alloca(gidsetsize * sizeof(gid_t));
6369 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
6370 if (!target_grouplist) {
6371 ret = -TARGET_EFAULT;
6374 for(i = 0;i < gidsetsize; i++)
6375 grouplist[i] = tswap32(target_grouplist[i]);
6376 unlock_user(target_grouplist, arg2, 0);
6377 ret = get_errno(setgroups(gidsetsize, grouplist));
6381 #ifdef TARGET_NR_fchown32
6382 case TARGET_NR_fchown32:
6383 ret = get_errno(fchown(arg1, arg2, arg3));
6386 #ifdef TARGET_NR_setresuid32
6387 case TARGET_NR_setresuid32:
6388 ret = get_errno(setresuid(arg1, arg2, arg3));
6391 #ifdef TARGET_NR_getresuid32
6392 case TARGET_NR_getresuid32:
6394 uid_t ruid, euid, suid;
6395 ret = get_errno(getresuid(&ruid, &euid, &suid));
6396 if (!is_error(ret)) {
6397 if (put_user_u32(ruid, arg1)
6398 || put_user_u32(euid, arg2)
6399 || put_user_u32(suid, arg3))
6405 #ifdef TARGET_NR_setresgid32
6406 case TARGET_NR_setresgid32:
6407 ret = get_errno(setresgid(arg1, arg2, arg3));
6410 #ifdef TARGET_NR_getresgid32
6411 case TARGET_NR_getresgid32:
6413 gid_t rgid, egid, sgid;
6414 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6415 if (!is_error(ret)) {
6416 if (put_user_u32(rgid, arg1)
6417 || put_user_u32(egid, arg2)
6418 || put_user_u32(sgid, arg3))
6424 #ifdef TARGET_NR_chown32
6425 case TARGET_NR_chown32:
6426 if (!(p = lock_user_string(arg1)))
6428 ret = get_errno(chown(p, arg2, arg3));
6429 unlock_user(p, arg1, 0);
6432 #ifdef TARGET_NR_setuid32
6433 case TARGET_NR_setuid32:
6434 ret = get_errno(setuid(arg1));
6437 #ifdef TARGET_NR_setgid32
6438 case TARGET_NR_setgid32:
6439 ret = get_errno(setgid(arg1));
6442 #ifdef TARGET_NR_setfsuid32
6443 case TARGET_NR_setfsuid32:
6444 ret = get_errno(setfsuid(arg1));
6447 #ifdef TARGET_NR_setfsgid32
6448 case TARGET_NR_setfsgid32:
6449 ret = get_errno(setfsgid(arg1));
6453 case TARGET_NR_pivot_root:
6455 #ifdef TARGET_NR_mincore
6456 case TARGET_NR_mincore:
6459 ret = -TARGET_EFAULT;
6460 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
6462 if (!(p = lock_user_string(arg3)))
6464 ret = get_errno(mincore(a, arg2, p));
6465 unlock_user(p, arg3, ret);
6467 unlock_user(a, arg1, 0);
6471 #ifdef TARGET_NR_arm_fadvise64_64
6472 case TARGET_NR_arm_fadvise64_64:
6475 * arm_fadvise64_64 looks like fadvise64_64 but
6476 * with different argument order
6484 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
6485 #ifdef TARGET_NR_fadvise64_64
6486 case TARGET_NR_fadvise64_64:
6488 /* This is a hint, so ignoring and returning success is ok. */
6492 #ifdef TARGET_NR_madvise
6493 case TARGET_NR_madvise:
6494 /* A straight passthrough may not be safe because qemu sometimes
6495 turns private file-backed mappings into anonymous mappings.
6496 This will break MADV_DONTNEED.
6497 This is a hint, so ignoring and returning success is ok. */
6501 #if TARGET_ABI_BITS == 32
6502 case TARGET_NR_fcntl64:
6506 struct target_flock64 *target_fl;
6508 struct target_eabi_flock64 *target_efl;
6512 case TARGET_F_GETLK64:
6515 case TARGET_F_SETLK64:
6518 case TARGET_F_SETLKW64:
6527 case TARGET_F_GETLK64:
6529 if (((CPUARMState *)cpu_env)->eabi) {
6530 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6532 fl.l_type = tswap16(target_efl->l_type);
6533 fl.l_whence = tswap16(target_efl->l_whence);
6534 fl.l_start = tswap64(target_efl->l_start);
6535 fl.l_len = tswap64(target_efl->l_len);
6536 fl.l_pid = tswapl(target_efl->l_pid);
6537 unlock_user_struct(target_efl, arg3, 0);
6541 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6543 fl.l_type = tswap16(target_fl->l_type);
6544 fl.l_whence = tswap16(target_fl->l_whence);
6545 fl.l_start = tswap64(target_fl->l_start);
6546 fl.l_len = tswap64(target_fl->l_len);
6547 fl.l_pid = tswapl(target_fl->l_pid);
6548 unlock_user_struct(target_fl, arg3, 0);
6550 ret = get_errno(fcntl(arg1, cmd, &fl));
6553 if (((CPUARMState *)cpu_env)->eabi) {
6554 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
6556 target_efl->l_type = tswap16(fl.l_type);
6557 target_efl->l_whence = tswap16(fl.l_whence);
6558 target_efl->l_start = tswap64(fl.l_start);
6559 target_efl->l_len = tswap64(fl.l_len);
6560 target_efl->l_pid = tswapl(fl.l_pid);
6561 unlock_user_struct(target_efl, arg3, 1);
6565 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
6567 target_fl->l_type = tswap16(fl.l_type);
6568 target_fl->l_whence = tswap16(fl.l_whence);
6569 target_fl->l_start = tswap64(fl.l_start);
6570 target_fl->l_len = tswap64(fl.l_len);
6571 target_fl->l_pid = tswapl(fl.l_pid);
6572 unlock_user_struct(target_fl, arg3, 1);
6577 case TARGET_F_SETLK64:
6578 case TARGET_F_SETLKW64:
6580 if (((CPUARMState *)cpu_env)->eabi) {
6581 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6583 fl.l_type = tswap16(target_efl->l_type);
6584 fl.l_whence = tswap16(target_efl->l_whence);
6585 fl.l_start = tswap64(target_efl->l_start);
6586 fl.l_len = tswap64(target_efl->l_len);
6587 fl.l_pid = tswapl(target_efl->l_pid);
6588 unlock_user_struct(target_efl, arg3, 0);
6592 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6594 fl.l_type = tswap16(target_fl->l_type);
6595 fl.l_whence = tswap16(target_fl->l_whence);
6596 fl.l_start = tswap64(target_fl->l_start);
6597 fl.l_len = tswap64(target_fl->l_len);
6598 fl.l_pid = tswapl(target_fl->l_pid);
6599 unlock_user_struct(target_fl, arg3, 0);
6601 ret = get_errno(fcntl(arg1, cmd, &fl));
6604 ret = do_fcntl(arg1, arg2, arg3);
6610 #ifdef TARGET_NR_cacheflush
6611 case TARGET_NR_cacheflush:
6612 /* self-modifying code is handled automatically, so nothing needed */
6616 #ifdef TARGET_NR_security
6617 case TARGET_NR_security:
6620 #ifdef TARGET_NR_getpagesize
6621 case TARGET_NR_getpagesize:
6622 ret = TARGET_PAGE_SIZE;
6625 case TARGET_NR_gettid:
6626 ret = get_errno(gettid());
6628 #ifdef TARGET_NR_readahead
6629 case TARGET_NR_readahead:
6630 #if TARGET_ABI_BITS == 32
6632 if (((CPUARMState *)cpu_env)->eabi)
6639 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
6641 ret = get_errno(readahead(arg1, arg2, arg3));
6645 #ifdef TARGET_NR_setxattr
6646 case TARGET_NR_setxattr:
6647 case TARGET_NR_lsetxattr:
6648 case TARGET_NR_fsetxattr:
6649 case TARGET_NR_getxattr:
6650 case TARGET_NR_lgetxattr:
6651 case TARGET_NR_fgetxattr:
6652 case TARGET_NR_listxattr:
6653 case TARGET_NR_llistxattr:
6654 case TARGET_NR_flistxattr:
6655 case TARGET_NR_removexattr:
6656 case TARGET_NR_lremovexattr:
6657 case TARGET_NR_fremovexattr:
6658 ret = -TARGET_EOPNOTSUPP;
6661 #ifdef TARGET_NR_set_thread_area
6662 case TARGET_NR_set_thread_area:
6663 #if defined(TARGET_MIPS)
6664 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
6667 #elif defined(TARGET_CRIS)
6669 ret = -TARGET_EINVAL;
6671 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
6675 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6676 ret = do_set_thread_area(cpu_env, arg1);
6679 goto unimplemented_nowarn;
6682 #ifdef TARGET_NR_get_thread_area
6683 case TARGET_NR_get_thread_area:
6684 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6685 ret = do_get_thread_area(cpu_env, arg1);
6687 goto unimplemented_nowarn;
6690 #ifdef TARGET_NR_getdomainname
6691 case TARGET_NR_getdomainname:
6692 goto unimplemented_nowarn;
6695 #ifdef TARGET_NR_clock_gettime
6696 case TARGET_NR_clock_gettime:
6699 ret = get_errno(clock_gettime(arg1, &ts));
6700 if (!is_error(ret)) {
6701 host_to_target_timespec(arg2, &ts);
6706 #ifdef TARGET_NR_clock_getres
6707 case TARGET_NR_clock_getres:
6710 ret = get_errno(clock_getres(arg1, &ts));
6711 if (!is_error(ret)) {
6712 host_to_target_timespec(arg2, &ts);
6717 #ifdef TARGET_NR_clock_nanosleep
6718 case TARGET_NR_clock_nanosleep:
6721 target_to_host_timespec(&ts, arg3);
6722 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
6724 host_to_target_timespec(arg4, &ts);
6729 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
6730 case TARGET_NR_set_tid_address:
6731 ret = get_errno(set_tid_address((int *)g2h(arg1)));
6735 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
6736 case TARGET_NR_tkill:
6737 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
6741 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
6742 case TARGET_NR_tgkill:
6743 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
6744 target_to_host_signal(arg3)));
6748 #ifdef TARGET_NR_set_robust_list
6749 case TARGET_NR_set_robust_list:
6750 goto unimplemented_nowarn;
6753 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
6754 case TARGET_NR_utimensat:
6756 struct timespec *tsp, ts[2];
6760 target_to_host_timespec(ts, arg3);
6761 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
6765 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
6767 if (!(p = lock_user_string(arg2))) {
6768 ret = -TARGET_EFAULT;
6771 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
6772 unlock_user(p, arg2, 0);
6777 #if defined(USE_NPTL)
6778 case TARGET_NR_futex:
6779 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
6782 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
6783 case TARGET_NR_inotify_init:
6784 ret = get_errno(sys_inotify_init());
6787 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
6788 case TARGET_NR_inotify_add_watch:
6789 p = lock_user_string(arg2);
6790 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
6791 unlock_user(p, arg2, 0);
6794 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
6795 case TARGET_NR_inotify_rm_watch:
6796 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
6800 #ifdef TARGET_NR_mq_open
6801 case TARGET_NR_mq_open:
6803 struct mq_attr posix_mq_attr;
6805 p = lock_user_string(arg1 - 1);
6807 copy_from_user_mq_attr (&posix_mq_attr, arg4);
6808 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
6809 unlock_user (p, arg1, 0);
6813 case TARGET_NR_mq_unlink:
6814 p = lock_user_string(arg1 - 1);
6815 ret = get_errno(mq_unlink(p));
6816 unlock_user (p, arg1, 0);
6819 case TARGET_NR_mq_timedsend:
6823 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6825 target_to_host_timespec(&ts, arg5);
6826 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
6827 host_to_target_timespec(arg5, &ts);
6830 ret = get_errno(mq_send(arg1, p, arg3, arg4));
6831 unlock_user (p, arg2, arg3);
6835 case TARGET_NR_mq_timedreceive:
6840 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6842 target_to_host_timespec(&ts, arg5);
6843 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
6844 host_to_target_timespec(arg5, &ts);
6847 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
6848 unlock_user (p, arg2, arg3);
6850 put_user_u32(prio, arg4);
6854 /* Not implemented for now... */
6855 /* case TARGET_NR_mq_notify: */
6858 case TARGET_NR_mq_getsetattr:
6860 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
6863 ret = mq_getattr(arg1, &posix_mq_attr_out);
6864 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
6867 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
6868 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
6877 gemu_log("qemu: Unsupported syscall: %d\n", num);
6878 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
6879 unimplemented_nowarn:
6881 ret = -TARGET_ENOSYS;
6886 gemu_log(" = %ld\n", ret);
6889 print_syscall_ret(num, ret);
6892 ret = -TARGET_EFAULT;