4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
33 #include <sys/types.h>
39 #include <sys/mount.h>
40 #include <sys/prctl.h>
41 #include <sys/resource.h>
46 #include <sys/socket.h>
50 #include <sys/times.h>
53 #include <sys/statfs.h>
55 #include <sys/sysinfo.h>
56 #include <sys/utsname.h>
57 //#include <sys/user.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 #include <qemu-common.h>
65 #define termios host_termios
66 #define winsize host_winsize
67 #define termio host_termio
68 #define sgttyb host_sgttyb /* same as target */
69 #define tchars host_tchars /* same as target */
70 #define ltchars host_ltchars /* same as target */
72 #include <linux/termios.h>
73 #include <linux/unistd.h>
74 #include <linux/utsname.h>
75 #include <linux/cdrom.h>
76 #include <linux/hdreg.h>
77 #include <linux/soundcard.h>
79 #include <linux/mtio.h>
81 #include "linux_loop.h"
84 #include "qemu-common.h"
87 #include <linux/futex.h>
/* Clone flags needed for NPTL thread creation (TLS setup plus
 * parent/child TID bookkeeping).  The two definitions below are
 * presumably the two arms of a USE_NPTL conditional — TODO confirm
 * against the full file. */
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
/* XXX: Hardcode the above values. */
#define CLONE_NPTL_FLAGS2 0
//#include <linux/msdos_fs.h>
/* VFAT directory-read ioctls, defined locally instead of pulling in
 * <linux/msdos_fs.h> (see commented-out include above). */
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/*
 * _syscallN(type, name, ...): declare a static wrapper 'name' that
 * invokes syscall number __NR_<name> with N arguments through the
 * generic syscall(2) entry point.  Used for host syscalls the libc
 * does not expose as functions.
 */
#define _syscall0(type,name) \
static type name (void) \
return syscall(__NR_##name); \
#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
return syscall(__NR_##name, arg1); \
#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
return syscall(__NR_##name, arg1, arg2); \
#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
return syscall(__NR_##name, arg1, arg2, arg3); \
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/*
 * Map each local sys_* wrapper name onto the real host syscall number.
 * The sys_ prefix keeps these wrappers from clashing with host libc
 * declarations of the same functions.
 */
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/* 64-bit hosts have no _llseek syscall; plain lseek covers the range. */
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
#define __NR__llseek __NR_lseek
/* gettid(2): either a direct syscall stub, or (below, presumably the
 * #else arm of an elided conditional) a stub replacement. */
_syscall0(int, gettid)
/* This is a replacement for the host gettid() and must return a host
static int gettid(void) {
/* Direct syscall stubs for host calls libc lacks.  Each declaration is
 * guarded so it only exists when both the target and host know the
 * syscall. */
#if TARGET_ABI_BITS == 32
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
_syscall2(int, sys_getpriority, int, which, int, who);
#if !defined (__x86_64__)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
loff_t *, res, uint, wh);
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#if defined(USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
const struct timespec *,timeout,int *,uaddr2,int,val3)
/*
 * Bidirectional translation table for open(2)/fcntl(2) flag bits.
 * Row layout: { target_mask, target_bits, host_mask, host_bits }.
 * Consumed by target_to_host_bitmask()/host_to_target_bitmask().
 */
static bitmask_transtbl fcntl_flags_tbl[] = {
{ TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
{ TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
{ TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
{ TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
{ TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
{ TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
{ TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
{ TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
{ TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
{ TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
{ TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
{ TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
{ TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
/* O_DIRECT is not available on every host libc. */
#if defined(O_DIRECT)
{ TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
/*
 * Copy one utsname field, guaranteeing NUL termination (which strncpy
 * alone does not provide).
 * __NEW_UTS_LEN doesn't include terminating null.
 */
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

/*
 * Emulate uname(2): fill a Linux 'struct new_utsname' from the host's
 * uname() data, field by field.
 *
 * Returns 0 on success, -1 on failure (errno left as set by uname()).
 */
static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  /* memset() rather than the legacy bzero(), which POSIX.1-2008 removed. */
  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  /* struct utsname only carries domainname under _GNU_SOURCE. */
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);
}

#undef COPY_UTSNAME_FIELD
/*
 * Emulate the kernel getcwd syscall: on success return the length of
 * the path including its trailing NUL (the kernel's convention); on
 * failure return -1 with errno already set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
  char *cwd = getcwd(buf, size);

  if (cwd == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }

  return strlen(cwd) + 1;
}
* Host system seems to have atfile syscall stubs available. We
* now enable them one by one as specified by target syscall_nr.h.
/* Each wrapper below simply forwards to the host libc *at() function;
 * where the target ABI carries no flags argument, 0 is passed. */
#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
return (faccessat(dirfd, pathname, mode, 0));
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
return (fchmodat(dirfd, pathname, mode, 0));
/* fchownat only needed in the 16-bit-UID compatibility path. */
#if defined(TARGET_NR_fchownat) && defined(USE_UID16)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
gid_t group, int flags)
return (fchownat(dirfd, pathname, owner, group, flags));
/* Both fstatat64 and newfstatat map onto the libc fstatat(). */
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
return (fstatat(dirfd, pathname, buf, flags));
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
return (fstatat(dirfd, pathname, buf, flags));
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
const struct timeval times[2])
return (futimesat(dirfd, pathname, times));
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
int newdirfd, const char *newpath, int flags)
return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
return (mkdirat(dirfd, pathname, mode));
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
return (mknodat(dirfd, pathname, mode, dev));
/* openat is variadic like open(2): the 'mode' argument exists only
 * when O_CREAT is set, and is pulled out with va_arg before being
 * translated from target to host encoding. */
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, ...)
* open(2) has extra parameter 'mode' when called with
if ((flags & O_CREAT) != 0) {
* Get the 'mode' parameter and translate it to
mode = va_arg(ap, mode_t);
mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
return (openat(dirfd, pathname, flags, mode));
return (openat(dirfd, pathname, flags));
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
return (readlinkat(dirfd, pathname, buf, bufsiz));
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
int newdirfd, const char *newpath)
return (renameat(olddirfd, oldpath, newdirfd, newpath));
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
return (symlinkat(oldpath, newdirfd, newpath));
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
return (unlinkat(dirfd, pathname, flags));
410 #else /* !CONFIG_ATFILE */
413 * Try direct syscalls instead
415 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
416 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
418 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
419 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
421 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
422 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
423 uid_t,owner,gid_t,group,int,flags)
425 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
426 defined(__NR_fstatat64)
427 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
428 struct stat *,buf,int,flags)
430 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
431 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
432 const struct timeval *,times)
434 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
435 defined(__NR_newfstatat)
436 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
437 struct stat *,buf,int,flags)
439 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
440 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
441 int,newdirfd,const char *,newpath,int,flags)
443 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
444 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
446 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
447 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
448 mode_t,mode,dev_t,dev)
450 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
451 defined(__NR_newfstatat)
452 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
453 struct stat *,buf,int,flags)
455 #if defined(TARGET_NR_openat) && defined(__NR_openat)
456 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
458 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
459 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
460 char *,buf,size_t,bufsize)
462 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
463 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
464 int,newdirfd,const char *,newpath)
466 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
467 _syscall3(int,sys_symlinkat,const char *,oldpath,
468 int,newdirfd,const char *,newpath)
470 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
471 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
474 #endif /* CONFIG_ATFILE */
#ifdef CONFIG_UTIMENSAT
/* utimensat wrapper.  A NULL pathname means "operate on dirfd itself",
 * which maps to futimens(); otherwise forward to utimensat(). */
static int sys_utimensat(int dirfd, const char *pathname,
const struct timespec times[2], int flags)
if (pathname == NULL)
return futimens(dirfd, times);
return utimensat(dirfd, pathname, times, flags);
/* No libc wrapper available: issue the syscall directly. */
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
const struct timespec *,tsp,int,flags)
#endif /* CONFIG_UTIMENSAT */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin pass-through wrappers over the host inotify API; each exists
 * only when both target and host know the corresponding syscall. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
return (inotify_init());
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
return (inotify_add_watch(fd, pathname, mask));
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
return (inotify_rm_watch(fd, wd));
/* Userspace can usually survive runtime without inotify */
/* Undefining the TARGET_NR_* symbols makes the big syscall switch
 * below fall through to ENOSYS for these. */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
/* Host functions that some libcs do not declare in their headers. */
extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

/* Size of both errno translation tables below; indexed directly by
 * errno value, so it must exceed every errno used on either side. */
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 * Entries left zero mean "same value on host and target".
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
539 [EIDRM] = TARGET_EIDRM,
540 [ECHRNG] = TARGET_ECHRNG,
541 [EL2NSYNC] = TARGET_EL2NSYNC,
542 [EL3HLT] = TARGET_EL3HLT,
543 [EL3RST] = TARGET_EL3RST,
544 [ELNRNG] = TARGET_ELNRNG,
545 [EUNATCH] = TARGET_EUNATCH,
546 [ENOCSI] = TARGET_ENOCSI,
547 [EL2HLT] = TARGET_EL2HLT,
548 [EDEADLK] = TARGET_EDEADLK,
549 [ENOLCK] = TARGET_ENOLCK,
550 [EBADE] = TARGET_EBADE,
551 [EBADR] = TARGET_EBADR,
552 [EXFULL] = TARGET_EXFULL,
553 [ENOANO] = TARGET_ENOANO,
554 [EBADRQC] = TARGET_EBADRQC,
555 [EBADSLT] = TARGET_EBADSLT,
556 [EBFONT] = TARGET_EBFONT,
557 [ENOSTR] = TARGET_ENOSTR,
558 [ENODATA] = TARGET_ENODATA,
559 [ETIME] = TARGET_ETIME,
560 [ENOSR] = TARGET_ENOSR,
561 [ENONET] = TARGET_ENONET,
562 [ENOPKG] = TARGET_ENOPKG,
563 [EREMOTE] = TARGET_EREMOTE,
564 [ENOLINK] = TARGET_ENOLINK,
565 [EADV] = TARGET_EADV,
566 [ESRMNT] = TARGET_ESRMNT,
567 [ECOMM] = TARGET_ECOMM,
568 [EPROTO] = TARGET_EPROTO,
569 [EDOTDOT] = TARGET_EDOTDOT,
570 [EMULTIHOP] = TARGET_EMULTIHOP,
571 [EBADMSG] = TARGET_EBADMSG,
572 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
573 [EOVERFLOW] = TARGET_EOVERFLOW,
574 [ENOTUNIQ] = TARGET_ENOTUNIQ,
575 [EBADFD] = TARGET_EBADFD,
576 [EREMCHG] = TARGET_EREMCHG,
577 [ELIBACC] = TARGET_ELIBACC,
578 [ELIBBAD] = TARGET_ELIBBAD,
579 [ELIBSCN] = TARGET_ELIBSCN,
580 [ELIBMAX] = TARGET_ELIBMAX,
581 [ELIBEXEC] = TARGET_ELIBEXEC,
582 [EILSEQ] = TARGET_EILSEQ,
583 [ENOSYS] = TARGET_ENOSYS,
584 [ELOOP] = TARGET_ELOOP,
585 [ERESTART] = TARGET_ERESTART,
586 [ESTRPIPE] = TARGET_ESTRPIPE,
587 [ENOTEMPTY] = TARGET_ENOTEMPTY,
588 [EUSERS] = TARGET_EUSERS,
589 [ENOTSOCK] = TARGET_ENOTSOCK,
590 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
591 [EMSGSIZE] = TARGET_EMSGSIZE,
592 [EPROTOTYPE] = TARGET_EPROTOTYPE,
593 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
594 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
595 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
596 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
597 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
598 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
599 [EADDRINUSE] = TARGET_EADDRINUSE,
600 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
601 [ENETDOWN] = TARGET_ENETDOWN,
602 [ENETUNREACH] = TARGET_ENETUNREACH,
603 [ENETRESET] = TARGET_ENETRESET,
604 [ECONNABORTED] = TARGET_ECONNABORTED,
605 [ECONNRESET] = TARGET_ECONNRESET,
606 [ENOBUFS] = TARGET_ENOBUFS,
607 [EISCONN] = TARGET_EISCONN,
608 [ENOTCONN] = TARGET_ENOTCONN,
609 [EUCLEAN] = TARGET_EUCLEAN,
610 [ENOTNAM] = TARGET_ENOTNAM,
611 [ENAVAIL] = TARGET_ENAVAIL,
612 [EISNAM] = TARGET_EISNAM,
613 [EREMOTEIO] = TARGET_EREMOTEIO,
614 [ESHUTDOWN] = TARGET_ESHUTDOWN,
615 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
616 [ETIMEDOUT] = TARGET_ETIMEDOUT,
617 [ECONNREFUSED] = TARGET_ECONNREFUSED,
618 [EHOSTDOWN] = TARGET_EHOSTDOWN,
619 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
620 [EALREADY] = TARGET_EALREADY,
621 [EINPROGRESS] = TARGET_EINPROGRESS,
622 [ESTALE] = TARGET_ESTALE,
623 [ECANCELED] = TARGET_ECANCELED,
624 [ENOMEDIUM] = TARGET_ENOMEDIUM,
625 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
627 [ENOKEY] = TARGET_ENOKEY,
630 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
633 [EKEYREVOKED] = TARGET_EKEYREVOKED,
636 [EKEYREJECTED] = TARGET_EKEYREJECTED,
639 [EOWNERDEAD] = TARGET_EOWNERDEAD,
641 #ifdef ENOTRECOVERABLE
642 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
646 static inline int host_to_target_errno(int err)
648 if(host_to_target_errno_table[err])
649 return host_to_target_errno_table[err];
653 static inline int target_to_host_errno(int err)
655 if (target_to_host_errno_table[err])
656 return target_to_host_errno_table[err];
/* Convert a host call's result into the target convention: errors are
 * returned as negative target errno values. */
static inline abi_long get_errno(abi_long ret)
return -host_to_target_errno(errno);

/* A syscall return value in the top 4096 of the address space encodes
 * a negative errno (the kernel's -MAX_ERRNO convention). */
static inline int is_error(abi_long ret)
return (abi_ulong)ret >= (abi_ulong)(-4096);

/* strerror() for a *target* errno value: translate to the host
 * numbering first. */
char *target_strerror(int err)
return strerror(target_to_host_errno(err));
/* Current and initial guest program break (guest addresses). */
static abi_ulong target_brk;
static abi_ulong target_original_brk;

/* Record the initial break, page-aligned; called at image load time. */
void target_set_brk(abi_ulong new_brk)
target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
abi_long mapped_addr;
/* Shrinking below the original break is refused, as the kernel does. */
if (new_brk < target_original_brk)
brk_page = HOST_PAGE_ALIGN(target_brk);
/* If the new brk is less than this, set it and we're done... */
if (new_brk < brk_page) {
target_brk = new_brk;
/* We need to allocate more memory after the brk... */
new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
PROT_READ|PROT_WRITE,
MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
if (!is_error(mapped_addr))
target_brk = new_brk;
/* Read a guest fd_set (packed abi_ulong words, byte-swapped as needed)
 * for 'n' descriptors into a host fd_set.  Returns 0 or -TARGET_EFAULT
 * if the guest memory cannot be locked. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
abi_ulong target_fds_addr,
abi_ulong b, *target_fds;
/* number of abi_ulong words needed to hold n bits */
nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
if (!(target_fds = lock_user(VERIFY_READ,
sizeof(abi_ulong) * nw,
return -TARGET_EFAULT;
for (i = 0; i < nw; i++) {
/* grab the abi_ulong */
__get_user(b, &target_fds[i]);
for (j = 0; j < TARGET_ABI_BITS; j++) {
/* check the bit inside the abi_ulong */
unlock_user(target_fds, target_fds_addr, 0);

/* Inverse of copy_from_user_fdset: pack a host fd_set into guest
 * abi_ulong words and write it back to guest memory. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
abi_ulong *target_fds;
nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
if (!(target_fds = lock_user(VERIFY_WRITE,
sizeof(abi_ulong) * nw,
return -TARGET_EFAULT;
for (i = 0; i < nw; i++) {
for (j = 0; j < TARGET_ABI_BITS; j++) {
v |= ((FD_ISSET(k, fds) != 0) << j);
__put_user(v, &target_fds[i]);
unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
#if defined(__alpha__)
/* Rescale a clock_t from the host tick rate to the target's; a no-op
 * (presumably, in the elided branch) when HOST_HZ == TARGET_HZ. */
static inline abi_long host_to_target_clock_t(long ticks)
#if HOST_HZ == TARGET_HZ
return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;

/* Copy a host struct rusage into guest memory, byte-swapping every
 * field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
const struct rusage *rusage)
struct target_rusage *target_rusage;
if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
return -TARGET_EFAULT;
target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
unlock_user_struct(target_rusage, target_addr, 1);
/* Read a guest struct timeval into host form (byte-swap via
 * __get_user).  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
abi_ulong target_tv_addr)
struct target_timeval *target_tv;
if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
return -TARGET_EFAULT;
__get_user(tv->tv_sec, &target_tv->tv_sec);
__get_user(tv->tv_usec, &target_tv->tv_usec);
unlock_user_struct(target_tv, target_tv_addr, 0);

/* Write a host struct timeval back into guest memory. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
const struct timeval *tv)
struct target_timeval *target_tv;
if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
return -TARGET_EFAULT;
__put_user(tv->tv_sec, &target_tv->tv_sec);
__put_user(tv->tv_usec, &target_tv->tv_usec);
unlock_user_struct(target_tv, target_tv_addr, 1);

/* Read a guest struct mq_attr (POSIX message queue attributes) into
 * host form. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
abi_ulong target_mq_attr_addr)
struct target_mq_attr *target_mq_attr;
if (!lock_user_struct(VERIFY_READ, target_mq_attr,
target_mq_attr_addr, 1))
return -TARGET_EFAULT;
__get_user(attr->mq_flags, &target_mq_attr->mq_flags);
__get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
__get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
__get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

/* Write a host struct mq_attr back into guest memory. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
const struct mq_attr *attr)
struct target_mq_attr *target_mq_attr;
if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
target_mq_attr_addr, 0))
return -TARGET_EFAULT;
__put_user(attr->mq_flags, &target_mq_attr->mq_flags);
__put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
__put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
__put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
/* do_select() must return target values and target errnos. */
/* Copies each non-NULL guest fd set and the optional timeout to host
 * form, runs host select(2), then writes the (modified) sets and
 * remaining timeout back to the guest. */
static abi_long do_select(int n,
abi_ulong rfd_addr, abi_ulong wfd_addr,
abi_ulong efd_addr, abi_ulong target_tv_addr)
fd_set rfds, wfds, efds;
fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
struct timeval tv, *tv_ptr;
if (copy_from_user_fdset(&rfds, rfd_addr, n))
return -TARGET_EFAULT;
if (copy_from_user_fdset(&wfds, wfd_addr, n))
return -TARGET_EFAULT;
if (copy_from_user_fdset(&efds, efd_addr, n))
return -TARGET_EFAULT;
/* A zero guest address means "no timeout" (block indefinitely). */
if (target_tv_addr) {
if (copy_from_user_timeval(&tv, target_tv_addr))
return -TARGET_EFAULT;
ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
if (!is_error(ret)) {
if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
return -TARGET_EFAULT;
if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
return -TARGET_EFAULT;
if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
return -TARGET_EFAULT;
if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
return -TARGET_EFAULT;
/* Read the current flag word with 'readcmd' (F_GETFL/F_GETFD), then
 * write it back with 'writecmd' (F_SETFL/F_SETFD).  Assumes 'newflag'
 * is merged into 'flags' between the two fcntl calls — that line is
 * not visible here; TODO confirm. */
static abi_long pipe_set_flag(int fd, int readcmd, int writecmd, long newflag)
int flags = fcntl(fd, readcmd);
return get_errno(flags);
flags = fcntl(fd, writecmd, flags);
return get_errno(flags);

/* Emulate pipe()/pipe2(): create a host pipe, return the two fds to
 * the guest (via registers on MIPS/SH4, via memory elsewhere), and
 * apply O_NONBLOCK / O_CLOEXEC to both ends when requested. */
static abi_long do_pipe(void *cpu_env, int pipedes, int flags)
ret = pipe(host_pipe);
return get_errno(ret);
/* MIPS and SH4 return the second fd in a register, not in memory. */
#if defined(TARGET_MIPS)
((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
#elif defined(TARGET_SH4)
((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
if (put_user_s32(host_pipe[0], pipedes)
|| put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
return -TARGET_EFAULT;
if (flags & O_NONBLOCK) {
ret = pipe_set_flag(host_pipe[0], F_GETFL, F_SETFL, O_NONBLOCK);
return get_errno(ret);
ret = pipe_set_flag(host_pipe[1], F_GETFL, F_SETFL, O_NONBLOCK);
return get_errno(ret);
if (flags & O_CLOEXEC) {
ret = pipe_set_flag(host_pipe[0], F_GETFD, F_SETFD, FD_CLOEXEC);
return get_errno(ret);
ret = pipe_set_flag(host_pipe[1], F_GETFD, F_SETFD, FD_CLOEXEC);
return get_errno(ret);
return get_errno(ret);
/* Convert a guest ip_mreq/ip_mreqn (multicast membership request) to
 * host form.  The addresses are kept in network byte order; only the
 * ifindex field (present only in the larger ip_mreqn form) needs a
 * byte swap. */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
abi_ulong target_addr,
struct target_ip_mreqn *target_smreqn;
target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
return -TARGET_EFAULT;
mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
if (len == sizeof(struct target_ip_mreqn))
mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
unlock_user(target_smreqn, target_addr, 0);

/* Convert a guest sockaddr to host form: swap sa_family and copy the
 * payload, with a fix-up for AF_UNIX lengths (below). */
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
abi_ulong target_addr,
const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
sa_family_t sa_family;
struct target_sockaddr *target_saddr;
target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
return -TARGET_EFAULT;
sa_family = tswap16(target_saddr->sa_family);
/* Oops. The caller might send a incomplete sun_path; sun_path
 * must be terminated by \0 (see the manual page), but
 * unfortunately it is quite common to specify sockaddr_un
 * length as "strlen(x->sun_path)" while it should be
 * "strlen(...) + 1". We'll fix that here if needed.
 * Linux kernel has a similar feature.
if (sa_family == AF_UNIX) {
if (len < unix_maxlen && len > 0) {
char *cp = (char*)target_saddr;
if ( cp[len-1] && !cp[len] )
if (len > unix_maxlen)
memcpy(addr, target_saddr, len);
addr->sa_family = sa_family;
unlock_user(target_saddr, target_addr, 0);

/* Inverse direction: copy a host sockaddr into guest memory and swap
 * sa_family back to target byte order. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
struct sockaddr *addr,
struct target_sockaddr *target_saddr;
target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
return -TARGET_EFAULT;
memcpy(target_saddr, addr, len);
target_saddr->sa_family = tswap16(addr->sa_family);
unlock_user(target_saddr, target_addr, len);
/* ??? Should this also swap msgh->name? */
/* Convert guest ancillary data (control messages) to host form for
 * sendmsg.  SCM_RIGHTS payloads are arrays of fds and get per-element
 * byte swaps; anything else is copied verbatim with a warning.
 * NOTE(review): cmsg_level is byte-swapped but not translated from
 * target to host numbering — relies on TARGET_SOL_SOCKET == SOL_SOCKET
 * on the supported targets; verify. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
struct target_msghdr *target_msgh)
struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
abi_long msg_controllen;
abi_ulong target_cmsg_addr;
struct target_cmsghdr *target_cmsg;
socklen_t space = 0;
msg_controllen = tswapl(target_msgh->msg_controllen);
if (msg_controllen < sizeof (struct target_cmsghdr))
target_cmsg_addr = tswapl(target_msgh->msg_control);
target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
return -TARGET_EFAULT;
while (cmsg && target_cmsg) {
void *data = CMSG_DATA(cmsg);
void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* payload length, excluding the (target) header */
int len = tswapl(target_cmsg->cmsg_len)
- TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
space += CMSG_SPACE(len);
if (space > msgh->msg_controllen) {
space -= CMSG_SPACE(len);
gemu_log("Host cmsg overflow\n");
cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
cmsg->cmsg_len = CMSG_LEN(len);
if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
memcpy(data, target_data, len);
/* SCM_RIGHTS: swap each file descriptor individually */
int *fd = (int *)data;
int *target_fd = (int *)target_data;
int i, numfds = len / sizeof(int);
for (i = 0; i < numfds; i++)
fd[i] = tswap32(target_fd[i]);
cmsg = CMSG_NXTHDR(msgh, cmsg);
target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
unlock_user(target_cmsg, target_cmsg_addr, 0);
msgh->msg_controllen = space;

/* ??? Should this also swap msgh->name? */
/* Inverse direction: convert host ancillary data received by recvmsg
 * into guest form, again special-casing SCM_RIGHTS fd arrays. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
struct msghdr *msgh)
struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
abi_long msg_controllen;
abi_ulong target_cmsg_addr;
struct target_cmsghdr *target_cmsg;
socklen_t space = 0;
msg_controllen = tswapl(target_msgh->msg_controllen);
if (msg_controllen < sizeof (struct target_cmsghdr))
target_cmsg_addr = tswapl(target_msgh->msg_control);
target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
return -TARGET_EFAULT;
while (cmsg && target_cmsg) {
void *data = CMSG_DATA(cmsg);
void *target_data = TARGET_CMSG_DATA(target_cmsg);
int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
space += TARGET_CMSG_SPACE(len);
if (space > msg_controllen) {
space -= TARGET_CMSG_SPACE(len);
gemu_log("Target cmsg overflow\n");
target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
memcpy(target_data, data, len);
int *fd = (int *)data;
int *target_fd = (int *)target_data;
int i, numfds = len / sizeof(int);
for (i = 0; i < numfds; i++)
target_fd[i] = tswap32(fd[i]);
cmsg = CMSG_NXTHDR(msgh, cmsg);
target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
unlock_user(target_cmsg, target_cmsg_addr, space);
target_msgh->msg_controllen = tswapl(space);
/* do_setsockopt() Must return target values and target errnos. */
/* Emulate setsockopt(2): translate target-side option constants and
 * option payloads (ints, ip_mreq(n), ip_mreq_source) to host form and
 * issue the host call.  NOTE: switch scaffolding and breaks are elided
 * in this view. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;
    /* TCP options all take an 'int' value. */
    if (optlen < sizeof(uint32_t))
        return -TARGET_EINVAL;
    if (get_user_u32(val, optval_addr))
        return -TARGET_EFAULT;
    ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
    case IP_ROUTER_ALERT:
    case IP_MTU_DISCOVER:
    case IP_MULTICAST_TTL:
    case IP_MULTICAST_LOOP:
    /* These IP options accept either a full int or a single byte. */
    if (optlen >= sizeof(uint32_t)) {
        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
    } else if (optlen >= 1) {
        if (get_user_u8(val, optval_addr))
            return -TARGET_EFAULT;
    ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
    case IP_ADD_MEMBERSHIP:
    case IP_DROP_MEMBERSHIP:
    /* Accept both the old ip_mreq and the newer ip_mreqn layouts. */
    if (optlen < sizeof (struct target_ip_mreq) ||
        optlen > sizeof (struct target_ip_mreqn))
        return -TARGET_EINVAL;
    ip_mreq = (struct ip_mreqn *) alloca(optlen);
    target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
    ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
    case IP_BLOCK_SOURCE:
    case IP_UNBLOCK_SOURCE:
    case IP_ADD_SOURCE_MEMBERSHIP:
    case IP_DROP_SOURCE_MEMBERSHIP:
    if (optlen != sizeof (struct target_ip_mreq_source))
        return -TARGET_EINVAL;
    /* NOTE(review): lock_user result is passed straight to setsockopt;
     * presumably a NULL check exists on an elided line — confirm. */
    ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
    ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
    unlock_user (ip_mreq_source, optval_addr, 0);
    case TARGET_SOL_SOCKET:
        /* Options with 'int' argument. */
        /* Map each target SO_* constant to the host's value. */
        case TARGET_SO_DEBUG:
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
        case TARGET_SO_TYPE:
        case TARGET_SO_ERROR:
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
    if (optlen < sizeof(uint32_t))
        return -TARGET_EINVAL;
    if (get_user_u32(val, optval_addr))
        return -TARGET_EFAULT;
    ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
    /* Unknown level: log and fail with a target errno. */
    gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
    ret = -TARGET_ENOPROTOOPT;
/* do_getsockopt() Must return target values and target errnos. */
/* Emulate getsockopt(2) for the simple 'int'-valued options; structured
 * results (linger, timevals, peercred...) are not handled here.
 * NOTE: switch scaffolding is elided in this view. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
    case TARGET_SOL_SOCKET:
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERCRED:
        case TARGET_SO_PEERNAME:
            /* These don't just return a single integer */
    /* TCP options all take an 'int' value. */
    if (get_user_u32(len, optlen))
        return -TARGET_EFAULT;
        return -TARGET_EINVAL;
    ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
    if (put_user_u32(val, optval_addr))
        return -TARGET_EFAULT;
    /* Caller asked for less than an int: write a single byte. */
    if (put_user_u8(val, optval_addr))
        return -TARGET_EFAULT;
    if (put_user_u32(len, optlen))
        return -TARGET_EFAULT;
    case IP_ROUTER_ALERT:
    case IP_MTU_DISCOVER:
    case IP_MULTICAST_TTL:
    case IP_MULTICAST_LOOP:
    if (get_user_u32(len, optlen))
        return -TARGET_EFAULT;
        return -TARGET_EINVAL;
    ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
    /* Small buffer: return the value as one byte when it fits. */
    if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
        if (put_user_u32(len, optlen)
            || put_user_u8(val, optval_addr))
            return -TARGET_EFAULT;
        /* Never report more than the size of an int back. */
        if (len > sizeof(int))
        if (put_user_u32(len, optlen)
            || put_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
    ret = -TARGET_ENOPROTOOPT;
    gemu_log("getsockopt level=%d optname=%d not yet supported\n",
    ret = -TARGET_EOPNOTSUPP;
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
/* Translate a target iovec array at target_addr into host struct iovec
 * entries in vec[], locking each buffer into host memory.  'copy' selects
 * whether buffer contents are copied in (for writes to the host call). */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
    struct target_iovec *target_vec;
    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapl(target_vec[i].iov_base);
        vec[i].iov_len = tswapl(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if a element has invalid base address. */
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
    unlock_user (target_vec, target_addr, 0);
/* Release the host buffers locked by lock_iovec(); when 'copy' is set the
 * data is copied back out to target memory (the receive direction). */
static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
    struct target_iovec *target_vec;
    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapl(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
    unlock_user (target_vec, target_addr, 0);
/* do_socket() Must return target values and target errnos. */
/* Emulate socket(2); MIPS uses different SOCK_* numbering, so remap the
 * type there.  Netlink sockets are refused outright. */
static abi_long do_socket(int domain, int type, int protocol)
#if defined(TARGET_MIPS)
    case TARGET_SOCK_DGRAM:
    case TARGET_SOCK_STREAM:
    case TARGET_SOCK_RAW:
    case TARGET_SOCK_RDM:
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
    case TARGET_SOCK_PACKET:
    /* NOTE(review): plain -EAFNOSUPPORT here, not -TARGET_EAFNOSUPPORT —
     * inconsistent with the rest of this file; confirm intent. */
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
    return get_errno(socket(domain, type, protocol));
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
    return -TARGET_EINVAL;
    /* NOTE(review): allocates addrlen+1 here but do_connect() uses plain
     * addrlen — confirm whether the extra byte is intentional. */
    addr = alloca(addrlen+1);
    target_to_host_sockaddr(addr, target_addr, addrlen);
    return get_errno(bind(sockfd, addr, addrlen));
/* do_connect() Must return target values and target errnos. */
/* Convert the target sockaddr into a host-format copy on the stack and
 * issue the host connect(2). */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
    return -TARGET_EINVAL;
    addr = alloca(addrlen);
    target_to_host_sockaddr(addr, target_addr, addrlen);
    return get_errno(connect(sockfd, addr, addrlen));
/* do_sendrecvmsg() Must return target values and target errnos. */
/* Shared implementation for sendmsg/recvmsg ('send' selects direction):
 * builds a host msghdr from the target one, converts iovecs and control
 * messages both ways.  NOTE: some lines are elided in this view. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
    struct target_msghdr *msgp;
    abi_ulong target_vec;
    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    /* Double the control buffer: host cmsg headers may be larger than
     * the target's, so host conversion needs extra room. */
    msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);
    count = tswapl(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapl(msgp->msg_iov);
    /* For send: read target buffers; for recv: map them writable. */
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    ret = target_to_host_cmsg(&msg, msgp);
    ret = get_errno(sendmsg(fd, &msg, flags));
    ret = get_errno(recvmsg(fd, &msg, flags));
    if (!is_error(ret)) {
        /* Receive succeeded: convert ancillary data back to target form. */
        ret = host_to_target_cmsg(msgp, &msg);
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
/* do_accept() Must return target values and target errnos. */
/* Emulate accept(2); a NULL target address means the caller does not
 * want the peer address back. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;
        return -TARGET_EINVAL;
    addr = alloca(addrlen);
    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        /* Copy the (possibly shortened) peer address back out. */
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
/* do_getpeername() Must return target values and target errnos. */
/* Emulate getpeername(2): fetch the remote address into a host buffer
 * and convert it back to target layout. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;
        return -TARGET_EINVAL;
    addr = alloca(addrlen);
    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
1655 /* do_getsockname() Must return target values and target errnos. */
1656 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1657 abi_ulong target_addrlen_addr)
1663 if (target_addr == 0)
1664 return get_errno(accept(fd, NULL, NULL));
1666 if (get_user_u32(addrlen, target_addrlen_addr))
1667 return -TARGET_EFAULT;
1670 return -TARGET_EINVAL;
1672 addr = alloca(addrlen);
1674 ret = get_errno(getsockname(fd, addr, &addrlen));
1675 if (!is_error(ret)) {
1676 host_to_target_sockaddr(target_addr, addr, addrlen);
1677 if (put_user_u32(addrlen, target_addrlen_addr))
1678 ret = -TARGET_EFAULT;
1683 /* do_socketpair() Must return target values and target errnos. */
1684 static abi_long do_socketpair(int domain, int type, int protocol,
1685 abi_ulong target_tab_addr)
1690 ret = get_errno(socketpair(domain, type, protocol, tab));
1691 if (!is_error(ret)) {
1692 if (put_user_s32(tab[0], target_tab_addr)
1693 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1694 ret = -TARGET_EFAULT;
/* do_sendto() Must return target values and target errnos. */
/* Emulate sendto(2); when target_addr is 0 this degenerates to send(2). */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
    return -TARGET_EINVAL;
    host_msg = lock_user(VERIFY_READ, msg, len, 1);
        return -TARGET_EFAULT;
    /* Destination supplied: convert the target sockaddr first. */
    addr = alloca(addrlen);
    target_to_host_sockaddr(addr, target_addr, addrlen);
    ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    ret = get_errno(send(fd, host_msg, len, flags));
    unlock_user(host_msg, msg, 0);
/* do_recvfrom() Must return target values and target errnos. */
/* Emulate recvfrom(2); when target_addr is 0 this degenerates to recv(2)
 * and no source address is reported.  NOTE: some lines are elided. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        return -TARGET_EFAULT;
    if (get_user_u32(addrlen, target_addrlen)) {
        ret = -TARGET_EFAULT;
        ret = -TARGET_EINVAL;
    addr = alloca(addrlen);
    ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    addr = NULL; /* To keep compiler quiet. */
    ret = get_errno(recv(fd, host_msg, len, flags));
    if (!is_error(ret)) {
        /* Report the sender's address and its (possibly updated) length. */
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
        /* Success: copy received bytes back to the target buffer. */
        unlock_user(host_msg, msg, len);
    /* Failure path: release without copying anything back. */
    unlock_user(host_msg, msg, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
/* Demultiplex the socketcall(2) super-syscall: 'num' selects the
 * operation and vptr points at an array of abi_ulong arguments in
 * target memory.  Each case fetches its arguments then delegates to the
 * matching do_* helper.  NOTE: case labels/breaks partly elided. */
static abi_long do_socketcall(int num, abi_ulong vptr)
    /* Size of one argument slot in the target argument array. */
    const int n = sizeof(abi_ulong);
        int domain, type, protocol;
        if (get_user_s32(domain, vptr)
            || get_user_s32(type, vptr + n)
            || get_user_s32(protocol, vptr + 2 * n))
            return -TARGET_EFAULT;
        ret = do_socket(domain, type, protocol);
        abi_ulong target_addr;
        if (get_user_s32(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_u32(addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;
        ret = do_bind(sockfd, target_addr, addrlen);
    case SOCKOP_connect:
        abi_ulong target_addr;
        if (get_user_s32(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_u32(addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;
        ret = do_connect(sockfd, target_addr, addrlen);
        int sockfd, backlog;
        if (get_user_s32(sockfd, vptr)
            || get_user_s32(backlog, vptr + n))
            return -TARGET_EFAULT;
        /* listen needs no argument conversion; call the host directly. */
        ret = get_errno(listen(sockfd, backlog));
        abi_ulong target_addr, target_addrlen;
        if (get_user_s32(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_u32(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;
        ret = do_accept(sockfd, target_addr, target_addrlen);
    case SOCKOP_getsockname:
        abi_ulong target_addr, target_addrlen;
        if (get_user_s32(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_u32(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;
        ret = do_getsockname(sockfd, target_addr, target_addrlen);
    case SOCKOP_getpeername:
        abi_ulong target_addr, target_addrlen;
        if (get_user_s32(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_u32(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;
        ret = do_getpeername(sockfd, target_addr, target_addrlen);
    case SOCKOP_socketpair:
        int domain, type, protocol;
        if (get_user_s32(domain, vptr)
            || get_user_s32(type, vptr + n)
            || get_user_s32(protocol, vptr + 2 * n)
            || get_user_ual(tab, vptr + 3 * n))
            return -TARGET_EFAULT;
        ret = do_socketpair(domain, type, protocol, tab);
        /* send: sendto with no destination address. */
        if (get_user_s32(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_s32(flags, vptr + 3 * n))
            return -TARGET_EFAULT;
        ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        /* recv: recvfrom with no source-address buffer. */
        if (get_user_s32(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_s32(flags, vptr + 3 * n))
            return -TARGET_EFAULT;
        ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        if (get_user_s32(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_s32(flags, vptr + 3 * n)
            || get_user_ual(addr, vptr + 4 * n)
            || get_user_u32(addrlen, vptr + 5 * n))
            return -TARGET_EFAULT;
        ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
    case SOCKOP_recvfrom:
        if (get_user_s32(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_s32(flags, vptr + 3 * n)
            || get_user_ual(addr, vptr + 4 * n)
            || get_user_u32(addrlen, vptr + 5 * n))
            return -TARGET_EFAULT;
        ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
    case SOCKOP_shutdown:
        if (get_user_s32(sockfd, vptr)
            || get_user_s32(how, vptr + n))
            return -TARGET_EFAULT;
        ret = get_errno(shutdown(sockfd, how));
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
        abi_ulong target_msg;
        if (get_user_s32(fd, vptr)
            || get_user_ual(target_msg, vptr + n)
            || get_user_s32(flags, vptr + 2 * n))
            return -TARGET_EFAULT;
        ret = do_sendrecvmsg(fd, target_msg, flags,
                             (num == SOCKOP_sendmsg));
    case SOCKOP_setsockopt:
        if (get_user_s32(sockfd, vptr)
            || get_user_s32(level, vptr + n)
            || get_user_s32(optname, vptr + 2 * n)
            || get_user_ual(optval, vptr + 3 * n)
            || get_user_u32(optlen, vptr + 4 * n))
            return -TARGET_EFAULT;
        ret = do_setsockopt(sockfd, level, optname, optval, optlen);
    case SOCKOP_getsockopt:
        if (get_user_s32(sockfd, vptr)
            || get_user_s32(level, vptr + n)
            || get_user_s32(optname, vptr + 2 * n)
            || get_user_ual(optval, vptr + 3 * n)
            || get_user_u32(optlen, vptr + 4 * n))
            return -TARGET_EFAULT;
        ret = do_getsockopt(sockfd, level, optname, optval, optlen);
    /* Unknown operation number. */
    gemu_log("Unsupported socketcall: %d\n", num);
    ret = -TARGET_ENOSYS;
/* Fixed-size table tracking guest shm attachments (see do_shmat/do_shmdt);
 * fields (start, size) are elided in this view. */
#define N_SHM_REGIONS 32
static struct shm_region {
} shm_regions[N_SHM_REGIONS];
/* Target-ABI layout of struct ipc_perm; leading key/uid/gid fields are
 * elided in this view. */
struct target_ipc_perm
    unsigned short int mode;
    unsigned short int __pad1;
    unsigned short int __seq;
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
/* Target-ABI layout of struct semid_ds. */
struct target_semid_ds
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
/* Copy the ipc_perm embedded in a target semid_ds at target_addr into
 * host_ip, byte-swapping each field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;
    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip=&(target_sd->sem_perm);
    host_ip->__key = tswapl(target_ip->__key);
    host_ip->uid = tswapl(target_ip->uid);
    host_ip->gid = tswapl(target_ip->gid);
    host_ip->cuid = tswapl(target_ip->cuid);
    host_ip->cgid = tswapl(target_ip->cgid);
    host_ip->mode = tswapl(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);
/* Write host_ip back into the ipc_perm embedded in the target semid_ds
 * at target_addr, byte-swapping each field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;
    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapl(host_ip->__key);
    target_ip->uid = tswapl(host_ip->uid);
    target_ip->gid = tswapl(host_ip->gid);
    target_ip->cuid = tswapl(host_ip->cuid);
    target_ip->cgid = tswapl(host_ip->cgid);
    target_ip->mode = tswapl(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);
/* Convert a target semid_ds at target_addr into *host_sd.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
    struct target_semid_ds *target_sd;
    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* NOTE(review): returning here leaves target_sd locked — the inner
     * helper locks/unlocks the same struct independently; confirm this
     * double-lock pattern is benign for this lock_user implementation. */
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
    host_sd->sem_otime = tswapl(target_sd->sem_otime);
    host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
2112 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2113 struct semid_ds *host_sd)
2115 struct target_semid_ds *target_sd;
2117 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2118 return -TARGET_EFAULT;
2119 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2120 return -TARGET_EFAULT;;
2121 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2122 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2123 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2124 unlock_user_struct(target_sd, target_addr, 1);
/* Target-ABI layout of struct seminfo (sem* fields elided in this view). */
struct target_seminfo {
/* Copy a host struct seminfo out to target memory field by field
 * (SEM_INFO/IPC_INFO result).  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
/* Members of the host-side 'union semun' (its opening line is elided in
 * this view), followed by the start of the target-ABI equivalent. */
struct semid_ds *buf;
unsigned short *array;
struct seminfo *__buf;
union target_semun {
/* Read a GETALL/SETALL semaphore-value array from target memory into a
 * freshly malloc'd host array (*host_array).  The set size is queried
 * from the kernel via IPC_STAT first.  Caller owns *host_array. */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
    unsigned short *array;
    struct semid_ds semid_ds;
    semun.buf = &semid_ds;
    ret = semctl(semid, 0, IPC_STAT, semun);
        return get_errno(ret);
    nsems = semid_ds.sem_nsems;
    /* NOTE(review): malloc result is not NULL-checked, and on the EFAULT
     * path below the allocation is never freed — confirm upstream. */
    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
        return -TARGET_EFAULT;
    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    unlock_user(array, target_addr, 0);
/* Write the host semaphore-value array *host_array back to target memory
 * (GETALL result); the set size is re-queried via IPC_STAT. */
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
    unsigned short *array;
    struct semid_ds semid_ds;
    semun.buf = &semid_ds;
    ret = semctl(semid, 0, IPC_STAT, semun);
        return get_errno(ret);
    nsems = semid_ds.sem_nsems;
    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
        return -TARGET_EFAULT;
    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    unlock_user(array, target_addr, 1);
/* Emulate semctl(2): convert the semun argument for each command class
 * (value, array, semid_ds, seminfo), call the host, and convert results
 * back.  NOTE: case labels and error checks are partly elided. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
    struct semid_ds dsarg;
    unsigned short *array;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    /* GETVAL/SETVAL: plain integer value. */
    arg.val = tswapl(target_su.val);
    ret = get_errno(semctl(semid, semnum, cmd, arg));
    target_su.val = tswapl(arg.val);
    /* GETALL/SETALL: whole value array round-trip. */
    err = target_to_host_semarray(semid, &array, target_su.array);
    ret = get_errno(semctl(semid, semnum, cmd, arg));
    err = host_to_target_semarray(semid, target_su.array, &array);
    /* IPC_STAT/IPC_SET: semid_ds round-trip. */
    err = target_to_host_semid_ds(&dsarg, target_su.buf);
    ret = get_errno(semctl(semid, semnum, cmd, arg));
    err = host_to_target_semid_ds(target_su.buf, &dsarg);
    /* IPC_INFO/SEM_INFO: kernel limits structure. */
    arg.__buf = &seminfo;
    ret = get_errno(semctl(semid, semnum, cmd, arg));
    err = host_to_target_seminfo(target_su.__buf, &seminfo);
    /* Commands without an argument (e.g. IPC_RMID, GETPID...). */
    ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Target-ABI layout of struct sembuf (sem_op/sem_flg elided in this view). */
struct target_sembuf {
    unsigned short sem_num;
/* Copy nsops target sembuf entries at target_addr into host_sembuf[],
 * byte-swapping each field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
    struct target_sembuf *target_sembuf;
    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
        return -TARGET_EFAULT;
    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    unlock_user(target_sembuf, target_addr, 0);
2326 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2328 struct sembuf sops[nsops];
2330 if (target_to_host_sembuf(sops, ptr, nsops))
2331 return -TARGET_EFAULT;
2333 return semop(semid, sops, nsops);
/* Target-ABI layout of struct msqid_ds; 32-bit ABIs carry padding words
 * after each time field. */
struct target_msqid_ds
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
    abi_ulong __msg_cbytes;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
/* Convert a target msqid_ds at target_addr into *host_md.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
    struct target_msqid_ds *target_md;
    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapl(target_md->msg_stime);
    host_md->msg_rtime = tswapl(target_md->msg_rtime);
    host_md->msg_ctime = tswapl(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapl(target_md->msg_qnum);
    host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
    host_md->msg_lspid = tswapl(target_md->msg_lspid);
    host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
/* Write *host_md back into the target msqid_ds at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
    struct target_msqid_ds *target_md;
    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapl(host_md->msg_stime);
    target_md->msg_rtime = tswapl(host_md->msg_rtime);
    target_md->msg_ctime = tswapl(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapl(host_md->msg_qnum);
    target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
    target_md->msg_lspid = tswapl(host_md->msg_lspid);
    target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
/* Target-ABI layout of struct msginfo (most fields elided in this view). */
struct target_msginfo {
    unsigned short int msgseg;
/* Copy a host struct msginfo out to target memory field by field
 * (IPC_INFO/MSG_INFO result).  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulate msgctl(2): IPC_STAT/IPC_SET round-trip a msqid_ds, IPC_RMID
 * takes no argument, IPC_INFO/MSG_INFO return a msginfo.
 * NOTE: case labels are elided in this view. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;
    if (target_to_host_msqid_ds(&dsarg,ptr))
        return -TARGET_EFAULT;
    ret = get_errno(msgctl(msgid, cmd, &dsarg));
    if (host_to_target_msqid_ds(ptr,&dsarg))
        return -TARGET_EFAULT;
    ret = get_errno(msgctl(msgid, cmd, NULL));
    /* IPC_INFO/MSG_INFO abuse the msqid_ds pointer for a msginfo. */
    ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
    if (host_to_target_msginfo(ptr, &msginfo))
        return -TARGET_EFAULT;
/* Target-ABI layout of struct msgbuf (mtype/mtext elided in this view). */
struct target_msgbuf {
/* Emulate msgsnd(2): build a host msgbuf (swapped mtype + raw payload)
 * and send it.  NOTE: some lines (including, presumably, free(host_mb))
 * are elided in this view. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* NOTE(review): malloc result is not NULL-checked before use. */
    host_mb = malloc(msgsz+sizeof(long));
    host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    unlock_user_struct(target_mb, msgp, 0);
/* Emulate msgrcv(2): receive into a host msgbuf, then copy the payload
 * and swapped mtype back to the target's buffer.  NOTE: some lines are
 * elided in this view. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* NOTE(review): malloc result is not NULL-checked before use. */
    host_mb = malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
    /* mtext lives right after the abi_ulong mtype in the target struct. */
    abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
    target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
    if (!target_mtext) {
        ret = -TARGET_EFAULT;
    memcpy(target_mb->mtext, host_mb->mtext, ret);
    unlock_user(target_mtext, target_mtext_addr, ret);
    target_mb->mtype = tswapl(host_mb->mtype);
    unlock_user_struct(target_mb, msgp, 1);
/* Target-ABI layout of struct shmid_ds; 32-bit ABIs carry padding words
 * after each time field.  cpid/lpid fields are elided in this view. */
struct target_shmid_ds
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;
    abi_ulong shm_atime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
    abi_ulong shm_dtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
    abi_ulong shm_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;
/* Convert a target shmid_ds at target_addr into *host_sd.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
    struct target_shmid_ds *target_sd;
    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    /* __put_user here writes into HOST memory while applying the byte
     * swap, which is what converting a target-order source requires. */
    __put_user(target_sd->shm_segsz, &host_sd->shm_segsz);
    __put_user(target_sd->shm_atime, &host_sd->shm_atime);
    __put_user(target_sd->shm_dtime, &host_sd->shm_dtime);
    __put_user(target_sd->shm_ctime, &host_sd->shm_ctime);
    __put_user(target_sd->shm_cpid, &host_sd->shm_cpid);
    __put_user(target_sd->shm_lpid, &host_sd->shm_lpid);
    __put_user(target_sd->shm_nattch, &host_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
/* Write *host_sd back into the target shmid_ds at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
    struct target_shmid_ds *target_sd;
    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
/* Target-ABI layout of struct shminfo (shm* limit fields elided). */
struct target_shminfo {
/* Copy a host struct shminfo out to target memory (IPC_INFO result).
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
/* Target-ABI layout of struct shm_info (used_ids/shm_* fields elided). */
struct target_shm_info {
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
/* Copy a host struct shm_info out to target memory (SHM_INFO result).
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(2): IPC_STAT/IPC_SET round-trip a shmid_ds, IPC_INFO
 * returns a shminfo, SHM_INFO a shm_info; other commands pass NULL.
 * NOTE: case labels are elided in this view. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;
    if (target_to_host_shmid_ds(&dsarg, buf))
        return -TARGET_EFAULT;
    ret = get_errno(shmctl(shmid, cmd, &dsarg));
    if (host_to_target_shmid_ds(buf, &dsarg))
        return -TARGET_EFAULT;
    /* IPC_INFO/SHM_INFO abuse the shmid_ds pointer per the Linux ABI. */
    ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
    if (host_to_target_shminfo(buf, &shminfo))
        return -TARGET_EFAULT;
    ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
    if (host_to_target_shm_info(buf, &shm_info))
        return -TARGET_EFAULT;
    ret = get_errno(shmctl(shmid, cmd, NULL));
/* Emulate shmat(2).  Attaches the segment at the guest-requested address
   when one is given, otherwise finds a free guest VMA, then records the
   mapping in shm_regions[] and updates guest page flags. */
2672 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2676 struct shmid_ds shm_info;
2679 /* find out the length of the shared memory segment */
2680 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2681 if (is_error(ret)) {
2682 /* can't get length, bail out */
/* explicit guest address: translate it and attach there. */
2689 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2691 abi_ulong mmap_start;
/* no address given: carve out a free region in the guest address space. */
2693 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2695 if (mmap_start == -1) {
2697 host_raddr = (void *)-1;
/* SHM_REMAP because mmap_find_vma may have reserved the range already. */
2699 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2702 if (host_raddr == (void *)-1) {
2704 return get_errno((long)host_raddr);
2706 raddr=h2g((unsigned long)host_raddr);
/* mark the attached range valid/readable (writable unless SHM_RDONLY). */
2708 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2709 PAGE_VALID | PAGE_READ |
2710 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* remember the region so do_shmdt() can clear the page flags later. */
2712 for (i = 0; i < N_SHM_REGIONS; i++) {
2713 if (shm_regions[i].start == 0) {
2714 shm_regions[i].start = raddr;
2715 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): drop the bookkeeping entry and clear guest page
   flags before detaching on the host side. */
2725 static inline abi_long do_shmdt(abi_ulong shmaddr)
2729 for (i = 0; i < N_SHM_REGIONS; ++i) {
2730 if (shm_regions[i].start == shmaddr) {
2731 shm_regions[i].start = 0;
/* NOTE(review): second argument looks like it should be the END address
   (shmaddr + shm_regions[i].size) to match page_set_flags' (start, end)
   convention used in do_shmat — verify against page_set_flags' contract. */
2732 page_set_flags(shmaddr, shm_regions[i].size, 0);
2737 return get_errno(shmdt(g2h(shmaddr)));
2740 #ifdef TARGET_NR_ipc
2741 /* ??? This only works with linear mappings. */
2742 /* do_ipc() must return target values and target errnos. */
/* Demultiplexer for the legacy ipc(2) syscall: dispatches on `call`
   (SEMOP/SEMGET/SEMCTL/MSGGET/MSGSND/MSGCTL/MSGRCV/SHMAT/SHMDT/SHMGET/
   SHMCTL) to the corresponding do_* helper or host syscall.
   NOTE(review): the switch and case labels fall on elided lines. */
2743 static abi_long do_ipc(unsigned int call, int first,
2744 int second, int third,
2745 abi_long ptr, abi_long fifth)
/* the high 16 bits of `call` carry the IPC interface version. */
2750 version = call >> 16;
2755 ret = do_semop(first, ptr, second);
2759 ret = get_errno(semget(first, second, third));
2763 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2767 ret = get_errno(msgget(first, second));
2771 ret = do_msgsnd(first, ptr, second, third);
2775 ret = do_msgctl(first, second, ptr);
/* old-style MSGRCV passes its arguments through a kludge struct. */
2782 struct target_ipc_kludge {
2787 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2788 ret = -TARGET_EFAULT;
2792 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2794 unlock_user_struct(tmp, ptr, 0);
2798 ret = do_msgrcv(first, ptr, second, fifth, third);
/* SHMAT: attach, then store the returned guest address through `third`. */
2807 raddr = do_shmat(first, ptr, second);
2808 if (is_error(raddr))
2809 return get_errno(raddr);
2810 if (put_user_ual(raddr, third))
2811 return -TARGET_EFAULT;
2815 ret = -TARGET_EINVAL;
2820 ret = do_shmdt(ptr);
2824 /* IPC_* flag values are the same on all linux platforms */
2825 ret = get_errno(shmget(first, second, third));
2828 /* IPC_* and SHM_* command values are the same on all linux platforms */
2830 ret = do_shmctl(first, second, third);
2833 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2834 ret = -TARGET_ENOSYS;
2841 /* kernel structure types definitions */
2844 #define STRUCT(name, list...) STRUCT_ ## name,
2845 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2847 #include "syscall_types.h"
2850 #undef STRUCT_SPECIAL
2852 #define STRUCT(name, list...) static const argtype struct_ ## name ## _def[] = { list, TYPE_NULL };
2853 #define STRUCT_SPECIAL(name)
2854 #include "syscall_types.h"
2856 #undef STRUCT_SPECIAL
2858 typedef struct IOCTLEntry {
2859 unsigned int target_cmd;
2860 unsigned int host_cmd;
2863 const argtype arg_type[5];
2866 #define IOC_R 0x0001
2867 #define IOC_W 0x0002
2868 #define IOC_RW (IOC_R | IOC_W)
2870 #define MAX_STRUCT_SIZE 4096
2872 static IOCTLEntry ioctl_entries[] = {
2873 #define IOCTL(cmd, access, types...) \
2874 { TARGET_ ## cmd, cmd, #cmd, access, { types } },
2879 /* ??? Implement proper locking for ioctls. */
2880 /* do_ioctl() Must return target values and target errnos. */
/* Look up `cmd` in ioctl_entries[], then marshal the argument according
   to the entry's thunk type description (none / plain int / pointed-to
   struct with IOC_R, IOC_W or IOC_RW access) and issue the host ioctl.
   NOTE(review): the lookup loop header and several case labels sit on
   elided lines. */
2881 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
2883 const IOCTLEntry *ie;
2884 const argtype *arg_type;
/* scratch buffer used for struct conversion; bounded by MAX_STRUCT_SIZE. */
2886 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* table is terminated by a zero target_cmd entry. */
2892 if (ie->target_cmd == 0) {
2893 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
2894 return -TARGET_ENOSYS;
2896 if (ie->target_cmd == cmd)
2900 arg_type = ie->arg_type;
2902 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
2904 switch(arg_type[0]) {
/* no argument */
2907 ret = get_errno(ioctl(fd, ie->host_cmd));
/* argument passed by value */
2912 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* pointer argument: convert via the thunk machinery */
2916 target_size = thunk_type_size(arg_type, 0);
2917 switch(ie->access) {
/* IOC_R: kernel writes — copy result out to the guest afterwards. */
2919 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2920 if (!is_error(ret)) {
2921 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2923 return -TARGET_EFAULT;
2924 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2925 unlock_user(argptr, arg, target_size);
/* IOC_W: kernel reads — copy guest data in first. */
2929 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2931 return -TARGET_EFAULT;
2932 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2933 unlock_user(argptr, arg, 0);
2934 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* IOC_RW: copy in, call, copy back out on success. */
2938 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2940 return -TARGET_EFAULT;
2941 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2942 unlock_user(argptr, arg, 0);
2943 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2944 if (!is_error(ret)) {
2945 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2947 return -TARGET_EFAULT;
2948 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2949 unlock_user(argptr, arg, target_size);
2955 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
2956 (long)cmd, arg_type[0]);
2957 ret = -TARGET_ENOSYS;
/* Bitmask translation tables for termios flags (target <-> host).
   Each row is { target_mask, target_bits, host_mask, host_bits }; used by
   target_to_host_bitmask()/host_to_target_bitmask() below.  Multi-bit
   fields (NLDLY, CRDLY, CBAUD, CSIZE, ...) get one row per value. */
2963 static const bitmask_transtbl iflag_tbl[] = {
2964 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
2965 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
2966 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
2967 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
2968 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
2969 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
2970 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
2971 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
2972 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
2973 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
2974 { TARGET_IXON, TARGET_IXON, IXON, IXON },
2975 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
2976 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
2977 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* output modes */
2981 static const bitmask_transtbl oflag_tbl[] = {
2982 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
2983 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
2984 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
2985 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
2986 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
2987 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
2988 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
2989 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
2990 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
2991 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
2992 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
2993 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
2994 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
2995 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
2996 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
2997 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
2998 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
2999 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3000 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3001 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3002 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3003 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3004 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3005 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* control modes: baud rates, character size, parity, flow control */
3009 static const bitmask_transtbl cflag_tbl[] = {
3010 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3011 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3012 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3013 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3014 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3015 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3016 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3017 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3018 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3019 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3020 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3021 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3022 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3023 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3024 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3025 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3026 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3027 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3028 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3029 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3030 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3031 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3032 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3033 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3034 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3035 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3036 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3037 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3038 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3039 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3040 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* local modes */
3044 static const bitmask_transtbl lflag_tbl[] = {
3045 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3046 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3047 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3048 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3049 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3050 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3051 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3052 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3053 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3054 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3055 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3056 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3057 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3058 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3059 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/* Convert a guest struct target_termios (src) into a host termios (dst):
   translate each flag word through its table and remap the c_cc control
   characters index by index. */
3063 static void target_to_host_termios (void *dst, const void *src)
3065 struct host_termios *host = dst;
3066 const struct target_termios *target = src;
/* tswap32 first: the guest struct is in guest byte order. */
3069 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3071 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3073 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3075 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3076 host->c_line = target->c_line;
/* zero first: host may define more control chars than the target does. */
3078 memset(host->c_cc, 0, sizeof(host->c_cc));
3079 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3080 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3081 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3082 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3083 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3084 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3085 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3086 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3087 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3088 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3089 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3090 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3091 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3092 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3093 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3094 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3095 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Inverse of target_to_host_termios: convert a host termios (src) into
   the guest struct target_termios (dst). */
3098 static void host_to_target_termios (void *dst, const void *src)
3100 struct target_termios *target = dst;
3101 const struct host_termios *host = src;
/* translate to target bit layout, then tswap32 into guest byte order. */
3104 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3106 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3108 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3110 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3111 target->c_line = host->c_line;
3113 memset(target->c_cc, 0, sizeof(target->c_cc));
3114 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3115 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3116 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3117 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3118 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3119 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3120 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3121 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3122 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3123 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3124 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3125 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3126 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3127 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3128 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3129 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3130 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor registering the termios converters above so the
   generic ioctl marshalling code can convert termios structs. */
3133 static const StructEntry struct_termios_def = {
3134 .convert = { host_to_target_termios, target_to_host_termios },
3135 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3136 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* Translation table for mmap(2) flag bits (target <-> host). */
3139 static bitmask_transtbl mmap_flags_tbl[] = {
3140 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3141 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3142 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3143 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3144 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3145 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3146 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3147 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3151 #if defined(TARGET_I386)
3153 /* NOTE: there is really one LDT for all the threads */
3154 static uint8_t *ldt_table;
/* modify_ldt(0): copy (up to bytecount bytes of) the emulated LDT out
   to guest memory at ptr. */
3156 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3163 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3164 if (size > bytecount)
3166 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3168 return -TARGET_EFAULT;
3169 /* ??? Should this be byteswapped? */
3170 memcpy(p, ldt_table, size);
3171 unlock_user(p, ptr, size);
3175 /* XXX: add locking support */
/* modify_ldt(1/0x11): install one LDT descriptor from the guest's
   struct modify_ldt_ldt_s.  Unpacks the packed `flags` word, builds the
   two 32-bit halves of an x86 descriptor (same layout as the Linux
   kernel's fill_ldt), and writes them into the emulated ldt_table,
   allocating the table on first use.  `oldmode` selects the legacy
   modify_ldt semantics.  NOTE(review): several lines (oldmode handling,
   empty-descriptor install path) are elided from this listing. */
3176 static abi_long write_ldt(CPUX86State *env,
3177 abi_ulong ptr, unsigned long bytecount, int oldmode)
3179 struct target_modify_ldt_ldt_s ldt_info;
3180 struct target_modify_ldt_ldt_s *target_ldt_info;
3181 int seg_32bit, contents, read_exec_only, limit_in_pages;
3182 int seg_not_present, useable, lm;
3183 uint32_t *lp, entry_1, entry_2;
3185 if (bytecount != sizeof(ldt_info))
3186 return -TARGET_EINVAL;
3187 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3188 return -TARGET_EFAULT;
3189 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3190 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3191 ldt_info.limit = tswap32(target_ldt_info->limit);
3192 ldt_info.flags = tswap32(target_ldt_info->flags);
3193 unlock_user_struct(target_ldt_info, ptr, 0);
3195 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3196 return -TARGET_EINVAL;
/* unpack the user_desc-style flags bitfield. */
3197 seg_32bit = ldt_info.flags & 1;
3198 contents = (ldt_info.flags >> 1) & 3;
3199 read_exec_only = (ldt_info.flags >> 3) & 1;
3200 limit_in_pages = (ldt_info.flags >> 4) & 1;
3201 seg_not_present = (ldt_info.flags >> 5) & 1;
3202 useable = (ldt_info.flags >> 6) & 1;
3206 lm = (ldt_info.flags >> 7) & 1;
3208 if (contents == 3) {
3210 return -TARGET_EINVAL;
3211 if (seg_not_present == 0)
3212 return -TARGET_EINVAL;
3214 /* allocate the LDT */
3216 env->ldt.base = target_mmap(0,
3217 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3218 PROT_READ|PROT_WRITE,
3219 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3220 if (env->ldt.base == -1)
3221 return -TARGET_ENOMEM;
3222 memset(g2h(env->ldt.base), 0,
3223 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3224 env->ldt.limit = 0xffff;
3225 ldt_table = g2h(env->ldt.base);
3228 /* NOTE: same code as Linux kernel */
3229 /* Allow LDTs to be cleared by the user. */
3230 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3233 read_exec_only == 1 &&
3235 limit_in_pages == 0 &&
3236 seg_not_present == 1 &&
/* pack base/limit/attribute bits into descriptor halves (kernel layout). */
3244 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3245 (ldt_info.limit & 0x0ffff);
3246 entry_2 = (ldt_info.base_addr & 0xff000000) |
3247 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3248 (ldt_info.limit & 0xf0000) |
3249 ((read_exec_only ^ 1) << 9) |
3251 ((seg_not_present ^ 1) << 15) |
3253 (limit_in_pages << 23) |
3257 entry_2 |= (useable << 20);
3259 /* Install the new entry ... */
/* each LDT entry is 8 bytes, hence entry_number << 3. */
3261 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3262 lp[0] = tswap32(entry_1);
3263 lp[1] = tswap32(entry_2);
3267 /* specific and weird i386 syscalls */
/* Dispatcher for modify_ldt(2): func 0 = read, 1 = write (old mode),
   0x11 = write (new mode); anything else is ENOSYS. */
3268 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3269 unsigned long bytecount)
3275 ret = read_ldt(ptr, bytecount);
3278 ret = write_ldt(env, ptr, bytecount, 1);
3281 ret = write_ldt(env, ptr, bytecount, 0);
3284 ret = -TARGET_ENOSYS;
3290 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2): install a TLS descriptor into the emulated
   GDT.  If entry_number is -1, pick the first free TLS slot and report it
   back to the guest.  Descriptor packing mirrors write_ldt() above. */
3291 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3293 uint64_t *gdt_table = g2h(env->gdt.base);
3294 struct target_modify_ldt_ldt_s ldt_info;
3295 struct target_modify_ldt_ldt_s *target_ldt_info;
3296 int seg_32bit, contents, read_exec_only, limit_in_pages;
3297 int seg_not_present, useable, lm;
3298 uint32_t *lp, entry_1, entry_2;
/* VERIFY_WRITE: we may write the chosen entry_number back to the guest. */
3301 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3302 if (!target_ldt_info)
3303 return -TARGET_EFAULT;
3304 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3305 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3306 ldt_info.limit = tswap32(target_ldt_info->limit);
3307 ldt_info.flags = tswap32(target_ldt_info->flags);
3308 if (ldt_info.entry_number == -1) {
/* scan the TLS range of the GDT for an unused (zero) slot. */
3309 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3310 if (gdt_table[i] == 0) {
3311 ldt_info.entry_number = i;
3312 target_ldt_info->entry_number = tswap32(i);
3317 unlock_user_struct(target_ldt_info, ptr, 1);
3319 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3320 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3321 return -TARGET_EINVAL;
3322 seg_32bit = ldt_info.flags & 1;
3323 contents = (ldt_info.flags >> 1) & 3;
3324 read_exec_only = (ldt_info.flags >> 3) & 1;
3325 limit_in_pages = (ldt_info.flags >> 4) & 1;
3326 seg_not_present = (ldt_info.flags >> 5) & 1;
3327 useable = (ldt_info.flags >> 6) & 1;
3331 lm = (ldt_info.flags >> 7) & 1;
3334 if (contents == 3) {
3335 if (seg_not_present == 0)
3336 return -TARGET_EINVAL;
3339 /* NOTE: same code as Linux kernel */
3340 /* Allow LDTs to be cleared by the user. */
3341 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3342 if ((contents == 0 &&
3343 read_exec_only == 1 &&
3345 limit_in_pages == 0 &&
3346 seg_not_present == 1 &&
3354 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3355 (ldt_info.limit & 0x0ffff);
3356 entry_2 = (ldt_info.base_addr & 0xff000000) |
3357 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3358 (ldt_info.limit & 0xf0000) |
3359 ((read_exec_only ^ 1) << 9) |
3361 ((seg_not_present ^ 1) << 15) |
3363 (limit_in_pages << 23) |
3368 /* Install the new entry ... */
3370 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3371 lp[0] = tswap32(entry_1);
3372 lp[1] = tswap32(entry_2);
/* Emulate get_thread_area(2): read the TLS descriptor selected by the
   guest's entry_number out of the emulated GDT and unpack it back into a
   struct modify_ldt_ldt_s for the guest (inverse of do_set_thread_area). */
3376 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3378 struct target_modify_ldt_ldt_s *target_ldt_info;
3379 uint64_t *gdt_table = g2h(env->gdt.base);
3380 uint32_t base_addr, limit, flags;
3381 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3382 int seg_not_present, useable, lm;
3383 uint32_t *lp, entry_1, entry_2;
3385 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3386 if (!target_ldt_info)
3387 return -TARGET_EFAULT;
3388 idx = tswap32(target_ldt_info->entry_number);
3389 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3390 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3391 unlock_user_struct(target_ldt_info, ptr, 1);
3392 return -TARGET_EINVAL;
3394 lp = (uint32_t *)(gdt_table + idx);
3395 entry_1 = tswap32(lp[0]);
3396 entry_2 = tswap32(lp[1]);
/* extract the attribute bits (mirrors the packing in do_set_thread_area). */
3398 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3399 contents = (entry_2 >> 10) & 3;
3400 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3401 seg_32bit = (entry_2 >> 22) & 1;
3402 limit_in_pages = (entry_2 >> 23) & 1;
3403 useable = (entry_2 >> 20) & 1;
3407 lm = (entry_2 >> 21) & 1;
3409 flags = (seg_32bit << 0) | (contents << 1) |
3410 (read_exec_only << 3) | (limit_in_pages << 4) |
3411 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3412 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3413 base_addr = (entry_1 >> 16) |
3414 (entry_2 & 0xff000000) |
3415 ((entry_2 & 0xff) << 16);
3416 target_ldt_info->base_addr = tswapl(base_addr);
3417 target_ldt_info->limit = tswap32(limit);
3418 target_ldt_info->flags = tswap32(flags);
3419 unlock_user_struct(target_ldt_info, ptr, 1);
3422 #endif /* TARGET_I386 && TARGET_ABI32 */
3424 #ifndef TARGET_ABI32
/* x86-64 arch_prctl(2): ARCH_SET/GET_FS and ARCH_SET/GET_GS — set or
   read the FS/GS segment base.  The shared SET/GET code paths pick the
   segment index (idx) based on which command was given; the idx
   assignments sit on elided lines. */
3425 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3432 case TARGET_ARCH_SET_GS:
3433 case TARGET_ARCH_SET_FS:
3434 if (code == TARGET_ARCH_SET_GS)
/* reload the segment with selector 0, then set the base directly. */
3438 cpu_x86_load_seg(env, idx, 0);
3439 env->segs[idx].base = addr;
3441 case TARGET_ARCH_GET_GS:
3442 case TARGET_ARCH_GET_FS:
3443 if (code == TARGET_ARCH_GET_GS)
3447 val = env->segs[idx].base;
3448 if (put_user(val, addr, abi_ulong))
3449 return -TARGET_EFAULT;
3452 ret = -TARGET_EINVAL;
3459 #endif /* defined(TARGET_I386) */
3461 #if defined(USE_NPTL)
3463 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
3465 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3468 pthread_mutex_t mutex;
3469 pthread_cond_t cond;
3473 abi_ulong child_tidptr;
3474 abi_ulong parent_tidptr;
/* Thread entry point for NPTL clone emulation.  Publishes the new TID to
   the requested tidptrs, unblocks signals, signals readiness to the
   parent via info->cond, then waits on clone_lock until the parent has
   finished TLS setup before running guest code. */
3478 static void *clone_func(void *arg)
3480 new_thread_info *info = arg;
3486 ts = (TaskState *)thread_env->opaque;
3487 info->tid = gettid();
/* CLONE_CHILD_SETTID / CLONE_PARENT_SETTID semantics. */
3489 if (info->child_tidptr)
3490 put_user_u32(info->tid, info->child_tidptr);
3491 if (info->parent_tidptr)
3492 put_user_u32(info->tid, info->parent_tidptr);
3493 /* Enable signals. */
3494 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3495 /* Signal to the parent that we're ready. */
3496 pthread_mutex_lock(&info->mutex);
3497 pthread_cond_broadcast(&info->cond);
3498 pthread_mutex_unlock(&info->mutex);
3499 /* Wait until the parent has finished initializing the tls state. */
3500 pthread_mutex_lock(&clone_lock);
3501 pthread_mutex_unlock(&clone_lock);
3507 /* this stack is the equivalent of the kernel stack associated with a
3509 #define NEW_STACK_SIZE 8192
3511 static int clone_func(void *arg)
3513 CPUState *env = arg;
3520 /* do_fork() Must return host values and target errnos (unlike most
3521    do_*() functions). */
/* Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests become host
   pthreads (NPTL build) or host clone() threads sharing a copied CPU
   state; everything else falls back to a host fork. */
3522 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3523 abi_ulong parent_tidptr, target_ulong newtls,
3524 abi_ulong child_tidptr)
3530 #if defined(USE_NPTL)
3531 unsigned int nptl_flags;
3535 /* Emulate vfork() with fork() */
3536 if (flags & CLONE_VFORK)
3537 flags &= ~(CLONE_VFORK | CLONE_VM);
3539 if (flags & CLONE_VM) {
3540 TaskState *parent_ts = (TaskState *)env->opaque;
3541 #if defined(USE_NPTL)
3542 new_thread_info info;
3543 pthread_attr_t attr;
/* TaskState is allocated together with the thread's emulated stack. */
3545 ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
3546 init_task_state(ts);
3547 new_stack = ts->stack;
3548 /* we create a new CPU instance. */
3549 new_env = cpu_copy(env);
3550 /* Init regs that differ from the parent. */
3551 cpu_clone_regs(new_env, newsp);
3552 new_env->opaque = ts;
3553 ts->bprm = parent_ts->bprm;
3554 ts->info = parent_ts->info;
3555 #if defined(USE_NPTL)
/* NPTL-specific flags are handled here, not passed to the host. */
3557 flags &= ~CLONE_NPTL_FLAGS2;
3559 if (nptl_flags & CLONE_CHILD_CLEARTID) {
3560 ts->child_tidptr = child_tidptr;
3563 if (nptl_flags & CLONE_SETTLS)
3564 cpu_set_tls (new_env, newtls);
3566 /* Grab a mutex so that thread setup appears atomic. */
3567 pthread_mutex_lock(&clone_lock);
3569 memset(&info, 0, sizeof(info));
3570 pthread_mutex_init(&info.mutex, NULL);
3571 pthread_mutex_lock(&info.mutex);
3572 pthread_cond_init(&info.cond, NULL);
3574 if (nptl_flags & CLONE_CHILD_SETTID)
3575 info.child_tidptr = child_tidptr;
3576 if (nptl_flags & CLONE_PARENT_SETTID)
3577 info.parent_tidptr = parent_tidptr;
3579 ret = pthread_attr_init(&attr);
3580 ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
3581 /* It is not safe to deliver signals until the child has finished
3582    initializing, so temporarily block all signals. */
3583 sigfillset(&sigmask);
3584 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3586 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3587 /* TODO: Free new CPU state if thread creation failed. */
3589 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3590 pthread_attr_destroy(&attr);
3592 /* Wait for the child to initialize. */
3593 pthread_cond_wait(&info.cond, &info.mutex);
3595 if (flags & CLONE_PARENT_SETTID)
3596 put_user_u32(ret, parent_tidptr);
3600 pthread_mutex_unlock(&info.mutex);
3601 pthread_cond_destroy(&info.cond);
3602 pthread_mutex_destroy(&info.mutex);
3603 pthread_mutex_unlock(&clone_lock);
/* non-NPTL path: NPTL flags cannot be honoured. */
3605 if (flags & CLONE_NPTL_FLAGS2)
3607 /* This is probably going to die very quickly, but do it anyway. */
3609 ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3611 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3615 /* if no CLONE_VM, we consider it is a fork */
3616 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3621 /* Child Process. */
3622 cpu_clone_regs(env, newsp);
3624 #if defined(USE_NPTL)
3625 /* There is a race condition here. The parent process could
3626    theoretically read the TID in the child process before the child
3627    tid is set. This would require using either ptrace
3628    (not implemented) or having *_tidptr to point at a shared memory
3629    mapping. We can't repeat the spinlock hack used above because
3630    the child process gets its own copy of the lock. */
3631 if (flags & CLONE_CHILD_SETTID)
3632 put_user_u32(gettid(), child_tidptr);
3633 if (flags & CLONE_PARENT_SETTID)
3634 put_user_u32(gettid(), parent_tidptr);
3635 ts = (TaskState *)env->opaque;
3636 if (flags & CLONE_SETTLS)
3637 cpu_set_tls (env, newtls);
3638 if (flags & CLONE_CHILD_CLEARTID)
3639 ts->child_tidptr = child_tidptr;
/* Emulate fcntl(2).  Record-lock commands translate struct flock /
   flock64 between guest and host layouts; F_GETFL/F_SETFL translate
   the flag bits through fcntl_flags_tbl; everything else passes
   through unmodified. */
3648 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
3651 struct target_flock *target_fl;
3652 struct flock64 fl64;
3653 struct target_flock64 *target_fl64;
3657 case TARGET_F_GETLK:
3658 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3659 return -TARGET_EFAULT;
3660 fl.l_type = tswap16(target_fl->l_type);
3661 fl.l_whence = tswap16(target_fl->l_whence);
3662 fl.l_start = tswapl(target_fl->l_start);
3663 fl.l_len = tswapl(target_fl->l_len);
3664 fl.l_pid = tswapl(target_fl->l_pid);
3665 unlock_user_struct(target_fl, arg, 0);
3666 ret = get_errno(fcntl(fd, F_GETLK, &fl));
/* on success, copy the (possibly updated) lock description back out. */
3668 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
3669 return -TARGET_EFAULT;
3670 target_fl->l_type = tswap16(fl.l_type);
3671 target_fl->l_whence = tswap16(fl.l_whence);
3672 target_fl->l_start = tswapl(fl.l_start);
3673 target_fl->l_len = tswapl(fl.l_len);
3674 target_fl->l_pid = tswapl(fl.l_pid);
3675 unlock_user_struct(target_fl, arg, 1);
3679 case TARGET_F_SETLK:
3680 case TARGET_F_SETLKW:
3681 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3682 return -TARGET_EFAULT;
3683 fl.l_type = tswap16(target_fl->l_type);
3684 fl.l_whence = tswap16(target_fl->l_whence);
3685 fl.l_start = tswapl(target_fl->l_start);
3686 fl.l_len = tswapl(target_fl->l_len);
3687 fl.l_pid = tswapl(target_fl->l_pid);
3688 unlock_user_struct(target_fl, arg, 0);
/* map SETLK/SETLKW by offset from the SETLK base command. */
3689 ret = get_errno(fcntl(fd, F_SETLK+(cmd-TARGET_F_SETLK), &fl));
3692 case TARGET_F_GETLK64:
3693 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3694 return -TARGET_EFAULT;
/* NOTE(review): the `>> 1` on l_type and the tswap16 (not tswapl) on
   l_pid look inconsistent with the 32-bit flock handling above —
   suspect bugs; verify against target_flock64's field layout. */
3695 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3696 fl64.l_whence = tswap16(target_fl64->l_whence);
3697 fl64.l_start = tswapl(target_fl64->l_start);
3698 fl64.l_len = tswapl(target_fl64->l_len);
3699 fl64.l_pid = tswap16(target_fl64->l_pid);
3700 unlock_user_struct(target_fl64, arg, 0);
3701 ret = get_errno(fcntl(fd, F_GETLK64, &fl64));
3703 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
3704 return -TARGET_EFAULT;
3705 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
3706 target_fl64->l_whence = tswap16(fl64.l_whence);
3707 target_fl64->l_start = tswapl(fl64.l_start);
3708 target_fl64->l_len = tswapl(fl64.l_len);
3709 target_fl64->l_pid = tswapl(fl64.l_pid);
3710 unlock_user_struct(target_fl64, arg, 1);
3713 case TARGET_F_SETLK64:
3714 case TARGET_F_SETLKW64:
3715 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3716 return -TARGET_EFAULT;
3717 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3718 fl64.l_whence = tswap16(target_fl64->l_whence);
3719 fl64.l_start = tswapl(target_fl64->l_start);
3720 fl64.l_len = tswapl(target_fl64->l_len);
3721 fl64.l_pid = tswap16(target_fl64->l_pid);
3722 unlock_user_struct(target_fl64, arg, 0);
3723 ret = get_errno(fcntl(fd, F_SETLK64+(cmd-TARGET_F_SETLK64), &fl64));
/* F_GETFL: translate host flag bits back to target encoding. */
3727 ret = get_errno(fcntl(fd, cmd, arg));
3729 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
/* F_SETFL: translate target flag bits to host encoding first. */
3734 ret = get_errno(fcntl(fd, cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
/* default: pass the command straight through. */
3738 ret = get_errno(fcntl(fd, cmd, arg));
3746 static inline int high2lowuid(int uid)
3754 static inline int high2lowgid(int gid)
3762 static inline int low2highuid(int uid)
3764 if ((int16_t)uid == -1)
3770 static inline int low2highgid(int gid)
3772 if ((int16_t)gid == -1)
3778 #endif /* USE_UID16 */
/* One-time initialization: register all thunk struct descriptions,
   patch target ioctl numbers whose size field is a placeholder with the
   real converted struct size, and build the reverse errno table.  On a
   same-arch build it also sanity-checks target vs host ioctl numbers. */
3780 void syscall_init(void)
3783 const argtype *arg_type;
3787 #define STRUCT(name, list...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3788 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3789 #include "syscall_types.h"
3791 #undef STRUCT_SPECIAL
3793 /* we patch the ioctl size if necessary. We rely on the fact that
3794    no ioctl has all the bits at '1' in the size field */
3796 while (ie->target_cmd != 0) {
/* an all-ones size field marks "fill in the real size at runtime". */
3797 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
3798 TARGET_IOC_SIZEMASK) {
3799 arg_type = ie->arg_type;
3800 if (arg_type[0] != TYPE_PTR) {
3801 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
3806 size = thunk_type_size(arg_type, 0);
3807 ie->target_cmd = (ie->target_cmd &
3808 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
3809 (size << TARGET_IOC_SIZESHIFT);
3812 /* Build target_to_host_errno_table[] table from
3813  * host_to_target_errno_table[]. */
3814 for (i=0; i < ERRNO_TABLE_SIZE; i++)
3815 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
3817 /* automatic consistency check if same arch */
3818 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3819     (defined(__x86_64__) && defined(TARGET_X86_64))
3820 if (unlikely(ie->target_cmd != ie->host_cmd)) {
3821 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3822 ie->name, ie->target_cmd, ie->host_cmd);
3829 #if TARGET_ABI_BITS == 32
/* Combine the two 32-bit halves of a 64-bit offset passed in a register
   pair; which half is high depends on guest endianness.  On 64-bit ABIs
   the offset arrives whole (identity variant below). */
3830 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
3832 #ifdef TARGET_WORDS_BIGENDIAN
3833 return ((uint64_t)word0 << 32) | word1;
3835 return ((uint64_t)word1 << 32) | word0;
3838 #else /* TARGET_ABI_BITS == 32 */
3839 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
3843 #endif /* TARGET_ABI_BITS != 32 */
3845 #ifdef TARGET_NR_truncate64
/* truncate64: reassemble the 64-bit length from its register pair.  On
   ARM EABI the pair is aligned to an even register (arg shift on an
   elided line). */
3846 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
3852 if (((CPUARMState *)cpu_env)->eabi)
3858 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
3862 #ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair handling as target_truncate64 above. */
3863 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
3869 if (((CPUARMState *)cpu_env)->eabi)
3875 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Copy a struct timespec from guest memory at target_addr into *host_ts,
 * byte-swapping each field.  Returns -TARGET_EFAULT if the guest address
 * cannot be locked for reading; otherwise (elided) presumably returns 0. */
3879 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
3880 abi_ulong target_addr)
3882 struct target_timespec *target_ts;
3884 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
3885 return -TARGET_EFAULT;
3886 host_ts->tv_sec = tswapl(target_ts->tv_sec);
3887 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
/* Final argument 0: read-only access, nothing to copy back to the guest. */
3888 unlock_user_struct(target_ts, target_addr, 0);
/* Copy *host_ts out to guest memory at target_addr, byte-swapping each
 * field.  Returns -TARGET_EFAULT if the guest address cannot be locked for
 * writing; otherwise (elided) presumably returns 0.  Inverse of
 * target_to_host_timespec() above. */
3892 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
3893 struct timespec *host_ts)
3895 struct target_timespec *target_ts;
3897 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
3898 return -TARGET_EFAULT;
3899 target_ts->tv_sec = tswapl(host_ts->tv_sec);
3900 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
/* Final argument 1: flush the written struct back to guest memory. */
3901 unlock_user_struct(target_ts, target_addr, 1);
3905 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Translate a host struct stat into the target's stat64 layout in guest
 * memory at target_addr.  Three layouts are handled: the ARM-EABI struct
 * (selected at runtime via cpu_env), and — in the (partially elided) #else
 * path — target_stat for 64-bit targets or target_stat64 for 32-bit ones.
 * Returns -TARGET_EFAULT on lock failure.
 * NOTE(review): this excerpt omits several preprocessor lines (#endif/#else
 * pairs) and the final return. */
3906 static inline abi_long host_to_target_stat64(void *cpu_env,
3907 abi_ulong target_addr,
3908 struct stat *host_st)
3911 if (((CPUARMState *)cpu_env)->eabi) {
3912 struct target_eabi_stat64 *target_st;
3914 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3915 return -TARGET_EFAULT;
/* Zero the whole struct first so padding/unset fields are deterministic. */
3916 memset(target_st, 0, sizeof(struct target_eabi_stat64));
3917 __put_user(host_st->st_dev, &target_st->st_dev);
3918 __put_user(host_st->st_ino, &target_st->st_ino);
3919 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* Some ABIs carry the inode in a secondary __st_ino field as well. */
3920 __put_user(host_st->st_ino, &target_st->__st_ino);
3922 __put_user(host_st->st_mode, &target_st->st_mode);
3923 __put_user(host_st->st_nlink, &target_st->st_nlink);
3924 __put_user(host_st->st_uid, &target_st->st_uid);
3925 __put_user(host_st->st_gid, &target_st->st_gid);
3926 __put_user(host_st->st_rdev, &target_st->st_rdev);
3927 __put_user(host_st->st_size, &target_st->st_size);
3928 __put_user(host_st->st_blksize, &target_st->st_blksize);
3929 __put_user(host_st->st_blocks, &target_st->st_blocks);
3930 __put_user(host_st->st_atime, &target_st->target_st_atime);
3931 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3932 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3933 unlock_user_struct(target_st, target_addr, 1);
/* Non-EABI path: pick the struct layout from the target word size. */
3937 #if TARGET_LONG_BITS == 64
3938 struct target_stat *target_st;
3940 struct target_stat64 *target_st;
3943 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3944 return -TARGET_EFAULT;
3945 memset(target_st, 0, sizeof(*target_st));
3946 __put_user(host_st->st_dev, &target_st->st_dev);
3947 __put_user(host_st->st_ino, &target_st->st_ino);
3948 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3949 __put_user(host_st->st_ino, &target_st->__st_ino);
3951 __put_user(host_st->st_mode, &target_st->st_mode);
3952 __put_user(host_st->st_nlink, &target_st->st_nlink);
3953 __put_user(host_st->st_uid, &target_st->st_uid);
3954 __put_user(host_st->st_gid, &target_st->st_gid);
3955 __put_user(host_st->st_rdev, &target_st->st_rdev);
3956 /* XXX: better use of kernel struct */
3957 __put_user(host_st->st_size, &target_st->st_size);
3958 __put_user(host_st->st_blksize, &target_st->st_blksize);
3959 __put_user(host_st->st_blocks, &target_st->st_blocks);
3960 __put_user(host_st->st_atime, &target_st->target_st_atime);
3961 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3962 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3963 unlock_user_struct(target_st, target_addr, 1);
3970 #if defined(USE_NPTL)
3971 /* ??? Using host futex calls even when target atomic operations
3972 are not really atomic probably breaks things. However implementing
3973 futexes locally would make futexes shared between multiple processes
3974 tricky. However they're probably useless because guest atomic
3975 operations won't work either. */
/* Forward the guest futex(2) call to the host via sys_futex(), translating
 * guest addresses with g2h() and byte-swapping the compared value for WAIT
 * and the requeue threshold for CMP_REQUEUE.  Unhandled ops fall through to
 * -TARGET_ENOSYS.  NOTE(review): several case labels and lines are elided
 * in this excerpt. */
3976 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
3977 target_ulong uaddr2, int val3)
3979 struct timespec ts, *pts;
3981 /* ??? We assume FUTEX_* constants are the same on both host
/* Strip FUTEX_PRIVATE_FLAG etc. before dispatching, when the host headers
 * define FUTEX_CMD_MASK. */
3983 #ifdef FUTEX_CMD_MASK
3984 switch ((op&FUTEX_CMD_MASK)) {
/* NOTE(review): the return value of target_to_host_timespec() is ignored;
 * if the guest timeout pointer faults, ts is used uninitialized below.
 * Consider propagating -TARGET_EFAULT here. */
3991 target_to_host_timespec(pts, timeout);
3995 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
3998 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4000 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4002 return get_errno(sys_futex(g2h(uaddr), op, val,
4003 NULL, g2h(uaddr2), 0));
4004 case FUTEX_CMP_REQUEUE:
/* val3 is the expected *uaddr value, compared in guest byte order. */
4005 return get_errno(sys_futex(g2h(uaddr), op, val,
4006 NULL, g2h(uaddr2), tswap32(val3)));
4008 return -TARGET_ENOSYS;
4013 /* Map host to target signal numbers for the wait family of syscalls.
4014 Assume all other status bits are the same. */
/* Rewrites the signal number embedded in a wait()-style status word:
 * - terminated-by-signal: signal lives in the low 7 bits;
 * - stopped: signal lives in bits 8-15.
 * NOTE(review): the fall-through return for plain exit statuses is elided
 * from this excerpt. */
4015 static int host_to_target_waitstatus(int status)
4017 if (WIFSIGNALED(status)) {
4018 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4020 if (WIFSTOPPED(status)) {
4021 return (host_to_target_signal(WSTOPSIG(status)) << 8)
/* Return the (possibly user-overridden via qemu_uname_release) kernel
 * version packed as one byte per component: e.g. "2.6.32" -> 0x020620.
 * The result is cached in a function-static so the parse runs once.
 * NOTE(review): the digit-accumulation and separator-handling lines are
 * elided in this excerpt. */
4027 int get_osversion(void)
4029 static int osversion;
4030 struct new_utsname buf;
/* Prefer the release string given on the command line, if any. */
4035 if (qemu_uname_release && *qemu_uname_release) {
4036 s = qemu_uname_release;
4038 if (sys_uname(&buf))
/* Parse up to three dot-separated numeric components. */
4043 for (i = 0; i < 3; i++) {
4045 while (*s >= '0' && *s <= '9') {
/* Shift each component into its own byte of the packed result. */
4050 tmp = (tmp << 8) + n;
4058 /* do_syscall() should always have a single exit point at the end so
4059 that actions, such as logging of syscall results, can be performed.
4060 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4061 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4062 abi_long arg2, abi_long arg3, abi_long arg4,
4063 abi_long arg5, abi_long arg6)
4071 gemu_log("syscall %d", num);
4074 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4077 case TARGET_NR_exit:
4079 /* In old applications this may be used to implement _exit(2).
4080 However in threaded applictions it is used for thread termination,
4081 and _exit_group is used for application termination.
4082 Do thread termination if we have more then one thread. */
4083 /* FIXME: This probably breaks if a signal arrives. We should probably
4084 be disabling signals. */
4085 if (first_cpu->next_cpu) {
4092 while (p && p != (CPUState *)cpu_env) {
4093 lastp = &p->next_cpu;
4096 /* If we didn't find the CPU for this thread then something is
4100 /* Remove the CPU from the list. */
4101 *lastp = p->next_cpu;
4103 TaskState *ts = ((CPUState *)cpu_env)->opaque;
4104 if (ts->child_tidptr) {
4105 put_user_u32(0, ts->child_tidptr);
4106 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4109 /* TODO: Free CPU state. */
4116 gdb_exit(cpu_env, arg1);
4118 ret = 0; /* avoid warning */
4120 case TARGET_NR_read:
4124 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4126 ret = get_errno(read(arg1, p, arg3));
4127 unlock_user(p, arg2, ret);
4130 case TARGET_NR_write:
4131 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4133 ret = get_errno(write(arg1, p, arg3));
4134 unlock_user(p, arg2, 0);
4136 case TARGET_NR_open:
4137 if (!(p = lock_user_string(arg1)))
4139 ret = get_errno(open(path(p),
4140 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4142 unlock_user(p, arg1, 0);
4144 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4145 case TARGET_NR_openat:
4146 if (!(p = lock_user_string(arg2)))
4148 ret = get_errno(sys_openat(arg1,
4150 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4152 unlock_user(p, arg2, 0);
4155 case TARGET_NR_close:
4156 ret = get_errno(close(arg1));
4161 case TARGET_NR_fork:
4162 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4164 #ifdef TARGET_NR_waitpid
4165 case TARGET_NR_waitpid:
4168 ret = get_errno(waitpid(arg1, &status, arg3));
4169 if (!is_error(ret) && arg2
4170 && put_user_s32(host_to_target_waitstatus(status), arg2))
4175 #ifdef TARGET_NR_waitid
4176 case TARGET_NR_waitid:
4180 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4181 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4182 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4184 host_to_target_siginfo(p, &info);
4185 unlock_user(p, arg3, sizeof(target_siginfo_t));
4190 #ifdef TARGET_NR_creat /* not on alpha */
4191 case TARGET_NR_creat:
4192 if (!(p = lock_user_string(arg1)))
4194 ret = get_errno(creat(p, arg2));
4195 unlock_user(p, arg1, 0);
4198 case TARGET_NR_link:
4201 p = lock_user_string(arg1);
4202 p2 = lock_user_string(arg2);
4204 ret = -TARGET_EFAULT;
4206 ret = get_errno(link(p, p2));
4207 unlock_user(p2, arg2, 0);
4208 unlock_user(p, arg1, 0);
4211 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4212 case TARGET_NR_linkat:
4217 p = lock_user_string(arg2);
4218 p2 = lock_user_string(arg4);
4220 ret = -TARGET_EFAULT;
4222 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4223 unlock_user(p, arg2, 0);
4224 unlock_user(p2, arg4, 0);
4228 case TARGET_NR_unlink:
4229 if (!(p = lock_user_string(arg1)))
4231 ret = get_errno(unlink(p));
4232 unlock_user(p, arg1, 0);
4234 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4235 case TARGET_NR_unlinkat:
4236 if (!(p = lock_user_string(arg2)))
4238 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4239 unlock_user(p, arg2, 0);
4242 case TARGET_NR_execve:
4244 char **argp, **envp;
4247 abi_ulong guest_argp;
4248 abi_ulong guest_envp;
4254 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4255 if (get_user_ual(addr, gp))
4263 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4264 if (get_user_ual(addr, gp))
4271 argp = alloca((argc + 1) * sizeof(void *));
4272 envp = alloca((envc + 1) * sizeof(void *));
4274 for (gp = guest_argp, q = argp; gp;
4275 gp += sizeof(abi_ulong), q++) {
4276 if (get_user_ual(addr, gp))
4280 if (!(*q = lock_user_string(addr)))
4285 for (gp = guest_envp, q = envp; gp;
4286 gp += sizeof(abi_ulong), q++) {
4287 if (get_user_ual(addr, gp))
4291 if (!(*q = lock_user_string(addr)))
4296 if (!(p = lock_user_string(arg1)))
4298 ret = get_errno(execve(p, argp, envp));
4299 unlock_user(p, arg1, 0);
4304 ret = -TARGET_EFAULT;
4307 for (gp = guest_argp, q = argp; *q;
4308 gp += sizeof(abi_ulong), q++) {
4309 if (get_user_ual(addr, gp)
4312 unlock_user(*q, addr, 0);
4314 for (gp = guest_envp, q = envp; *q;
4315 gp += sizeof(abi_ulong), q++) {
4316 if (get_user_ual(addr, gp)
4319 unlock_user(*q, addr, 0);
4323 case TARGET_NR_chdir:
4324 if (!(p = lock_user_string(arg1)))
4326 ret = get_errno(chdir(p));
4327 unlock_user(p, arg1, 0);
4329 #ifdef TARGET_NR_time
4330 case TARGET_NR_time:
4333 ret = get_errno(time(&host_time));
4336 && put_user_sal(host_time, arg1))
4341 case TARGET_NR_mknod:
4342 if (!(p = lock_user_string(arg1)))
4344 ret = get_errno(mknod(p, arg2, arg3));
4345 unlock_user(p, arg1, 0);
4347 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4348 case TARGET_NR_mknodat:
4349 if (!(p = lock_user_string(arg2)))
4351 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4352 unlock_user(p, arg2, 0);
4355 case TARGET_NR_chmod:
4356 if (!(p = lock_user_string(arg1)))
4358 ret = get_errno(chmod(p, arg2));
4359 unlock_user(p, arg1, 0);
4361 #ifdef TARGET_NR_break
4362 case TARGET_NR_break:
4365 #ifdef TARGET_NR_oldstat
4366 case TARGET_NR_oldstat:
4369 case TARGET_NR_lseek:
4370 ret = get_errno(lseek(arg1, arg2, arg3));
4372 #ifdef TARGET_NR_getxpid
4373 case TARGET_NR_getxpid:
4375 case TARGET_NR_getpid:
4377 ret = get_errno(getpid());
4379 case TARGET_NR_mount:
4381 /* need to look at the data field */
4383 p = lock_user_string(arg1);
4384 p2 = lock_user_string(arg2);
4385 p3 = lock_user_string(arg3);
4386 if (!p || !p2 || !p3)
4387 ret = -TARGET_EFAULT;
4389 /* FIXME - arg5 should be locked, but it isn't clear how to
4390 * do that since it's not guaranteed to be a NULL-terminated
4393 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4394 unlock_user(p, arg1, 0);
4395 unlock_user(p2, arg2, 0);
4396 unlock_user(p3, arg3, 0);
4399 #ifdef TARGET_NR_umount
4400 case TARGET_NR_umount:
4401 if (!(p = lock_user_string(arg1)))
4403 ret = get_errno(umount(p));
4404 unlock_user(p, arg1, 0);
4407 #ifdef TARGET_NR_stime /* not on alpha */
4408 case TARGET_NR_stime:
4411 if (get_user_sal(host_time, arg1))
4413 ret = get_errno(stime(&host_time));
4417 case TARGET_NR_ptrace:
4419 #ifdef TARGET_NR_alarm /* not on alpha */
4420 case TARGET_NR_alarm:
4424 #ifdef TARGET_NR_oldfstat
4425 case TARGET_NR_oldfstat:
4428 #ifdef TARGET_NR_pause /* not on alpha */
4429 case TARGET_NR_pause:
4430 ret = get_errno(pause());
4433 #ifdef TARGET_NR_utime
4434 case TARGET_NR_utime:
4436 struct utimbuf tbuf, *host_tbuf;
4437 struct target_utimbuf *target_tbuf;
4439 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4441 tbuf.actime = tswapl(target_tbuf->actime);
4442 tbuf.modtime = tswapl(target_tbuf->modtime);
4443 unlock_user_struct(target_tbuf, arg2, 0);
4448 if (!(p = lock_user_string(arg1)))
4450 ret = get_errno(utime(p, host_tbuf));
4451 unlock_user(p, arg1, 0);
4455 case TARGET_NR_utimes:
4457 struct timeval *tvp, tv[2];
4459 if (copy_from_user_timeval(&tv[0], arg2)
4460 || copy_from_user_timeval(&tv[1],
4461 arg2 + sizeof(struct target_timeval)))
4467 if (!(p = lock_user_string(arg1)))
4469 ret = get_errno(utimes(p, tvp));
4470 unlock_user(p, arg1, 0);
4473 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4474 case TARGET_NR_futimesat:
4476 struct timeval *tvp, tv[2];
4478 if (copy_from_user_timeval(&tv[0], arg3)
4479 || copy_from_user_timeval(&tv[1],
4480 arg3 + sizeof(struct target_timeval)))
4486 if (!(p = lock_user_string(arg2)))
4488 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4489 unlock_user(p, arg2, 0);
4493 #ifdef TARGET_NR_stty
4494 case TARGET_NR_stty:
4497 #ifdef TARGET_NR_gtty
4498 case TARGET_NR_gtty:
4501 case TARGET_NR_access:
4502 if (!(p = lock_user_string(arg1)))
4504 ret = get_errno(access(p, arg2));
4505 unlock_user(p, arg1, 0);
4507 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4508 case TARGET_NR_faccessat:
4509 if (!(p = lock_user_string(arg2)))
4511 ret = get_errno(sys_faccessat(arg1, p, arg3));
4512 unlock_user(p, arg2, 0);
4515 #ifdef TARGET_NR_nice /* not on alpha */
4516 case TARGET_NR_nice:
4517 ret = get_errno(nice(arg1));
4520 #ifdef TARGET_NR_ftime
4521 case TARGET_NR_ftime:
4524 case TARGET_NR_sync:
4528 case TARGET_NR_kill:
4529 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
4531 case TARGET_NR_rename:
4534 p = lock_user_string(arg1);
4535 p2 = lock_user_string(arg2);
4537 ret = -TARGET_EFAULT;
4539 ret = get_errno(rename(p, p2));
4540 unlock_user(p2, arg2, 0);
4541 unlock_user(p, arg1, 0);
4544 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4545 case TARGET_NR_renameat:
4548 p = lock_user_string(arg2);
4549 p2 = lock_user_string(arg4);
4551 ret = -TARGET_EFAULT;
4553 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
4554 unlock_user(p2, arg4, 0);
4555 unlock_user(p, arg2, 0);
4559 case TARGET_NR_mkdir:
4560 if (!(p = lock_user_string(arg1)))
4562 ret = get_errno(mkdir(p, arg2));
4563 unlock_user(p, arg1, 0);
4565 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4566 case TARGET_NR_mkdirat:
4567 if (!(p = lock_user_string(arg2)))
4569 ret = get_errno(sys_mkdirat(arg1, p, arg3));
4570 unlock_user(p, arg2, 0);
4573 case TARGET_NR_rmdir:
4574 if (!(p = lock_user_string(arg1)))
4576 ret = get_errno(rmdir(p));
4577 unlock_user(p, arg1, 0);
4580 ret = get_errno(dup(arg1));
4582 case TARGET_NR_pipe:
4583 ret = do_pipe(cpu_env, arg1, 0);
4585 #ifdef TARGET_NR_pipe2
4586 case TARGET_NR_pipe2:
4587 ret = do_pipe(cpu_env, arg1, arg2);
4590 case TARGET_NR_times:
4592 struct target_tms *tmsp;
4594 ret = get_errno(times(&tms));
4596 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
4599 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
4600 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
4601 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
4602 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
4605 ret = host_to_target_clock_t(ret);
4608 #ifdef TARGET_NR_prof
4609 case TARGET_NR_prof:
4612 #ifdef TARGET_NR_signal
4613 case TARGET_NR_signal:
4616 case TARGET_NR_acct:
4618 ret = get_errno(acct(NULL));
4620 if (!(p = lock_user_string(arg1)))
4622 ret = get_errno(acct(path(p)));
4623 unlock_user(p, arg1, 0);
4626 #ifdef TARGET_NR_umount2 /* not on alpha */
4627 case TARGET_NR_umount2:
4628 if (!(p = lock_user_string(arg1)))
4630 ret = get_errno(umount2(p, arg2));
4631 unlock_user(p, arg1, 0);
4634 #ifdef TARGET_NR_lock
4635 case TARGET_NR_lock:
4638 case TARGET_NR_ioctl:
4639 ret = do_ioctl(arg1, arg2, arg3);
4641 case TARGET_NR_fcntl:
4642 ret = do_fcntl(arg1, arg2, arg3);
4644 #ifdef TARGET_NR_mpx
4648 case TARGET_NR_setpgid:
4649 ret = get_errno(setpgid(arg1, arg2));
4651 #ifdef TARGET_NR_ulimit
4652 case TARGET_NR_ulimit:
4655 #ifdef TARGET_NR_oldolduname
4656 case TARGET_NR_oldolduname:
4659 case TARGET_NR_umask:
4660 ret = get_errno(umask(arg1));
4662 case TARGET_NR_chroot:
4663 if (!(p = lock_user_string(arg1)))
4665 ret = get_errno(chroot(p));
4666 unlock_user(p, arg1, 0);
4668 case TARGET_NR_ustat:
4670 case TARGET_NR_dup2:
4671 ret = get_errno(dup2(arg1, arg2));
4673 #ifdef TARGET_NR_getppid /* not on alpha */
4674 case TARGET_NR_getppid:
4675 ret = get_errno(getppid());
4678 case TARGET_NR_getpgrp:
4679 ret = get_errno(getpgrp());
4681 case TARGET_NR_setsid:
4682 ret = get_errno(setsid());
4684 #ifdef TARGET_NR_sigaction
4685 case TARGET_NR_sigaction:
4687 #if !defined(TARGET_MIPS)
4688 struct target_old_sigaction *old_act;
4689 struct target_sigaction act, oact, *pact;
4691 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4693 act._sa_handler = old_act->_sa_handler;
4694 target_siginitset(&act.sa_mask, old_act->sa_mask);
4695 act.sa_flags = old_act->sa_flags;
4696 act.sa_restorer = old_act->sa_restorer;
4697 unlock_user_struct(old_act, arg2, 0);
4702 ret = get_errno(do_sigaction(arg1, pact, &oact));
4703 if (!is_error(ret) && arg3) {
4704 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4706 old_act->_sa_handler = oact._sa_handler;
4707 old_act->sa_mask = oact.sa_mask.sig[0];
4708 old_act->sa_flags = oact.sa_flags;
4709 old_act->sa_restorer = oact.sa_restorer;
4710 unlock_user_struct(old_act, arg3, 1);
4713 struct target_sigaction act, oact, *pact, *old_act;
4716 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4718 act._sa_handler = old_act->_sa_handler;
4719 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
4720 act.sa_flags = old_act->sa_flags;
4721 unlock_user_struct(old_act, arg2, 0);
4727 ret = get_errno(do_sigaction(arg1, pact, &oact));
4729 if (!is_error(ret) && arg3) {
4730 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4732 old_act->_sa_handler = oact._sa_handler;
4733 old_act->sa_flags = oact.sa_flags;
4734 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
4735 old_act->sa_mask.sig[1] = 0;
4736 old_act->sa_mask.sig[2] = 0;
4737 old_act->sa_mask.sig[3] = 0;
4738 unlock_user_struct(old_act, arg3, 1);
4744 case TARGET_NR_rt_sigaction:
4746 struct target_sigaction *act;
4747 struct target_sigaction *oact;
4750 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
4755 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
4756 ret = -TARGET_EFAULT;
4757 goto rt_sigaction_fail;
4761 ret = get_errno(do_sigaction(arg1, act, oact));
4764 unlock_user_struct(act, arg2, 0);
4766 unlock_user_struct(oact, arg3, 1);
4769 #ifdef TARGET_NR_sgetmask /* not on alpha */
4770 case TARGET_NR_sgetmask:
4773 abi_ulong target_set;
4774 sigprocmask(0, NULL, &cur_set);
4775 host_to_target_old_sigset(&target_set, &cur_set);
4780 #ifdef TARGET_NR_ssetmask /* not on alpha */
4781 case TARGET_NR_ssetmask:
4783 sigset_t set, oset, cur_set;
4784 abi_ulong target_set = arg1;
4785 sigprocmask(0, NULL, &cur_set);
4786 target_to_host_old_sigset(&set, &target_set);
4787 sigorset(&set, &set, &cur_set);
4788 sigprocmask(SIG_SETMASK, &set, &oset);
4789 host_to_target_old_sigset(&target_set, &oset);
4794 #ifdef TARGET_NR_sigprocmask
4795 case TARGET_NR_sigprocmask:
4798 sigset_t set, oldset, *set_ptr;
4802 case TARGET_SIG_BLOCK:
4805 case TARGET_SIG_UNBLOCK:
4808 case TARGET_SIG_SETMASK:
4812 ret = -TARGET_EINVAL;
4815 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4817 target_to_host_old_sigset(&set, p);
4818 unlock_user(p, arg2, 0);
4824 ret = get_errno(sigprocmask(arg1, set_ptr, &oldset));
4825 if (!is_error(ret) && arg3) {
4826 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4828 host_to_target_old_sigset(p, &oldset);
4829 unlock_user(p, arg3, sizeof(target_sigset_t));
4834 case TARGET_NR_rt_sigprocmask:
4837 sigset_t set, oldset, *set_ptr;
4841 case TARGET_SIG_BLOCK:
4844 case TARGET_SIG_UNBLOCK:
4847 case TARGET_SIG_SETMASK:
4851 ret = -TARGET_EINVAL;
4854 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4856 target_to_host_sigset(&set, p);
4857 unlock_user(p, arg2, 0);
4863 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
4864 if (!is_error(ret) && arg3) {
4865 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4867 host_to_target_sigset(p, &oldset);
4868 unlock_user(p, arg3, sizeof(target_sigset_t));
4872 #ifdef TARGET_NR_sigpending
4873 case TARGET_NR_sigpending:
4876 ret = get_errno(sigpending(&set));
4877 if (!is_error(ret)) {
4878 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4880 host_to_target_old_sigset(p, &set);
4881 unlock_user(p, arg1, sizeof(target_sigset_t));
4886 case TARGET_NR_rt_sigpending:
4889 ret = get_errno(sigpending(&set));
4890 if (!is_error(ret)) {
4891 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4893 host_to_target_sigset(p, &set);
4894 unlock_user(p, arg1, sizeof(target_sigset_t));
4898 #ifdef TARGET_NR_sigsuspend
4899 case TARGET_NR_sigsuspend:
4902 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4904 target_to_host_old_sigset(&set, p);
4905 unlock_user(p, arg1, 0);
4906 ret = get_errno(sigsuspend(&set));
4910 case TARGET_NR_rt_sigsuspend:
4913 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4915 target_to_host_sigset(&set, p);
4916 unlock_user(p, arg1, 0);
4917 ret = get_errno(sigsuspend(&set));
4920 case TARGET_NR_rt_sigtimedwait:
4923 struct timespec uts, *puts;
4926 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4928 target_to_host_sigset(&set, p);
4929 unlock_user(p, arg1, 0);
4932 target_to_host_timespec(puts, arg3);
4936 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
4937 if (!is_error(ret) && arg2) {
4938 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
4940 host_to_target_siginfo(p, &uinfo);
4941 unlock_user(p, arg2, sizeof(target_siginfo_t));
4945 case TARGET_NR_rt_sigqueueinfo:
/* Queue a signal (arg2) with attached siginfo (guest pointer arg3) to
 * process arg1.  BUG FIX, two issues in the original:
 *  - the guest buffer at arg3 holds a target_siginfo_t, but was locked
 *    with sizeof(target_sigset_t), under-locking the region that
 *    target_to_host_siginfo() reads;
 *  - the buffer was locked at arg3 but unlocked at arg1 (the pid),
 *    unlocking the wrong guest address. */
4948 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
4950 target_to_host_siginfo(&uinfo, p);
4951 unlock_user(p, arg3, 0);
4952 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
4955 #ifdef TARGET_NR_sigreturn
4956 case TARGET_NR_sigreturn:
4957 /* NOTE: ret is eax, so no transcoding needs to be done */
4958 ret = do_sigreturn(cpu_env);
4961 case TARGET_NR_rt_sigreturn:
4962 /* NOTE: ret is eax, so no transcoding needs to be done */
4963 ret = do_rt_sigreturn(cpu_env);
4965 case TARGET_NR_sethostname:
4966 if (!(p = lock_user_string(arg1)))
4968 ret = get_errno(sethostname(p, arg2));
4969 unlock_user(p, arg1, 0);
4971 case TARGET_NR_setrlimit:
4973 /* XXX: convert resource ? */
4974 int resource = arg1;
4975 struct target_rlimit *target_rlim;
4977 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
4979 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4980 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4981 unlock_user_struct(target_rlim, arg2, 0);
4982 ret = get_errno(setrlimit(resource, &rlim));
4985 case TARGET_NR_getrlimit:
4987 /* XXX: convert resource ? */
4988 int resource = arg1;
4989 struct target_rlimit *target_rlim;
4992 ret = get_errno(getrlimit(resource, &rlim));
4993 if (!is_error(ret)) {
4994 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
/* BUG FIX: the copy direction was reversed.  After a successful host
 * getrlimit() the host values in rlim must be byte-swapped and stored
 * into the guest struct (locked VERIFY_WRITE above); the original code
 * instead overwrote the host rlim from uninitialized guest memory and
 * never wrote the results back to the guest. */
4996 target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
4997 target_rlim->rlim_max = tswapl(rlim.rlim_max);
4998 unlock_user_struct(target_rlim, arg2, 1);
5002 case TARGET_NR_getrusage:
5004 struct rusage rusage;
5005 ret = get_errno(getrusage(arg1, &rusage));
5006 if (!is_error(ret)) {
5007 host_to_target_rusage(arg2, &rusage);
5011 case TARGET_NR_gettimeofday:
5014 ret = get_errno(gettimeofday(&tv, NULL));
5015 if (!is_error(ret)) {
5016 if (copy_to_user_timeval(arg1, &tv))
5021 case TARGET_NR_settimeofday:
5024 if (copy_from_user_timeval(&tv, arg1))
5026 ret = get_errno(settimeofday(&tv, NULL));
5029 #ifdef TARGET_NR_select
5030 case TARGET_NR_select:
5032 struct target_sel_arg_struct *sel;
5033 abi_ulong inp, outp, exp, tvp;
5036 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5038 nsel = tswapl(sel->n);
5039 inp = tswapl(sel->inp);
5040 outp = tswapl(sel->outp);
5041 exp = tswapl(sel->exp);
5042 tvp = tswapl(sel->tvp);
5043 unlock_user_struct(sel, arg1, 0);
5044 ret = do_select(nsel, inp, outp, exp, tvp);
5048 case TARGET_NR_symlink:
5051 p = lock_user_string(arg1);
5052 p2 = lock_user_string(arg2);
5054 ret = -TARGET_EFAULT;
5056 ret = get_errno(symlink(p, p2));
5057 unlock_user(p2, arg2, 0);
5058 unlock_user(p, arg1, 0);
5061 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5062 case TARGET_NR_symlinkat:
5065 p = lock_user_string(arg1);
5066 p2 = lock_user_string(arg3);
5068 ret = -TARGET_EFAULT;
5070 ret = get_errno(sys_symlinkat(p, arg2, p2));
5071 unlock_user(p2, arg3, 0);
5072 unlock_user(p, arg1, 0);
5076 #ifdef TARGET_NR_oldlstat
5077 case TARGET_NR_oldlstat:
5080 case TARGET_NR_readlink:
5083 p = lock_user_string(arg1);
5084 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5086 ret = -TARGET_EFAULT;
5088 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5089 char real[PATH_MAX];
5090 temp = realpath(exec_path,real);
5091 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
5092 snprintf((char *)p2, arg3, "%s", real);
5095 ret = get_errno(readlink(path(p), p2, arg3));
5097 unlock_user(p2, arg2, ret);
5098 unlock_user(p, arg1, 0);
5101 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5102 case TARGET_NR_readlinkat:
5105 p = lock_user_string(arg2);
5106 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5108 ret = -TARGET_EFAULT;
5110 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5111 unlock_user(p2, arg3, ret);
5112 unlock_user(p, arg2, 0);
5116 #ifdef TARGET_NR_uselib
5117 case TARGET_NR_uselib:
5120 #ifdef TARGET_NR_swapon
5121 case TARGET_NR_swapon:
5122 if (!(p = lock_user_string(arg1)))
5124 ret = get_errno(swapon(p, arg2));
5125 unlock_user(p, arg1, 0);
5128 case TARGET_NR_reboot:
5130 #ifdef TARGET_NR_readdir
5131 case TARGET_NR_readdir:
5134 #ifdef TARGET_NR_mmap
5135 case TARGET_NR_mmap:
5136 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS)
5139 abi_ulong v1, v2, v3, v4, v5, v6;
5140 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5148 unlock_user(v, arg1, 0);
5149 ret = get_errno(target_mmap(v1, v2, v3,
5150 target_to_host_bitmask(v4, mmap_flags_tbl),
5154 ret = get_errno(target_mmap(arg1, arg2, arg3,
5155 target_to_host_bitmask(arg4, mmap_flags_tbl),
5161 #ifdef TARGET_NR_mmap2
5162 case TARGET_NR_mmap2:
5164 #define MMAP_SHIFT 12
5166 ret = get_errno(target_mmap(arg1, arg2, arg3,
5167 target_to_host_bitmask(arg4, mmap_flags_tbl),
5169 arg6 << MMAP_SHIFT));
5172 case TARGET_NR_munmap:
5173 ret = get_errno(target_munmap(arg1, arg2));
5175 case TARGET_NR_mprotect:
5176 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5178 #ifdef TARGET_NR_mremap
5179 case TARGET_NR_mremap:
5180 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5183 /* ??? msync/mlock/munlock are broken for softmmu. */
5184 #ifdef TARGET_NR_msync
5185 case TARGET_NR_msync:
5186 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5189 #ifdef TARGET_NR_mlock
5190 case TARGET_NR_mlock:
5191 ret = get_errno(mlock(g2h(arg1), arg2));
5194 #ifdef TARGET_NR_munlock
5195 case TARGET_NR_munlock:
5196 ret = get_errno(munlock(g2h(arg1), arg2));
5199 #ifdef TARGET_NR_mlockall
5200 case TARGET_NR_mlockall:
5201 ret = get_errno(mlockall(arg1));
5204 #ifdef TARGET_NR_munlockall
5205 case TARGET_NR_munlockall:
5206 ret = get_errno(munlockall());
5209 case TARGET_NR_truncate:
5210 if (!(p = lock_user_string(arg1)))
5212 ret = get_errno(truncate(p, arg2));
5213 unlock_user(p, arg1, 0);
5215 case TARGET_NR_ftruncate:
5216 ret = get_errno(ftruncate(arg1, arg2));
5218 case TARGET_NR_fchmod:
5219 ret = get_errno(fchmod(arg1, arg2));
5221 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5222 case TARGET_NR_fchmodat:
5223 if (!(p = lock_user_string(arg2)))
5225 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5226 unlock_user(p, arg2, 0);
5229 case TARGET_NR_getpriority:
5230 /* libc does special remapping of the return value of
5231 * sys_getpriority() so it's just easiest to call
5232 * sys_getpriority() directly rather than through libc. */
5233 ret = sys_getpriority(arg1, arg2);
5235 case TARGET_NR_setpriority:
5236 ret = get_errno(setpriority(arg1, arg2, arg3));
5238 #ifdef TARGET_NR_profil
5239 case TARGET_NR_profil:
5242 case TARGET_NR_statfs:
5243 if (!(p = lock_user_string(arg1)))
5245 ret = get_errno(statfs(path(p), &stfs));
5246 unlock_user(p, arg1, 0);
5248 if (!is_error(ret)) {
5249 struct target_statfs *target_stfs;
5251 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5253 __put_user(stfs.f_type, &target_stfs->f_type);
5254 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5255 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5256 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5257 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5258 __put_user(stfs.f_files, &target_stfs->f_files);
5259 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5260 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5261 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5262 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5263 unlock_user_struct(target_stfs, arg2, 1);
5266 case TARGET_NR_fstatfs:
5267 ret = get_errno(fstatfs(arg1, &stfs));
5268 goto convert_statfs;
5269 #ifdef TARGET_NR_statfs64
5270 case TARGET_NR_statfs64:
5271 if (!(p = lock_user_string(arg1)))
5273 ret = get_errno(statfs(path(p), &stfs));
5274 unlock_user(p, arg1, 0);
5276 if (!is_error(ret)) {
5277 struct target_statfs64 *target_stfs;
5279 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5281 __put_user(stfs.f_type, &target_stfs->f_type);
5282 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5283 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5284 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5285 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5286 __put_user(stfs.f_files, &target_stfs->f_files);
5287 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5288 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5289 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5290 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5291 unlock_user_struct(target_stfs, arg3, 1);
5294 case TARGET_NR_fstatfs64:
5295 ret = get_errno(fstatfs(arg1, &stfs));
5296 goto convert_statfs64;
5298 #ifdef TARGET_NR_ioperm
5299 case TARGET_NR_ioperm:
5302 #ifdef TARGET_NR_socketcall
5303 case TARGET_NR_socketcall:
5304 ret = do_socketcall(arg1, arg2);
5307 #ifdef TARGET_NR_accept
5308 case TARGET_NR_accept:
5309 ret = do_accept(arg1, arg2, arg3);
5312 #ifdef TARGET_NR_bind
5313 case TARGET_NR_bind:
5314 ret = do_bind(arg1, arg2, arg3);
5317 #ifdef TARGET_NR_connect
5318 case TARGET_NR_connect:
5319 ret = do_connect(arg1, arg2, arg3);
5322 #ifdef TARGET_NR_getpeername
5323 case TARGET_NR_getpeername:
5324 ret = do_getpeername(arg1, arg2, arg3);
5327 #ifdef TARGET_NR_getsockname
5328 case TARGET_NR_getsockname:
5329 ret = do_getsockname(arg1, arg2, arg3);
5332 #ifdef TARGET_NR_getsockopt
5333 case TARGET_NR_getsockopt:
5334 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
5337 #ifdef TARGET_NR_listen
5338 case TARGET_NR_listen:
5339 ret = get_errno(listen(arg1, arg2));
5342 #ifdef TARGET_NR_recv
5343 case TARGET_NR_recv:
5344 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
5347 #ifdef TARGET_NR_recvfrom
5348 case TARGET_NR_recvfrom:
5349 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
5352 #ifdef TARGET_NR_recvmsg
5353 case TARGET_NR_recvmsg:
5354 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
5357 #ifdef TARGET_NR_send
5358 case TARGET_NR_send:
5359 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
5362 #ifdef TARGET_NR_sendmsg
5363 case TARGET_NR_sendmsg:
5364 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
5367 #ifdef TARGET_NR_sendto
5368 case TARGET_NR_sendto:
5369 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
5372 #ifdef TARGET_NR_shutdown
5373 case TARGET_NR_shutdown:
5374 ret = get_errno(shutdown(arg1, arg2));
5377 #ifdef TARGET_NR_socket
5378 case TARGET_NR_socket:
5379 ret = do_socket(arg1, arg2, arg3);
5382 #ifdef TARGET_NR_socketpair
5383 case TARGET_NR_socketpair:
5384 ret = do_socketpair(arg1, arg2, arg3, arg4);
5387 #ifdef TARGET_NR_setsockopt
5388 case TARGET_NR_setsockopt:
5389 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
5393 case TARGET_NR_syslog:
5394 if (!(p = lock_user_string(arg2)))
5396 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
5397 unlock_user(p, arg2, 0);
5400 case TARGET_NR_setitimer:
5402 struct itimerval value, ovalue, *pvalue;
5406 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
5407 || copy_from_user_timeval(&pvalue->it_value,
5408 arg2 + sizeof(struct target_timeval)))
5413 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
5414 if (!is_error(ret) && arg3) {
5415 if (copy_to_user_timeval(arg3,
5416 &ovalue.it_interval)
5417 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
5423 case TARGET_NR_getitimer:
5425 struct itimerval value;
5427 ret = get_errno(getitimer(arg1, &value));
5428 if (!is_error(ret) && arg2) {
5429 if (copy_to_user_timeval(arg2,
5431 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
5437 case TARGET_NR_stat:
5438 if (!(p = lock_user_string(arg1)))
5440 ret = get_errno(stat(path(p), &st));
5441 unlock_user(p, arg1, 0);
5443 case TARGET_NR_lstat:
5444 if (!(p = lock_user_string(arg1)))
5446 ret = get_errno(lstat(path(p), &st));
5447 unlock_user(p, arg1, 0);
5449 case TARGET_NR_fstat:
5451 ret = get_errno(fstat(arg1, &st));
5453 if (!is_error(ret)) {
5454 struct target_stat *target_st;
5456 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
5458 __put_user(st.st_dev, &target_st->st_dev);
5459 __put_user(st.st_ino, &target_st->st_ino);
5460 __put_user(st.st_mode, &target_st->st_mode);
5461 __put_user(st.st_uid, &target_st->st_uid);
5462 __put_user(st.st_gid, &target_st->st_gid);
5463 __put_user(st.st_nlink, &target_st->st_nlink);
5464 __put_user(st.st_rdev, &target_st->st_rdev);
5465 __put_user(st.st_size, &target_st->st_size);
5466 __put_user(st.st_blksize, &target_st->st_blksize);
5467 __put_user(st.st_blocks, &target_st->st_blocks);
5468 __put_user(st.st_atime, &target_st->target_st_atime);
5469 __put_user(st.st_mtime, &target_st->target_st_mtime);
5470 __put_user(st.st_ctime, &target_st->target_st_ctime);
5471 unlock_user_struct(target_st, arg2, 1);
5475 #ifdef TARGET_NR_olduname
5476 case TARGET_NR_olduname:
5479 #ifdef TARGET_NR_iopl
5480 case TARGET_NR_iopl:
5483 case TARGET_NR_vhangup:
5484 ret = get_errno(vhangup());
5486 #ifdef TARGET_NR_idle
5487 case TARGET_NR_idle:
5490 #ifdef TARGET_NR_syscall
5491 case TARGET_NR_syscall:
5492 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
5495 case TARGET_NR_wait4:
5498 abi_long status_ptr = arg2;
5499 struct rusage rusage, *rusage_ptr;
5500 abi_ulong target_rusage = arg4;
5502 rusage_ptr = &rusage;
5505 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
5506 if (!is_error(ret)) {
5508 status = host_to_target_waitstatus(status);
5509 if (put_user_s32(status, status_ptr))
5513 host_to_target_rusage(target_rusage, &rusage);
5517 #ifdef TARGET_NR_swapoff
5518 case TARGET_NR_swapoff:
5519 if (!(p = lock_user_string(arg1)))
5521 ret = get_errno(swapoff(p));
5522 unlock_user(p, arg1, 0);
5525 case TARGET_NR_sysinfo:
5527 struct target_sysinfo *target_value;
5528 struct sysinfo value;
5529 ret = get_errno(sysinfo(&value));
5530 if (!is_error(ret) && arg1)
5532 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
5534 __put_user(value.uptime, &target_value->uptime);
5535 __put_user(value.loads[0], &target_value->loads[0]);
5536 __put_user(value.loads[1], &target_value->loads[1]);
5537 __put_user(value.loads[2], &target_value->loads[2]);
5538 __put_user(value.totalram, &target_value->totalram);
5539 __put_user(value.freeram, &target_value->freeram);
5540 __put_user(value.sharedram, &target_value->sharedram);
5541 __put_user(value.bufferram, &target_value->bufferram);
5542 __put_user(value.totalswap, &target_value->totalswap);
5543 __put_user(value.freeswap, &target_value->freeswap);
5544 __put_user(value.procs, &target_value->procs);
5545 __put_user(value.totalhigh, &target_value->totalhigh);
5546 __put_user(value.freehigh, &target_value->freehigh);
5547 __put_user(value.mem_unit, &target_value->mem_unit);
5548 unlock_user_struct(target_value, arg1, 1);
5552 #ifdef TARGET_NR_ipc
5554 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
5557 #ifdef TARGET_NR_semget
5558 case TARGET_NR_semget:
5559 ret = get_errno(semget(arg1, arg2, arg3));
5562 #ifdef TARGET_NR_semop
5563 case TARGET_NR_semop:
5564 ret = get_errno(do_semop(arg1, arg2, arg3));
5567 #ifdef TARGET_NR_semctl
5568 case TARGET_NR_semctl:
5569 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
5572 #ifdef TARGET_NR_msgctl
5573 case TARGET_NR_msgctl:
5574 ret = do_msgctl(arg1, arg2, arg3);
5577 #ifdef TARGET_NR_msgget
5578 case TARGET_NR_msgget:
5579 ret = get_errno(msgget(arg1, arg2));
5582 #ifdef TARGET_NR_msgrcv
5583 case TARGET_NR_msgrcv:
5584 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
5587 #ifdef TARGET_NR_msgsnd
5588 case TARGET_NR_msgsnd:
5589 ret = do_msgsnd(arg1, arg2, arg3, arg4);
5592 #ifdef TARGET_NR_shmget
5593 case TARGET_NR_shmget:
5594 ret = get_errno(shmget(arg1, arg2, arg3));
5597 #ifdef TARGET_NR_shmctl
5598 case TARGET_NR_shmctl:
5599 ret = do_shmctl(arg1, arg2, arg3);
5602 #ifdef TARGET_NR_shmat
5603 case TARGET_NR_shmat:
5604 ret = do_shmat(arg1, arg2, arg3);
5607 #ifdef TARGET_NR_shmdt
5608 case TARGET_NR_shmdt:
5609 ret = do_shmdt(arg1);
5612 case TARGET_NR_fsync:
5613 ret = get_errno(fsync(arg1));
5615 case TARGET_NR_clone:
5616 #if defined(TARGET_SH4)
5617 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
5618 #elif defined(TARGET_CRIS)
5619 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
5621 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
5624 #ifdef __NR_exit_group
5625 /* new thread calls */
5626 case TARGET_NR_exit_group:
5630 gdb_exit(cpu_env, arg1);
5631 ret = get_errno(exit_group(arg1));
5634 case TARGET_NR_setdomainname:
5635 if (!(p = lock_user_string(arg1)))
5637 ret = get_errno(setdomainname(p, arg2));
5638 unlock_user(p, arg1, 0);
5640 case TARGET_NR_uname:
5641 /* no need to transcode because we use the linux syscall */
5643 struct new_utsname * buf;
5645 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
5647 ret = get_errno(sys_uname(buf));
5648 if (!is_error(ret)) {
5649 /* Overwrite the native machine name with whatever is being
5651 strcpy (buf->machine, UNAME_MACHINE);
5652 /* Allow the user to override the reported release. */
5653 if (qemu_uname_release && *qemu_uname_release)
5654 strcpy (buf->release, qemu_uname_release);
5656 unlock_user_struct(buf, arg1, 1);
5660 case TARGET_NR_modify_ldt:
5661 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
5663 #if !defined(TARGET_X86_64)
5664 case TARGET_NR_vm86old:
5666 case TARGET_NR_vm86:
5667 ret = do_vm86(cpu_env, arg1, arg2);
5671 case TARGET_NR_adjtimex:
5673 #ifdef TARGET_NR_create_module
5674 case TARGET_NR_create_module:
5676 case TARGET_NR_init_module:
5677 case TARGET_NR_delete_module:
5678 #ifdef TARGET_NR_get_kernel_syms
5679 case TARGET_NR_get_kernel_syms:
5682 case TARGET_NR_quotactl:
5684 case TARGET_NR_getpgid:
5685 ret = get_errno(getpgid(arg1));
5687 case TARGET_NR_fchdir:
5688 ret = get_errno(fchdir(arg1));
5690 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5691 case TARGET_NR_bdflush:
5694 #ifdef TARGET_NR_sysfs
5695 case TARGET_NR_sysfs:
5698 case TARGET_NR_personality:
5699 ret = get_errno(personality(arg1));
5701 #ifdef TARGET_NR_afs_syscall
5702 case TARGET_NR_afs_syscall:
5705 #ifdef TARGET_NR__llseek /* Not on alpha */
5706 case TARGET_NR__llseek:
5708 #if defined (__x86_64__)
5709 ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5));
5710 if (put_user_s64(ret, arg4))
5714 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
5715 if (put_user_s64(res, arg4))
5721 case TARGET_NR_getdents:
5722 #if TARGET_ABI_BITS != 32
5724 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5726 struct target_dirent *target_dirp;
5727 struct linux_dirent *dirp;
5728 abi_long count = arg3;
5730 dirp = malloc(count);
5732 ret = -TARGET_ENOMEM;
5736 ret = get_errno(sys_getdents(arg1, dirp, count));
5737 if (!is_error(ret)) {
5738 struct linux_dirent *de;
5739 struct target_dirent *tde;
5741 int reclen, treclen;
5742 int count1, tnamelen;
5746 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5750 reclen = de->d_reclen;
5751 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
5752 tde->d_reclen = tswap16(treclen);
5753 tde->d_ino = tswapl(de->d_ino);
5754 tde->d_off = tswapl(de->d_off);
5755 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
5758 /* XXX: may not be correct */
5759 pstrcpy(tde->d_name, tnamelen, de->d_name);
5760 de = (struct linux_dirent *)((char *)de + reclen);
5762 tde = (struct target_dirent *)((char *)tde + treclen);
5766 unlock_user(target_dirp, arg2, ret);
5772 struct linux_dirent *dirp;
5773 abi_long count = arg3;
5775 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5777 ret = get_errno(sys_getdents(arg1, dirp, count));
5778 if (!is_error(ret)) {
5779 struct linux_dirent *de;
5784 reclen = de->d_reclen;
5787 de->d_reclen = tswap16(reclen);
5788 tswapls(&de->d_ino);
5789 tswapls(&de->d_off);
5790 de = (struct linux_dirent *)((char *)de + reclen);
5794 unlock_user(dirp, arg2, ret);
5798 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5799 case TARGET_NR_getdents64:
5801 struct linux_dirent64 *dirp;
5802 abi_long count = arg3;
5803 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5805 ret = get_errno(sys_getdents64(arg1, dirp, count));
5806 if (!is_error(ret)) {
5807 struct linux_dirent64 *de;
5812 reclen = de->d_reclen;
5815 de->d_reclen = tswap16(reclen);
5816 tswap64s((uint64_t *)&de->d_ino);
5817 tswap64s((uint64_t *)&de->d_off);
5818 de = (struct linux_dirent64 *)((char *)de + reclen);
5822 unlock_user(dirp, arg2, ret);
5825 #endif /* TARGET_NR_getdents64 */
5826 #ifdef TARGET_NR__newselect
5827 case TARGET_NR__newselect:
5828 ret = do_select(arg1, arg2, arg3, arg4, arg5);
5831 #ifdef TARGET_NR_poll
5832 case TARGET_NR_poll:
5834 struct target_pollfd *target_pfd;
5835 unsigned int nfds = arg2;
5840 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
5843 pfd = alloca(sizeof(struct pollfd) * nfds);
5844 for(i = 0; i < nfds; i++) {
5845 pfd[i].fd = tswap32(target_pfd[i].fd);
5846 pfd[i].events = tswap16(target_pfd[i].events);
5848 ret = get_errno(poll(pfd, nfds, timeout));
5849 if (!is_error(ret)) {
5850 for(i = 0; i < nfds; i++) {
5851 target_pfd[i].revents = tswap16(pfd[i].revents);
5853 ret += nfds * (sizeof(struct target_pollfd)
5854 - sizeof(struct pollfd));
5856 unlock_user(target_pfd, arg1, ret);
5860 case TARGET_NR_flock:
5861 /* NOTE: the flock constant seems to be the same for every
5863 ret = get_errno(flock(arg1, arg2));
5865 case TARGET_NR_readv:
5870 vec = alloca(count * sizeof(struct iovec));
5871 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
5873 ret = get_errno(readv(arg1, vec, count));
5874 unlock_iovec(vec, arg2, count, 1);
5877 case TARGET_NR_writev:
5882 vec = alloca(count * sizeof(struct iovec));
5883 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
5885 ret = get_errno(writev(arg1, vec, count));
5886 unlock_iovec(vec, arg2, count, 0);
5889 case TARGET_NR_getsid:
5890 ret = get_errno(getsid(arg1));
5892 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5893 case TARGET_NR_fdatasync:
5894 ret = get_errno(fdatasync(arg1));
5897 case TARGET_NR__sysctl:
5898 /* We don't implement this, but ENOTDIR is always a safe
5900 ret = -TARGET_ENOTDIR;
5902 case TARGET_NR_sched_setparam:
5904 struct sched_param *target_schp;
5905 struct sched_param schp;
5907 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
5909 schp.sched_priority = tswap32(target_schp->sched_priority);
5910 unlock_user_struct(target_schp, arg2, 0);
5911 ret = get_errno(sched_setparam(arg1, &schp));
5914 case TARGET_NR_sched_getparam:
5916 struct sched_param *target_schp;
5917 struct sched_param schp;
5918 ret = get_errno(sched_getparam(arg1, &schp));
5919 if (!is_error(ret)) {
5920 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
5922 target_schp->sched_priority = tswap32(schp.sched_priority);
5923 unlock_user_struct(target_schp, arg2, 1);
5927 case TARGET_NR_sched_setscheduler:
5929 struct sched_param *target_schp;
5930 struct sched_param schp;
5931 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
5933 schp.sched_priority = tswap32(target_schp->sched_priority);
5934 unlock_user_struct(target_schp, arg3, 0);
5935 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
5938 case TARGET_NR_sched_getscheduler:
5939 ret = get_errno(sched_getscheduler(arg1));
5941 case TARGET_NR_sched_yield:
5942 ret = get_errno(sched_yield());
5944 case TARGET_NR_sched_get_priority_max:
5945 ret = get_errno(sched_get_priority_max(arg1));
5947 case TARGET_NR_sched_get_priority_min:
5948 ret = get_errno(sched_get_priority_min(arg1));
5950 case TARGET_NR_sched_rr_get_interval:
5953 ret = get_errno(sched_rr_get_interval(arg1, &ts));
5954 if (!is_error(ret)) {
5955 host_to_target_timespec(arg2, &ts);
5959 case TARGET_NR_nanosleep:
5961 struct timespec req, rem;
5962 target_to_host_timespec(&req, arg1);
5963 ret = get_errno(nanosleep(&req, &rem));
5964 if (is_error(ret) && arg2) {
5965 host_to_target_timespec(arg2, &rem);
5969 #ifdef TARGET_NR_query_module
5970 case TARGET_NR_query_module:
5973 #ifdef TARGET_NR_nfsservctl
5974 case TARGET_NR_nfsservctl:
5977 case TARGET_NR_prctl:
5980 case PR_GET_PDEATHSIG:
5983 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
5984 if (!is_error(ret) && arg2
5985 && put_user_ual(deathsig, arg2))
5990 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
5994 #ifdef TARGET_NR_arch_prctl
5995 case TARGET_NR_arch_prctl:
5996 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
5997 ret = do_arch_prctl(cpu_env, arg1, arg2);
6003 #ifdef TARGET_NR_pread
6004 case TARGET_NR_pread:
6006 if (((CPUARMState *)cpu_env)->eabi)
6009 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6011 ret = get_errno(pread(arg1, p, arg3, arg4));
6012 unlock_user(p, arg2, ret);
6014 case TARGET_NR_pwrite:
6016 if (((CPUARMState *)cpu_env)->eabi)
6019 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6021 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6022 unlock_user(p, arg2, 0);
6025 #ifdef TARGET_NR_pread64
6026 case TARGET_NR_pread64:
6027 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6029 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6030 unlock_user(p, arg2, ret);
6032 case TARGET_NR_pwrite64:
6033 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6035 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6036 unlock_user(p, arg2, 0);
6039 case TARGET_NR_getcwd:
6040 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6042 ret = get_errno(sys_getcwd1(p, arg2));
6043 unlock_user(p, arg1, ret);
6045 case TARGET_NR_capget:
6047 case TARGET_NR_capset:
6049 case TARGET_NR_sigaltstack:
6050 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6051 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
6052 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6057 case TARGET_NR_sendfile:
6059 #ifdef TARGET_NR_getpmsg
6060 case TARGET_NR_getpmsg:
6063 #ifdef TARGET_NR_putpmsg
6064 case TARGET_NR_putpmsg:
6067 #ifdef TARGET_NR_vfork
6068 case TARGET_NR_vfork:
6069 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6073 #ifdef TARGET_NR_ugetrlimit
6074 case TARGET_NR_ugetrlimit:
6077 ret = get_errno(getrlimit(arg1, &rlim));
6078 if (!is_error(ret)) {
6079 struct target_rlimit *target_rlim;
6080 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6082 target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
6083 target_rlim->rlim_max = tswapl(rlim.rlim_max);
6084 unlock_user_struct(target_rlim, arg2, 1);
6089 #ifdef TARGET_NR_truncate64
6090 case TARGET_NR_truncate64:
6091 if (!(p = lock_user_string(arg1)))
6093 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6094 unlock_user(p, arg1, 0);
6097 #ifdef TARGET_NR_ftruncate64
6098 case TARGET_NR_ftruncate64:
6099 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6102 #ifdef TARGET_NR_stat64
6103 case TARGET_NR_stat64:
6104 if (!(p = lock_user_string(arg1)))
6106 ret = get_errno(stat(path(p), &st));
6107 unlock_user(p, arg1, 0);
6109 ret = host_to_target_stat64(cpu_env, arg2, &st);
6112 #ifdef TARGET_NR_lstat64
6113 case TARGET_NR_lstat64:
6114 if (!(p = lock_user_string(arg1)))
6116 ret = get_errno(lstat(path(p), &st));
6117 unlock_user(p, arg1, 0);
6119 ret = host_to_target_stat64(cpu_env, arg2, &st);
6122 #ifdef TARGET_NR_fstat64
6123 case TARGET_NR_fstat64:
6124 ret = get_errno(fstat(arg1, &st));
6126 ret = host_to_target_stat64(cpu_env, arg2, &st);
6129 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6130 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6131 #ifdef TARGET_NR_fstatat64
6132 case TARGET_NR_fstatat64:
6134 #ifdef TARGET_NR_newfstatat
6135 case TARGET_NR_newfstatat:
6137 if (!(p = lock_user_string(arg2)))
6139 #ifdef __NR_fstatat64
6140 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6142 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6145 ret = host_to_target_stat64(cpu_env, arg3, &st);
6149 case TARGET_NR_lchown:
6150 if (!(p = lock_user_string(arg1)))
6152 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6153 unlock_user(p, arg1, 0);
6155 case TARGET_NR_getuid:
6156 ret = get_errno(high2lowuid(getuid()));
6158 case TARGET_NR_getgid:
6159 ret = get_errno(high2lowgid(getgid()));
6161 case TARGET_NR_geteuid:
6162 ret = get_errno(high2lowuid(geteuid()));
6164 case TARGET_NR_getegid:
6165 ret = get_errno(high2lowgid(getegid()));
6167 case TARGET_NR_setreuid:
6168 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6170 case TARGET_NR_setregid:
6171 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6173 case TARGET_NR_getgroups:
6175 int gidsetsize = arg1;
6176 uint16_t *target_grouplist;
6180 grouplist = alloca(gidsetsize * sizeof(gid_t));
6181 ret = get_errno(getgroups(gidsetsize, grouplist));
6182 if (gidsetsize == 0)
6184 if (!is_error(ret)) {
6185 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6186 if (!target_grouplist)
6188 for(i = 0;i < ret; i++)
6189 target_grouplist[i] = tswap16(grouplist[i]);
6190 unlock_user(target_grouplist, arg2, gidsetsize * 2);
6194 case TARGET_NR_setgroups:
6196 int gidsetsize = arg1;
6197 uint16_t *target_grouplist;
6201 grouplist = alloca(gidsetsize * sizeof(gid_t));
6202 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
6203 if (!target_grouplist) {
6204 ret = -TARGET_EFAULT;
6207 for(i = 0;i < gidsetsize; i++)
6208 grouplist[i] = tswap16(target_grouplist[i]);
6209 unlock_user(target_grouplist, arg2, 0);
6210 ret = get_errno(setgroups(gidsetsize, grouplist));
6213 case TARGET_NR_fchown:
6214 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
6216 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6217 case TARGET_NR_fchownat:
6218 if (!(p = lock_user_string(arg2)))
6220 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
6221 unlock_user(p, arg2, 0);
6224 #ifdef TARGET_NR_setresuid
6225 case TARGET_NR_setresuid:
6226 ret = get_errno(setresuid(low2highuid(arg1),
6228 low2highuid(arg3)));
6231 #ifdef TARGET_NR_getresuid
6232 case TARGET_NR_getresuid:
6234 uid_t ruid, euid, suid;
6235 ret = get_errno(getresuid(&ruid, &euid, &suid));
6236 if (!is_error(ret)) {
6237 if (put_user_u16(high2lowuid(ruid), arg1)
6238 || put_user_u16(high2lowuid(euid), arg2)
6239 || put_user_u16(high2lowuid(suid), arg3))
6245 #ifdef TARGET_NR_getresgid
6246 case TARGET_NR_setresgid:
6247 ret = get_errno(setresgid(low2highgid(arg1),
6249 low2highgid(arg3)));
6252 #ifdef TARGET_NR_getresgid
6253 case TARGET_NR_getresgid:
6255 gid_t rgid, egid, sgid;
6256 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6257 if (!is_error(ret)) {
6258 if (put_user_u16(high2lowgid(rgid), arg1)
6259 || put_user_u16(high2lowgid(egid), arg2)
6260 || put_user_u16(high2lowgid(sgid), arg3))
6266 case TARGET_NR_chown:
6267 if (!(p = lock_user_string(arg1)))
6269 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
6270 unlock_user(p, arg1, 0);
6272 case TARGET_NR_setuid:
6273 ret = get_errno(setuid(low2highuid(arg1)));
6275 case TARGET_NR_setgid:
6276 ret = get_errno(setgid(low2highgid(arg1)));
6278 case TARGET_NR_setfsuid:
6279 ret = get_errno(setfsuid(arg1));
6281 case TARGET_NR_setfsgid:
6282 ret = get_errno(setfsgid(arg1));
6284 #endif /* USE_UID16 */
6286 #ifdef TARGET_NR_lchown32
6287 case TARGET_NR_lchown32:
6288 if (!(p = lock_user_string(arg1)))
6290 ret = get_errno(lchown(p, arg2, arg3));
6291 unlock_user(p, arg1, 0);
6294 #ifdef TARGET_NR_getuid32
6295 case TARGET_NR_getuid32:
6296 ret = get_errno(getuid());
6300 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6301 /* Alpha specific */
6302 case TARGET_NR_getxuid:
6306 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
6308 ret = get_errno(getuid());
6311 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6312 /* Alpha specific */
6313 case TARGET_NR_getxgid:
6317 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
6319 ret = get_errno(getgid());
6323 #ifdef TARGET_NR_getgid32
6324 case TARGET_NR_getgid32:
6325 ret = get_errno(getgid());
6328 #ifdef TARGET_NR_geteuid32
6329 case TARGET_NR_geteuid32:
6330 ret = get_errno(geteuid());
6333 #ifdef TARGET_NR_getegid32
6334 case TARGET_NR_getegid32:
6335 ret = get_errno(getegid());
6338 #ifdef TARGET_NR_setreuid32
6339 case TARGET_NR_setreuid32:
6340 ret = get_errno(setreuid(arg1, arg2));
6343 #ifdef TARGET_NR_setregid32
6344 case TARGET_NR_setregid32:
6345 ret = get_errno(setregid(arg1, arg2));
6348 #ifdef TARGET_NR_getgroups32
6349 case TARGET_NR_getgroups32:
6351 int gidsetsize = arg1;
6352 uint32_t *target_grouplist;
6356 grouplist = alloca(gidsetsize * sizeof(gid_t));
6357 ret = get_errno(getgroups(gidsetsize, grouplist));
6358 if (gidsetsize == 0)
6360 if (!is_error(ret)) {
6361 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
6362 if (!target_grouplist) {
6363 ret = -TARGET_EFAULT;
6366 for(i = 0;i < ret; i++)
6367 target_grouplist[i] = tswap32(grouplist[i]);
6368 unlock_user(target_grouplist, arg2, gidsetsize * 4);
6373 #ifdef TARGET_NR_setgroups32
6374 case TARGET_NR_setgroups32:
6376 int gidsetsize = arg1;
6377 uint32_t *target_grouplist;
6381 grouplist = alloca(gidsetsize * sizeof(gid_t));
6382 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
6383 if (!target_grouplist) {
6384 ret = -TARGET_EFAULT;
6387 for(i = 0;i < gidsetsize; i++)
6388 grouplist[i] = tswap32(target_grouplist[i]);
6389 unlock_user(target_grouplist, arg2, 0);
6390 ret = get_errno(setgroups(gidsetsize, grouplist));
6394 #ifdef TARGET_NR_fchown32
6395 case TARGET_NR_fchown32:
6396 ret = get_errno(fchown(arg1, arg2, arg3));
6399 #ifdef TARGET_NR_setresuid32
6400 case TARGET_NR_setresuid32:
6401 ret = get_errno(setresuid(arg1, arg2, arg3));
6404 #ifdef TARGET_NR_getresuid32
6405 case TARGET_NR_getresuid32:
6407 uid_t ruid, euid, suid;
6408 ret = get_errno(getresuid(&ruid, &euid, &suid));
6409 if (!is_error(ret)) {
6410 if (put_user_u32(ruid, arg1)
6411 || put_user_u32(euid, arg2)
6412 || put_user_u32(suid, arg3))
6418 #ifdef TARGET_NR_setresgid32
6419 case TARGET_NR_setresgid32:
6420 ret = get_errno(setresgid(arg1, arg2, arg3));
6423 #ifdef TARGET_NR_getresgid32
6424 case TARGET_NR_getresgid32:
6426 gid_t rgid, egid, sgid;
6427 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6428 if (!is_error(ret)) {
6429 if (put_user_u32(rgid, arg1)
6430 || put_user_u32(egid, arg2)
6431 || put_user_u32(sgid, arg3))
6437 #ifdef TARGET_NR_chown32
6438 case TARGET_NR_chown32:
6439 if (!(p = lock_user_string(arg1)))
6441 ret = get_errno(chown(p, arg2, arg3));
6442 unlock_user(p, arg1, 0);
6445 #ifdef TARGET_NR_setuid32
6446 case TARGET_NR_setuid32:
6447 ret = get_errno(setuid(arg1));
6450 #ifdef TARGET_NR_setgid32
6451 case TARGET_NR_setgid32:
6452 ret = get_errno(setgid(arg1));
6455 #ifdef TARGET_NR_setfsuid32
6456 case TARGET_NR_setfsuid32:
6457 ret = get_errno(setfsuid(arg1));
6460 #ifdef TARGET_NR_setfsgid32
6461 case TARGET_NR_setfsgid32:
6462 ret = get_errno(setfsgid(arg1));
6466 case TARGET_NR_pivot_root:
6468 #ifdef TARGET_NR_mincore
6469 case TARGET_NR_mincore:
6472 ret = -TARGET_EFAULT;
6473 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
6475 if (!(p = lock_user_string(arg3)))
6477 ret = get_errno(mincore(a, arg2, p));
6478 unlock_user(p, arg3, ret);
6480 unlock_user(a, arg1, 0);
6484 #ifdef TARGET_NR_arm_fadvise64_64
6485 case TARGET_NR_arm_fadvise64_64:
6488 * arm_fadvise64_64 looks like fadvise64_64 but
6489 * with different argument order
6497 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
6498 #ifdef TARGET_NR_fadvise64_64
6499 case TARGET_NR_fadvise64_64:
6501 /* This is a hint, so ignoring and returning success is ok. */
6505 #ifdef TARGET_NR_madvise
6506 case TARGET_NR_madvise:
6507 /* A straight passthrough may not be safe because qemu sometimes
6508 turns private file-backed mappings into anonymous mappings.
6509 This will break MADV_DONTNEED.
6510 This is a hint, so ignoring and returning success is ok. */
6514 #if TARGET_ABI_BITS == 32
6515 case TARGET_NR_fcntl64:
6519 struct target_flock64 *target_fl;
6521 struct target_eabi_flock64 *target_efl;
6525 case TARGET_F_GETLK64:
6528 case TARGET_F_SETLK64:
6531 case TARGET_F_SETLKW64:
6540 case TARGET_F_GETLK64:
6542 if (((CPUARMState *)cpu_env)->eabi) {
6543 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6545 fl.l_type = tswap16(target_efl->l_type);
6546 fl.l_whence = tswap16(target_efl->l_whence);
6547 fl.l_start = tswap64(target_efl->l_start);
6548 fl.l_len = tswap64(target_efl->l_len);
6549 fl.l_pid = tswapl(target_efl->l_pid);
6550 unlock_user_struct(target_efl, arg3, 0);
6554 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6556 fl.l_type = tswap16(target_fl->l_type);
6557 fl.l_whence = tswap16(target_fl->l_whence);
6558 fl.l_start = tswap64(target_fl->l_start);
6559 fl.l_len = tswap64(target_fl->l_len);
6560 fl.l_pid = tswapl(target_fl->l_pid);
6561 unlock_user_struct(target_fl, arg3, 0);
6563 ret = get_errno(fcntl(arg1, cmd, &fl));
6566 if (((CPUARMState *)cpu_env)->eabi) {
6567 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
6569 target_efl->l_type = tswap16(fl.l_type);
6570 target_efl->l_whence = tswap16(fl.l_whence);
6571 target_efl->l_start = tswap64(fl.l_start);
6572 target_efl->l_len = tswap64(fl.l_len);
6573 target_efl->l_pid = tswapl(fl.l_pid);
6574 unlock_user_struct(target_efl, arg3, 1);
6578 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
6580 target_fl->l_type = tswap16(fl.l_type);
6581 target_fl->l_whence = tswap16(fl.l_whence);
6582 target_fl->l_start = tswap64(fl.l_start);
6583 target_fl->l_len = tswap64(fl.l_len);
6584 target_fl->l_pid = tswapl(fl.l_pid);
6585 unlock_user_struct(target_fl, arg3, 1);
6590 case TARGET_F_SETLK64:
6591 case TARGET_F_SETLKW64:
6593 if (((CPUARMState *)cpu_env)->eabi) {
6594 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6596 fl.l_type = tswap16(target_efl->l_type);
6597 fl.l_whence = tswap16(target_efl->l_whence);
6598 fl.l_start = tswap64(target_efl->l_start);
6599 fl.l_len = tswap64(target_efl->l_len);
6600 fl.l_pid = tswapl(target_efl->l_pid);
6601 unlock_user_struct(target_efl, arg3, 0);
6605 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6607 fl.l_type = tswap16(target_fl->l_type);
6608 fl.l_whence = tswap16(target_fl->l_whence);
6609 fl.l_start = tswap64(target_fl->l_start);
6610 fl.l_len = tswap64(target_fl->l_len);
6611 fl.l_pid = tswapl(target_fl->l_pid);
6612 unlock_user_struct(target_fl, arg3, 0);
6614 ret = get_errno(fcntl(arg1, cmd, &fl));
6617 ret = do_fcntl(arg1, arg2, arg3);
6623 #ifdef TARGET_NR_cacheflush
6624 case TARGET_NR_cacheflush:
6625 /* self-modifying code is handled automatically, so nothing needed */
6629 #ifdef TARGET_NR_security
6630 case TARGET_NR_security:
6633 #ifdef TARGET_NR_getpagesize
6634 case TARGET_NR_getpagesize:
6635 ret = TARGET_PAGE_SIZE;
6638 case TARGET_NR_gettid:
6639 ret = get_errno(gettid());
6641 #ifdef TARGET_NR_readahead
6642 case TARGET_NR_readahead:
6643 #if TARGET_ABI_BITS == 32
6645 if (((CPUARMState *)cpu_env)->eabi)
6652 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
6654 ret = get_errno(readahead(arg1, arg2, arg3));
6658 #ifdef TARGET_NR_setxattr
6659 case TARGET_NR_setxattr:
6660 case TARGET_NR_lsetxattr:
6661 case TARGET_NR_fsetxattr:
6662 case TARGET_NR_getxattr:
6663 case TARGET_NR_lgetxattr:
6664 case TARGET_NR_fgetxattr:
6665 case TARGET_NR_listxattr:
6666 case TARGET_NR_llistxattr:
/*
 * NOTE(review): fragment of the do_syscall() switch in QEMU's linux-user
 * emulation layer.  Each "case TARGET_NR_*" translates one guest syscall
 * onto the host: guest-side structures are converted with the
 * target_to_host_* / host_to_target_* helpers, guest pointers are mapped
 * with lock_user*/g2h(), and host errno results are folded into the
 * guest convention via get_errno()/is_error().
 * The leading decimal numbers are line numbers from the original listing,
 * and the gaps in them show that interleaved lines (break; / #endif /
 * braces / if-guards) have been elided from this view — do not assume the
 * visible lines are contiguous code.
 */
/* Extended-attribute syscalls: not emulated, report "operation not
 * supported" to the guest. */
6667 case TARGET_NR_flistxattr:
6668 case TARGET_NR_removexattr:
6669 case TARGET_NR_lremovexattr:
6670 case TARGET_NR_fremovexattr:
6671 ret = -TARGET_EOPNOTSUPP;
/* set_thread_area: per-architecture TLS register setup.  MIPS stores the
 * value in the CPU state's tls_value; CRIS uses the PR_PID pseudo
 * register (with an EINVAL path, presumably for a bad argument — the
 * guard condition is in an elided line); 32-bit x86 goes through the
 * full GDT-entry emulation in do_set_thread_area(). */
6674 #ifdef TARGET_NR_set_thread_area
6675 case TARGET_NR_set_thread_area:
6676 #if defined(TARGET_MIPS)
6677 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
6680 #elif defined(TARGET_CRIS)
6682 ret = -TARGET_EINVAL;
6684 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
6688 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6689 ret = do_set_thread_area(cpu_env, arg1);
/* Other architectures: fall through to the silent ENOSYS label. */
6692 goto unimplemented_nowarn;
/* get_thread_area: only emulated for 32-bit x86. */
6695 #ifdef TARGET_NR_get_thread_area
6696 case TARGET_NR_get_thread_area:
6697 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6698 ret = do_get_thread_area(cpu_env, arg1);
6700 goto unimplemented_nowarn;
/* getdomainname: not emulated; silent ENOSYS. */
6703 #ifdef TARGET_NR_getdomainname
6704 case TARGET_NR_getdomainname:
6705 goto unimplemented_nowarn;
/* clock_gettime(clk_id, *tp): call the host, then copy the host
 * timespec back into the guest buffer at arg2 on success. */
6708 #ifdef TARGET_NR_clock_gettime
6709 case TARGET_NR_clock_gettime:
6712 ret = get_errno(clock_gettime(arg1, &ts));
6713 if (!is_error(ret)) {
6714 host_to_target_timespec(arg2, &ts);
/* clock_getres: same in/out pattern as clock_gettime. */
6719 #ifdef TARGET_NR_clock_getres
6720 case TARGET_NR_clock_getres:
6723 ret = get_errno(clock_getres(arg1, &ts));
6724 if (!is_error(ret)) {
6725 host_to_target_timespec(arg2, &ts);
/* clock_nanosleep(clk, flags, *req, *rem): convert the request in from
 * arg3; arg4 (the "remaining time" out-pointer) may be NULL, hence the
 * ternary.  The write-back of the remainder at 6737 is presumably
 * guarded by an elided "if (arg4)" line — TODO confirm against the full
 * source. */
6730 #ifdef TARGET_NR_clock_nanosleep
6731 case TARGET_NR_clock_nanosleep:
6734 target_to_host_timespec(&ts, arg3);
6735 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
6737 host_to_target_timespec(arg4, &ts);
/* set_tid_address: only forwarded when the host kernel has the syscall;
 * g2h() maps the guest address to a host pointer. */
6742 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
6743 case TARGET_NR_set_tid_address:
6744 ret = get_errno(set_tid_address((int *)g2h(arg1)));
/* tkill/tgkill: thread-directed signals; signal numbers are remapped
 * from guest to host numbering. */
6748 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
6749 case TARGET_NR_tkill:
6750 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
6754 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
6755 case TARGET_NR_tgkill:
6756 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
6757 target_to_host_signal(arg3)));
/* set_robust_list: not emulated; silent ENOSYS. */
6761 #ifdef TARGET_NR_set_robust_list
6762 case TARGET_NR_set_robust_list:
6763 goto unimplemented_nowarn;
/* utimensat(dirfd, path, times[2], flags): convert the two guest
 * timespecs (atime, mtime) when arg3 is non-NULL (guard elided), then
 * either call with a NULL path (elided arg2==0 branch, line 6778) or
 * lock the guest path string and pass it through path() rewriting. */
6766 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
6767 case TARGET_NR_utimensat:
6769 struct timespec *tsp, ts[2];
6773 target_to_host_timespec(ts, arg3);
6774 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
6778 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
6780 if (!(p = lock_user_string(arg2))) {
6781 ret = -TARGET_EFAULT;
6784 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
6785 unlock_user(p, arg2, 0);
/* futex: full op dispatch lives in do_futex(); NPTL builds only. */
6790 #if defined(USE_NPTL)
6791 case TARGET_NR_futex:
6792 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
/* inotify family: thin pass-throughs; only the watch pathname needs
 * guest-memory locking and path() translation. */
6795 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
6796 case TARGET_NR_inotify_init:
6797 ret = get_errno(sys_inotify_init());
6800 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
6801 case TARGET_NR_inotify_add_watch:
6802 p = lock_user_string(arg2);
6803 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
6804 unlock_user(p, arg2, 0);
6807 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
6808 case TARGET_NR_inotify_rm_watch:
6809 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
/* POSIX message queues.
 * NOTE(review): the "arg1 - 1" passed to lock_user_string() in mq_open
 * and mq_unlink looks suspicious — every other case locks the string at
 * the guest address itself, and unlock_user() is then called with plain
 * arg1.  Verify against the upstream source whether this offset is
 * deliberate or an off-by-one. */
6813 #ifdef TARGET_NR_mq_open
6814 case TARGET_NR_mq_open:
6816 struct mq_attr posix_mq_attr;
6818 p = lock_user_string(arg1 - 1);
/* attr copy is presumably guarded by an elided "if (arg4 != 0)". */
6820 copy_from_user_mq_attr (&posix_mq_attr, arg4);
6821 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
6822 unlock_user (p, arg1, 0);
6826 case TARGET_NR_mq_unlink:
6827 p = lock_user_string(arg1 - 1);
6828 ret = get_errno(mq_unlink(p));
6829 unlock_user (p, arg1, 0);
/* mq_timedsend / mq_timedreceive: when arg5 (abs timeout) is non-NULL
 * (guard elided) use the timed variant with a converted timespec and
 * write it back; otherwise (elided else-branch) fall back to the
 * untimed mq_send/mq_receive at 6843/6860. */
6832 case TARGET_NR_mq_timedsend:
6836 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6838 target_to_host_timespec(&ts, arg5);
6839 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
6840 host_to_target_timespec(arg5, &ts);
6843 ret = get_errno(mq_send(arg1, p, arg3, arg4));
6844 unlock_user (p, arg2, arg3);
6848 case TARGET_NR_mq_timedreceive:
6853 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6855 target_to_host_timespec(&ts, arg5);
6856 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
6857 host_to_target_timespec(arg5, &ts);
6860 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
6861 unlock_user (p, arg2, arg3);
/* Return the received message priority to the guest (likely guarded by
 * an elided "if (arg4 != 0)"). */
6863 put_user_u32(prio, arg4);
6867 /* Not implemented for now... */
6868 /* case TARGET_NR_mq_notify: */
/* mq_getsetattr: get the current attrs into arg3 (when non-NULL,
 * guard elided); set new attrs from arg2 (when non-NULL, guard elided),
 * OR-ing the two host return codes together. */
6871 case TARGET_NR_mq_getsetattr:
6873 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
6876 ret = mq_getattr(arg1, &posix_mq_attr_out);
6877 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
6880 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
6881 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
/* default / unimplemented tail of the switch: loudly log unknown
 * syscalls; the unimplemented_nowarn label is the silent variant used
 * by the goto's above.  Both return -TARGET_ENOSYS. */
6890 gemu_log("qemu: Unsupported syscall: %d\n", num);
6891 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
6892 unimplemented_nowarn:
6894 ret = -TARGET_ENOSYS;
/* Strace-style result logging (enabled paths elided around these). */
6899 gemu_log(" = %ld\n", ret);
6902 print_syscall_ret(num, ret);
/* Guest-memory fault path (label context elided): report EFAULT. */
6905 ret = -TARGET_EFAULT;