4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
33 #include <sys/types.h>
39 #include <sys/mount.h>
40 #include <sys/prctl.h>
41 #include <sys/resource.h>
46 #include <sys/socket.h>
49 #include <sys/times.h>
52 #include <sys/statfs.h>
54 #include <sys/sysinfo.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <qemu-common.h>
63 #define termios host_termios
64 #define winsize host_winsize
65 #define termio host_termio
66 #define sgttyb host_sgttyb /* same as target */
67 #define tchars host_tchars /* same as target */
68 #define ltchars host_ltchars /* same as target */
70 #include <linux/termios.h>
71 #include <linux/unistd.h>
72 #include <linux/utsname.h>
73 #include <linux/cdrom.h>
74 #include <linux/hdreg.h>
75 #include <linux/soundcard.h>
77 #include <linux/mtio.h>
78 #include "linux_loop.h"
81 #include "qemu-common.h"
84 #include <linux/futex.h>
85 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
86 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
88 /* XXX: Hardcode the above values. */
89 #define CLONE_NPTL_FLAGS2 0
94 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
95 || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
96 /* 16 bit uid wrappers emulation */
100 //#include <linux/msdos_fs.h>
101 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
102 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/*
 * _syscallN() macro family: generate a static host-side wrapper function
 * `name` that invokes the raw host syscall(2) entry point with N arguments.
 * NOTE(review): this region is a damaged listing — the macro bodies have
 * lost their braces and some continuation lines; kept byte-identical.
 */
113 #define _syscall0(type,name) \
114 static type name (void) \
116 return syscall(__NR_##name); \
119 #define _syscall1(type,name,type1,arg1) \
120 static type name (type1 arg1) \
122 return syscall(__NR_##name, arg1); \
125 #define _syscall2(type,name,type1,arg1,type2,arg2) \
126 static type name (type1 arg1,type2 arg2) \
128 return syscall(__NR_##name, arg1, arg2); \
131 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
132 static type name (type1 arg1,type2 arg2,type3 arg3) \
134 return syscall(__NR_##name, arg1, arg2, arg3); \
137 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
138 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
140 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
143 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
145 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
147 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
151 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
152 type5,arg5,type6,arg6) \
153 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
156 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
160 #define __NR_sys_uname __NR_uname
161 #define __NR_sys_faccessat __NR_faccessat
162 #define __NR_sys_fchmodat __NR_fchmodat
163 #define __NR_sys_fchownat __NR_fchownat
164 #define __NR_sys_fstatat64 __NR_fstatat64
165 #define __NR_sys_futimesat __NR_futimesat
166 #define __NR_sys_getcwd1 __NR_getcwd
167 #define __NR_sys_getdents __NR_getdents
168 #define __NR_sys_getdents64 __NR_getdents64
169 #define __NR_sys_getpriority __NR_getpriority
170 #define __NR_sys_linkat __NR_linkat
171 #define __NR_sys_mkdirat __NR_mkdirat
172 #define __NR_sys_mknodat __NR_mknodat
173 #define __NR_sys_newfstatat __NR_newfstatat
174 #define __NR_sys_openat __NR_openat
175 #define __NR_sys_readlinkat __NR_readlinkat
176 #define __NR_sys_renameat __NR_renameat
177 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
178 #define __NR_sys_symlinkat __NR_symlinkat
179 #define __NR_sys_syslog __NR_syslog
180 #define __NR_sys_tgkill __NR_tgkill
181 #define __NR_sys_tkill __NR_tkill
182 #define __NR_sys_unlinkat __NR_unlinkat
183 #define __NR_sys_utimensat __NR_utimensat
184 #define __NR_sys_futex __NR_futex
185 #define __NR_sys_inotify_init __NR_inotify_init
186 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
187 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/* 64-bit hosts have no _llseek syscall; alias it to plain lseek. */
189 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
190 #define __NR__llseek __NR_lseek
/*
 * Host-side wrappers for syscalls that glibc does not expose (or exposes
 * with different semantics).  Each wrapper is only generated when both the
 * target defines the syscall number (TARGET_NR_*) and the host kernel
 * provides it (__NR_*).  gettid has two variants: a real syscall wrapper
 * when __NR_gettid exists, and a stub replacement otherwise.
 */
194 _syscall0(int, gettid)
196 /* This is a replacement for the host gettid() and must return a host
198 static int gettid(void) {
202 _syscall1(int,sys_uname,struct new_utsname *,buf)
203 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
204 _syscall4(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode,int,flags)
206 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
207 _syscall4(int,sys_fchmodat,int,dirfd,const char *,pathname,
208 mode_t,mode,int,flags)
210 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
211 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
212 uid_t,owner,gid_t,group,int,flags)
214 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
215 defined(__NR_fstatat64)
216 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
217 struct stat *,buf,int,flags)
219 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
220 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
221 const struct timeval *,times)
223 _syscall2(int,sys_getcwd1,char *,buf,size_t,size)
224 #if TARGET_ABI_BITS == 32
225 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
227 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
228 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
230 _syscall2(int, sys_getpriority, int, which, int, who);
231 #if !defined (__x86_64__)
232 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
233 loff_t *, res, uint, wh);
235 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
236 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
237 int,newdirfd,const char *,newpath,int,flags)
239 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
240 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
242 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
243 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
244 mode_t,mode,dev_t,dev)
246 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
247 defined(__NR_newfstatat)
248 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
249 struct stat *,buf,int,flags)
251 #if defined(TARGET_NR_openat) && defined(__NR_openat)
252 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
254 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
255 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
256 char *,buf,size_t,bufsize)
258 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
259 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
260 int,newdirfd,const char *,newpath)
262 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
263 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
264 _syscall3(int,sys_symlinkat,const char *,oldpath,
265 int,newdirfd,const char *,newpath)
267 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
268 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
269 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
271 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
272 _syscall2(int,sys_tkill,int,tid,int,sig)
274 #ifdef __NR_exit_group
275 _syscall1(int,exit_group,int,error_code)
277 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
278 _syscall1(int,set_tid_address,int *,tidptr)
280 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
281 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
283 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
284 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
285 const struct timespec *,tsp,int,flags)
287 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
288 _syscall0(int,sys_inotify_init)
290 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
291 _syscall3(int,sys_inotify_add_watch,int,fd,const char *,pathname,uint32_t,mask)
293 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
294 _syscall2(int,sys_inotify_rm_watch,int,fd,uint32_t,wd)
295 /* futex wrapper is only needed for the NPTL thread emulation. */
296 #if defined(USE_NPTL)
297 #if defined(TARGET_NR_futex) && defined(__NR_futex)
298 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
299 const struct timespec *,timeout,int *,uaddr2,int,val3)
/* Prototypes for host libc functions that lack declarations in old headers. */
303 extern int personality(int);
304 extern int flock(int, int);
305 extern int setfsuid(int);
306 extern int setfsgid(int);
307 extern int setgroups(int, gid_t *);
/*
 * Errno translation tables.  Host and target Linux ports disagree on the
 * numeric values of some errnos, so conversions in both directions are
 * table-driven.  Entries not listed here translate 1:1 (identity).
 * ERRNO_TABLE_SIZE must exceed the largest errno value used on any arch.
 */
309 #define ERRNO_TABLE_SIZE 1200
311 /* target_to_host_errno_table[] is initialized from
312 * host_to_target_errno_table[] in syscall_init(). */
313 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
317 * This list is the union of errno values overridden in asm-<arch>/errno.h
318 * minus the errnos that are not actually generic to all archs.
320 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
321 [EIDRM] = TARGET_EIDRM,
322 [ECHRNG] = TARGET_ECHRNG,
323 [EL2NSYNC] = TARGET_EL2NSYNC,
324 [EL3HLT] = TARGET_EL3HLT,
325 [EL3RST] = TARGET_EL3RST,
326 [ELNRNG] = TARGET_ELNRNG,
327 [EUNATCH] = TARGET_EUNATCH,
328 [ENOCSI] = TARGET_ENOCSI,
329 [EL2HLT] = TARGET_EL2HLT,
330 [EDEADLK] = TARGET_EDEADLK,
331 [ENOLCK] = TARGET_ENOLCK,
332 [EBADE] = TARGET_EBADE,
333 [EBADR] = TARGET_EBADR,
334 [EXFULL] = TARGET_EXFULL,
335 [ENOANO] = TARGET_ENOANO,
336 [EBADRQC] = TARGET_EBADRQC,
337 [EBADSLT] = TARGET_EBADSLT,
338 [EBFONT] = TARGET_EBFONT,
339 [ENOSTR] = TARGET_ENOSTR,
340 [ENODATA] = TARGET_ENODATA,
341 [ETIME] = TARGET_ETIME,
342 [ENOSR] = TARGET_ENOSR,
343 [ENONET] = TARGET_ENONET,
344 [ENOPKG] = TARGET_ENOPKG,
345 [EREMOTE] = TARGET_EREMOTE,
346 [ENOLINK] = TARGET_ENOLINK,
347 [EADV] = TARGET_EADV,
348 [ESRMNT] = TARGET_ESRMNT,
349 [ECOMM] = TARGET_ECOMM,
350 [EPROTO] = TARGET_EPROTO,
351 [EDOTDOT] = TARGET_EDOTDOT,
352 [EMULTIHOP] = TARGET_EMULTIHOP,
353 [EBADMSG] = TARGET_EBADMSG,
354 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
355 [EOVERFLOW] = TARGET_EOVERFLOW,
356 [ENOTUNIQ] = TARGET_ENOTUNIQ,
357 [EBADFD] = TARGET_EBADFD,
358 [EREMCHG] = TARGET_EREMCHG,
359 [ELIBACC] = TARGET_ELIBACC,
360 [ELIBBAD] = TARGET_ELIBBAD,
361 [ELIBSCN] = TARGET_ELIBSCN,
362 [ELIBMAX] = TARGET_ELIBMAX,
363 [ELIBEXEC] = TARGET_ELIBEXEC,
364 [EILSEQ] = TARGET_EILSEQ,
365 [ENOSYS] = TARGET_ENOSYS,
366 [ELOOP] = TARGET_ELOOP,
367 [ERESTART] = TARGET_ERESTART,
368 [ESTRPIPE] = TARGET_ESTRPIPE,
369 [ENOTEMPTY] = TARGET_ENOTEMPTY,
370 [EUSERS] = TARGET_EUSERS,
371 [ENOTSOCK] = TARGET_ENOTSOCK,
372 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
373 [EMSGSIZE] = TARGET_EMSGSIZE,
374 [EPROTOTYPE] = TARGET_EPROTOTYPE,
375 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
376 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
377 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
378 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
379 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
380 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
381 [EADDRINUSE] = TARGET_EADDRINUSE,
382 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
383 [ENETDOWN] = TARGET_ENETDOWN,
384 [ENETUNREACH] = TARGET_ENETUNREACH,
385 [ENETRESET] = TARGET_ENETRESET,
386 [ECONNABORTED] = TARGET_ECONNABORTED,
387 [ECONNRESET] = TARGET_ECONNRESET,
388 [ENOBUFS] = TARGET_ENOBUFS,
389 [EISCONN] = TARGET_EISCONN,
390 [ENOTCONN] = TARGET_ENOTCONN,
391 [EUCLEAN] = TARGET_EUCLEAN,
392 [ENOTNAM] = TARGET_ENOTNAM,
393 [ENAVAIL] = TARGET_ENAVAIL,
394 [EISNAM] = TARGET_EISNAM,
395 [EREMOTEIO] = TARGET_EREMOTEIO,
396 [ESHUTDOWN] = TARGET_ESHUTDOWN,
397 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
398 [ETIMEDOUT] = TARGET_ETIMEDOUT,
399 [ECONNREFUSED] = TARGET_ECONNREFUSED,
400 [EHOSTDOWN] = TARGET_EHOSTDOWN,
401 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
402 [EALREADY] = TARGET_EALREADY,
403 [EINPROGRESS] = TARGET_EINPROGRESS,
404 [ESTALE] = TARGET_ESTALE,
405 [ECANCELED] = TARGET_ECANCELED,
406 [ENOMEDIUM] = TARGET_ENOMEDIUM,
407 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
/* The following errnos are not defined by every host libc; each entry is
 * guarded by an #ifdef in the original (guards lost in this listing). */
409 [ENOKEY] = TARGET_ENOKEY,
412 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
415 [EKEYREVOKED] = TARGET_EKEYREVOKED,
418 [EKEYREJECTED] = TARGET_EKEYREJECTED,
421 [EOWNERDEAD] = TARGET_EOWNERDEAD,
423 #ifdef ENOTRECOVERABLE
424 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
/*
 * Errno conversion helpers.  A zero table entry means "no override": the
 * errno value is the same on host and target, so the input is returned
 * unchanged (the missing fallthrough return is lost in this listing).
 */
428 static inline int host_to_target_errno(int err)
430 if(host_to_target_errno_table[err])
431 return host_to_target_errno_table[err];
435 static inline int target_to_host_errno(int err)
437 if (target_to_host_errno_table[err])
438 return target_to_host_errno_table[err];
/* get_errno(): fold a host syscall result into the target convention of
 * returning -errno (translated) on failure, the plain value on success. */
442 static inline abi_long get_errno(abi_long ret)
445 return -host_to_target_errno(errno);
/* is_error(): Linux-kernel convention — values in (-4096, -1] are errnos. */
450 static inline abi_long is_error(abi_long ret)
452 return (abi_ulong)ret >= (abi_ulong)(-4096);
/* target_strerror(): describe a *target* errno using the host strerror(). */
455 char *target_strerror(int err)
457 return strerror(target_to_host_errno(err));
/*
 * Guest brk(2) emulation.  target_original_brk is the initial program
 * break set at exec time; target_brk tracks the current break.  Growing
 * the break maps fresh anonymous pages at the old break via target_mmap().
 */
460 static abi_ulong target_brk;
461 static abi_ulong target_original_brk;
463 void target_set_brk(abi_ulong new_brk)
465 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
468 /* do_brk() must return target values and target errnos. */
469 abi_long do_brk(abi_ulong new_brk)
472 abi_long mapped_addr;
/* Shrinking below the original break is ignored (current break returned). */
477 if (new_brk < target_original_brk)
480 brk_page = HOST_PAGE_ALIGN(target_brk);
482 /* If the new brk is less than this, set it and we're done... */
483 if (new_brk < brk_page) {
484 target_brk = new_brk;
488 /* We need to allocate more memory after the brk... */
489 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
490 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
491 PROT_READ|PROT_WRITE,
492 MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
494 if (!is_error(mapped_addr))
495 target_brk = new_brk;
/*
 * fd_set marshalling for select(): the target stores fd bitmaps as an
 * array of abi_ulong words, so the sets are rebuilt bit by bit rather
 * than memcpy'd.  nw = number of abi_ulong words covering n descriptors.
 * Both helpers return 0 on success, -TARGET_EFAULT on a bad guest address.
 */
500 static inline abi_long copy_from_user_fdset(fd_set *fds,
501 abi_ulong target_fds_addr,
505 abi_ulong b, *target_fds;
507 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
508 if (!(target_fds = lock_user(VERIFY_READ,
510 sizeof(abi_ulong) * nw,
512 return -TARGET_EFAULT;
516 for (i = 0; i < nw; i++) {
517 /* grab the abi_ulong */
518 __get_user(b, &target_fds[i]);
519 for (j = 0; j < TARGET_ABI_BITS; j++) {
520 /* check the bit inside the abi_ulong */
527 unlock_user(target_fds, target_fds_addr, 0);
/* Reverse direction: host fd_set -> target bitmap words. */
532 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
538 abi_ulong *target_fds;
540 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
541 if (!(target_fds = lock_user(VERIFY_WRITE,
543 sizeof(abi_ulong) * nw,
545 return -TARGET_EFAULT;
548 for (i = 0; i < nw; i++) {
550 for (j = 0; j < TARGET_ABI_BITS; j++) {
551 v |= ((FD_ISSET(k, fds) != 0) << j);
554 __put_user(v, &target_fds[i]);
557 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
/*
 * Marshalling helpers for small kernel structures passed between guest
 * and host address spaces: clock ticks, struct rusage, struct timeval
 * and struct mq_attr.  All lock the guest page(s), byte-swap field by
 * field, and unlock; each returns 0 or -TARGET_EFAULT.
 */
562 #if defined(__alpha__)
/* Rescale clock_t only when host and target HZ differ; done in 64-bit
 * arithmetic so large tick counts do not overflow during the multiply. */
568 static inline abi_long host_to_target_clock_t(long ticks)
570 #if HOST_HZ == TARGET_HZ
573 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
577 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
578 const struct rusage *rusage)
580 struct target_rusage *target_rusage;
582 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
583 return -TARGET_EFAULT;
584 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
585 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
586 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
587 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
588 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
589 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
590 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
591 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
592 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
593 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
594 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
595 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
596 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
597 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
598 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
599 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
600 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
601 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
602 unlock_user_struct(target_rusage, target_addr, 1);
607 static inline abi_long copy_from_user_timeval(struct timeval *tv,
608 abi_ulong target_tv_addr)
610 struct target_timeval *target_tv;
612 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
613 return -TARGET_EFAULT;
615 __get_user(tv->tv_sec, &target_tv->tv_sec);
616 __get_user(tv->tv_usec, &target_tv->tv_usec);
618 unlock_user_struct(target_tv, target_tv_addr, 0);
623 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
624 const struct timeval *tv)
626 struct target_timeval *target_tv;
628 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
629 return -TARGET_EFAULT;
631 __put_user(tv->tv_sec, &target_tv->tv_sec);
632 __put_user(tv->tv_usec, &target_tv->tv_usec);
634 unlock_user_struct(target_tv, target_tv_addr, 1);
/* POSIX message-queue attribute marshalling (mq_open/mq_getattr etc.). */
639 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
640 abi_ulong target_mq_attr_addr)
642 struct target_mq_attr *target_mq_attr;
644 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
645 target_mq_attr_addr, 1))
646 return -TARGET_EFAULT;
648 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
649 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
650 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
651 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
653 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
658 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
659 const struct mq_attr *attr)
661 struct target_mq_attr *target_mq_attr;
663 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
664 target_mq_attr_addr, 0))
665 return -TARGET_EFAULT;
667 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
668 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
669 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
670 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
672 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
/*
 * Guest select(2): copy the three fd_sets and optional timeout in from
 * guest memory, call the host select(), then copy the (kernel-modified)
 * sets and remaining timeout back out.  A zero guest address for any
 * set/timeout means "not supplied" and maps to a NULL host pointer
 * (the *_ptr assignments are lost in this listing).
 */
677 /* do_select() must return target values and target errnos. */
678 static abi_long do_select(int n,
679 abi_ulong rfd_addr, abi_ulong wfd_addr,
680 abi_ulong efd_addr, abi_ulong target_tv_addr)
682 fd_set rfds, wfds, efds;
683 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
684 struct timeval tv, *tv_ptr;
688 if (copy_from_user_fdset(&rfds, rfd_addr, n))
689 return -TARGET_EFAULT;
695 if (copy_from_user_fdset(&wfds, wfd_addr, n))
696 return -TARGET_EFAULT;
702 if (copy_from_user_fdset(&efds, efd_addr, n))
703 return -TARGET_EFAULT;
709 if (target_tv_addr) {
710 if (copy_from_user_timeval(&tv, target_tv_addr))
711 return -TARGET_EFAULT;
717 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
719 if (!is_error(ret)) {
720 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
721 return -TARGET_EFAULT;
722 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
723 return -TARGET_EFAULT;
724 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
725 return -TARGET_EFAULT;
/* Linux writes the remaining time back into the timeout argument. */
727 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
728 return -TARGET_EFAULT;
/*
 * struct sockaddr marshalling.  Only sa_family needs byte-swapping
 * (16-bit); the address payload is copied verbatim.  Both directions
 * return 0 on success, -TARGET_EFAULT on a bad guest address.
 */
734 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
735 abi_ulong target_addr,
738 struct target_sockaddr *target_saddr;
740 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
742 return -TARGET_EFAULT;
743 memcpy(addr, target_saddr, len);
744 addr->sa_family = tswap16(target_saddr->sa_family);
745 unlock_user(target_saddr, target_addr, 0);
750 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
751 struct sockaddr *addr,
754 struct target_sockaddr *target_saddr;
756 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
758 return -TARGET_EFAULT;
759 memcpy(target_saddr, addr, len);
760 target_saddr->sa_family = tswap16(addr->sa_family);
761 unlock_user(target_saddr, target_addr, len);
/*
 * Ancillary-data (SCM_*) marshalling for sendmsg/recvmsg.  Walks the
 * guest cmsg chain and the host cmsg chain in lockstep, converting the
 * headers and payload of each record.  Only SCM_RIGHTS (an array of file
 * descriptors) is converted element-wise; any other type is memcpy'd
 * with a warning, which is wrong if the payload contains multi-byte
 * fields on a cross-endian host.
 */
766 /* ??? Should this also swap msgh->name? */
767 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
768 struct target_msghdr *target_msgh)
770 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
771 abi_long msg_controllen;
772 abi_ulong target_cmsg_addr;
773 struct target_cmsghdr *target_cmsg;
776 msg_controllen = tswapl(target_msgh->msg_controllen);
777 if (msg_controllen < sizeof (struct target_cmsghdr))
779 target_cmsg_addr = tswapl(target_msgh->msg_control);
780 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
782 return -TARGET_EFAULT;
784 while (cmsg && target_cmsg) {
785 void *data = CMSG_DATA(cmsg);
786 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length = record length minus the (aligned) header size. */
788 int len = tswapl(target_cmsg->cmsg_len)
789 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
791 space += CMSG_SPACE(len);
792 if (space > msgh->msg_controllen) {
793 space -= CMSG_SPACE(len);
794 gemu_log("Host cmsg overflow\n");
798 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
799 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
800 cmsg->cmsg_len = CMSG_LEN(len);
/* NOTE(review): cmsg->cmsg_level here is already the *host* value, yet it
 * is compared against TARGET_SOL_SOCKET — presumably SOL_SOCKET was
 * intended; works only where the two constants coincide.  Verify. */
802 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
803 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
804 memcpy(data, target_data, len);
806 int *fd = (int *)data;
807 int *target_fd = (int *)target_data;
808 int i, numfds = len / sizeof(int);
810 for (i = 0; i < numfds; i++)
811 fd[i] = tswap32(target_fd[i]);
814 cmsg = CMSG_NXTHDR(msgh, cmsg);
815 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
817 unlock_user(target_cmsg, target_cmsg_addr, 0);
819 msgh->msg_controllen = space;
/* Reverse direction: host ancillary data -> guest cmsg chain. */
823 /* ??? Should this also swap msgh->name? */
824 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
827 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
828 abi_long msg_controllen;
829 abi_ulong target_cmsg_addr;
830 struct target_cmsghdr *target_cmsg;
833 msg_controllen = tswapl(target_msgh->msg_controllen);
834 if (msg_controllen < sizeof (struct target_cmsghdr))
836 target_cmsg_addr = tswapl(target_msgh->msg_control);
837 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
839 return -TARGET_EFAULT;
841 while (cmsg && target_cmsg) {
842 void *data = CMSG_DATA(cmsg);
843 void *target_data = TARGET_CMSG_DATA(target_cmsg);
845 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
847 space += TARGET_CMSG_SPACE(len);
848 if (space > msg_controllen) {
849 space -= TARGET_CMSG_SPACE(len);
850 gemu_log("Target cmsg overflow\n");
854 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
855 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
856 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
858 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
859 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
860 memcpy(target_data, data, len);
862 int *fd = (int *)data;
863 int *target_fd = (int *)target_data;
864 int i, numfds = len / sizeof(int);
866 for (i = 0; i < numfds; i++)
867 target_fd[i] = tswap32(fd[i]);
870 cmsg = CMSG_NXTHDR(msgh, cmsg);
871 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
873 unlock_user(target_cmsg, target_cmsg_addr, space);
875 target_msgh->msg_controllen = tswapl(space);
/*
 * Guest setsockopt(2).  Dispatches on (level, optname): TCP and most IP
 * options take a plain int fetched from guest memory; SOL_SOCKET options
 * have their TARGET_SO_* name translated to the host SO_* constant before
 * the host call.  IP_MULTICAST_TTL/LOOP accept either a u32 or a single
 * byte, matching kernel behaviour.  Unknown levels log and return
 * -TARGET_ENOPROTOOPT.
 */
879 /* do_setsockopt() Must return target values and target errnos. */
880 static abi_long do_setsockopt(int sockfd, int level, int optname,
881 abi_ulong optval_addr, socklen_t optlen)
888 /* TCP options all take an 'int' value. */
889 if (optlen < sizeof(uint32_t))
890 return -TARGET_EINVAL;
892 if (get_user_u32(val, optval_addr))
893 return -TARGET_EFAULT;
894 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
901 case IP_ROUTER_ALERT:
905 case IP_MTU_DISCOVER:
911 case IP_MULTICAST_TTL:
912 case IP_MULTICAST_LOOP:
/* These two accept either an int or a single byte from userspace. */
914 if (optlen >= sizeof(uint32_t)) {
915 if (get_user_u32(val, optval_addr))
916 return -TARGET_EFAULT;
917 } else if (optlen >= 1) {
918 if (get_user_u8(val, optval_addr))
919 return -TARGET_EFAULT;
921 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
927 case TARGET_SOL_SOCKET:
929 /* Options with 'int' argument. */
930 case TARGET_SO_DEBUG:
933 case TARGET_SO_REUSEADDR:
934 optname = SO_REUSEADDR;
939 case TARGET_SO_ERROR:
942 case TARGET_SO_DONTROUTE:
943 optname = SO_DONTROUTE;
945 case TARGET_SO_BROADCAST:
946 optname = SO_BROADCAST;
948 case TARGET_SO_SNDBUF:
951 case TARGET_SO_RCVBUF:
954 case TARGET_SO_KEEPALIVE:
955 optname = SO_KEEPALIVE;
957 case TARGET_SO_OOBINLINE:
958 optname = SO_OOBINLINE;
960 case TARGET_SO_NO_CHECK:
961 optname = SO_NO_CHECK;
963 case TARGET_SO_PRIORITY:
964 optname = SO_PRIORITY;
967 case TARGET_SO_BSDCOMPAT:
968 optname = SO_BSDCOMPAT;
971 case TARGET_SO_PASSCRED:
972 optname = SO_PASSCRED;
974 case TARGET_SO_TIMESTAMP:
975 optname = SO_TIMESTAMP;
977 case TARGET_SO_RCVLOWAT:
978 optname = SO_RCVLOWAT;
/* NOTE(review): SO_RCVTIMEO/SO_SNDTIMEO take a struct timeval, not an
 * int — treating them as int options looks questionable; verify. */
980 case TARGET_SO_RCVTIMEO:
981 optname = SO_RCVTIMEO;
983 case TARGET_SO_SNDTIMEO:
984 optname = SO_SNDTIMEO;
990 if (optlen < sizeof(uint32_t))
991 return -TARGET_EINVAL;
993 if (get_user_u32(val, optval_addr))
994 return -TARGET_EFAULT;
995 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
999 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1000 ret = -TARGET_ENOPROTOOPT;
/*
 * Guest getsockopt(2).  Handles only options whose value is a single
 * int; structured options (SO_LINGER, SO_*TIMEO, SO_PEERCRED,
 * SO_PEERNAME) fall through to the unsupported path.  The returned
 * value is written back as a u32 or u8 depending on the guest-supplied
 * length, and the length cell is updated as the kernel would.
 */
1005 /* do_getsockopt() Must return target values and target errnos. */
1006 static abi_long do_getsockopt(int sockfd, int level, int optname,
1007 abi_ulong optval_addr, abi_ulong optlen)
1014 case TARGET_SOL_SOCKET:
1017 case TARGET_SO_LINGER:
1018 case TARGET_SO_RCVTIMEO:
1019 case TARGET_SO_SNDTIMEO:
1020 case TARGET_SO_PEERCRED:
1021 case TARGET_SO_PEERNAME:
1022 /* These don't just return a single integer */
1029 /* TCP options all take an 'int' value. */
1031 if (get_user_u32(len, optlen))
1032 return -TARGET_EFAULT;
1034 return -TARGET_EINVAL;
1036 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv))
;
1043 if (put_user_u32(val, optval_addr))
1044 return -TARGET_EFAULT;
1046 if (put_user_u8(val, optval_addr))
1047 return -TARGET_EFAULT;
1049 if (put_user_u32(len, optlen))
1050 return -TARGET_EFAULT;
1057 case IP_ROUTER_ALERT:
1061 case IP_MTU_DISCOVER:
1067 case IP_MULTICAST_TTL:
1068 case IP_MULTICAST_LOOP:
1069 if (get_user_u32(len, optlen))
1070 return -TARGET_EFAULT;
1072 return -TARGET_EINVAL;
1074 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* Short reads: if the guest asked for fewer than sizeof(int) bytes and
 * the value fits in a byte, write it back as a single byte. */
1077 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1079 if (put_user_u32(len, optlen)
1080 || put_user_u8(val, optval_addr))
1081 return -TARGET_EFAULT;
1083 if (len > sizeof(int))
1085 if (put_user_u32(len, optlen)
1086 || put_user_u32(val, optval_addr))
1087 return -TARGET_EFAULT;
1091 ret = -TARGET_ENOPROTOOPT;
1097 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1099 ret = -TARGET_EOPNOTSUPP;
/*
 * Translate a guest iovec array into host struct iovec entries, locking
 * each buffer into host memory.  unlock_iovec() releases them, copying
 * data back to the guest when `copy` is set (i.e. after a read-style op).
 */
1106 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1107 * other lock functions have a return code of 0 for failure.
1109 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1110 int count, int copy)
1112 struct target_iovec *target_vec;
1116 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1118 return -TARGET_EFAULT;
1119 for(i = 0;i < count; i++) {
1120 base = tswapl(target_vec[i].iov_base);
1121 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1122 if (vec[i].iov_len != 0) {
1123 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1124 /* Don't check lock_user return value. We must call writev even
1125 if a element has invalid base address. */
1127 /* zero length pointer is ignored */
1128 vec[i].iov_base = NULL;
1131 unlock_user (target_vec, target_addr, 0);
1135 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1136 int count, int copy)
1138 struct target_iovec *target_vec;
1142 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1144 return -TARGET_EFAULT;
1145 for(i = 0;i < count; i++) {
1146 if (target_vec[i].iov_base) {
1147 base = tswapl(target_vec[i].iov_base);
1148 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1151 unlock_user (target_vec, target_addr, 0);
/*
 * Guest socket(2).  MIPS uses different SOCK_* numbering, so its type
 * constants are remapped to the host values first.  PF_NETLINK is
 * rejected outright — netlink traffic is not emulated.
 */
1156 /* do_socket() Must return target values and target errnos. */
1157 static abi_long do_socket(int domain, int type, int protocol)
1159 #if defined(TARGET_MIPS)
1161 case TARGET_SOCK_DGRAM:
1164 case TARGET_SOCK_STREAM:
1167 case TARGET_SOCK_RAW:
1170 case TARGET_SOCK_RDM:
1173 case TARGET_SOCK_SEQPACKET:
1174 type = SOCK_SEQPACKET;
1176 case TARGET_SOCK_PACKET:
1181 if (domain == PF_NETLINK)
1182 return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
1183 return get_errno(socket(domain, type, protocol));
1186 /* MAX_SOCK_ADDR from linux/net/socket.c */
1187 #define MAX_SOCK_ADDR 128
/*
 * Guest bind(2)/connect(2): validate addrlen, convert the guest sockaddr
 * on the stack, and call the host syscall.
 * NOTE(review): the return value of target_to_host_sockaddr() is ignored
 * in both functions, so a faulting guest address is silently passed on
 * as a garbage sockaddr instead of -TARGET_EFAULT — verify and fix when
 * the full bodies are restored.
 */
1189 /* do_bind() Must return target values and target errnos. */
1190 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1195 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1196 return -TARGET_EINVAL;
1198 addr = alloca(addrlen);
1200 target_to_host_sockaddr(addr, target_addr, addrlen);
1201 return get_errno(bind(sockfd, addr, addrlen));
1204 /* do_connect() Must return target values and target errnos. */
1205 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1210 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1211 return -TARGET_EINVAL;
1213 addr = alloca(addrlen);
1215 target_to_host_sockaddr(addr, target_addr, addrlen);
1216 return get_errno(connect(sockfd, addr, addrlen));
/*
 * Guest sendmsg/recvmsg (send selects direction).  Builds a host struct
 * msghdr from the guest target_msghdr: name, control buffer (allocated
 * at 2x the guest size to absorb alignment differences), flags and the
 * iovec array (locked via lock_iovec).  Ancillary data is converted with
 * target_to_host_cmsg / host_to_target_cmsg around the host call.
 * NOTE(review): the return values of target_to_host_sockaddr() and
 * lock_iovec() are not checked here — guest faults go unreported.
 */
1219 /* do_sendrecvmsg() Must return target values and target errnos. */
1220 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1221 int flags, int send)
1224 struct target_msghdr *msgp;
1228 abi_ulong target_vec;
1231 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1235 return -TARGET_EFAULT;
1236 if (msgp->msg_name) {
1237 msg.msg_namelen = tswap32(msgp->msg_namelen);
1238 msg.msg_name = alloca(msg.msg_namelen);
1239 target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1242 msg.msg_name = NULL;
1243 msg.msg_namelen = 0;
1245 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1246 msg.msg_control = alloca(msg.msg_controllen);
1247 msg.msg_flags = tswap32(msgp->msg_flags);
1249 count = tswapl(msgp->msg_iovlen);
1250 vec = alloca(count * sizeof(struct iovec));
1251 target_vec = tswapl(msgp->msg_iov);
1252 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1253 msg.msg_iovlen = count;
1257 ret = target_to_host_cmsg(&msg, msgp);
1259 ret = get_errno(sendmsg(fd, &msg, flags));
1261 ret = get_errno(recvmsg(fd, &msg, flags));
1262 if (!is_error(ret)) {
1264 ret = host_to_target_cmsg(msgp, &msg);
1269 unlock_iovec(vec, target_vec, count, !send);
1270 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1274 /* do_accept() Must return target values and target errnos. */
1275 static abi_long do_accept(int fd, abi_ulong target_addr,
1276 abi_ulong target_addrlen_addr)
1282 if (get_user_u32(addrlen, target_addrlen_addr))
1283 return -TARGET_EFAULT;
1285 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1286 return -TARGET_EINVAL;
1288 addr = alloca(addrlen);
1290 ret = get_errno(accept(fd, addr, &addrlen));
1291 if (!is_error(ret)) {
1292 host_to_target_sockaddr(target_addr, addr, addrlen);
1293 if (put_user_u32(addrlen, target_addrlen_addr))
1294 ret = -TARGET_EFAULT;
1299 /* do_getpeername() Must return target values and target errnos. */
/* Emulates getpeername(2); same addrlen validation and copy-back
 * pattern as do_accept above. */
1300 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1301 abi_ulong target_addrlen_addr)
1307 if (get_user_u32(addrlen, target_addrlen_addr))
1308 return -TARGET_EFAULT;
/* Bound the guest-supplied length before using it for an alloca. */
1310 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1311 return -TARGET_EINVAL;
1313 addr = alloca(addrlen);
1315 ret = get_errno(getpeername(fd, addr, &addrlen));
1316 if (!is_error(ret)) {
1317 host_to_target_sockaddr(target_addr, addr, addrlen);
1318 if (put_user_u32(addrlen, target_addrlen_addr))
1319 ret = -TARGET_EFAULT;
1324 /* do_getsockname() Must return target values and target errnos. */
/* Emulates getsockname(2).  BUGFIX: the target_addr == 0 shortcut
 * previously called accept(fd, NULL, NULL) — a copy-paste from
 * do_accept() — which would block and consume a pending connection
 * instead of querying the local socket name.  It now issues the real
 * getsockname() so the guest gets the host kernel's errno behavior
 * for a NULL address argument. */
1325 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1326 abi_ulong target_addrlen_addr)
1332 if (target_addr == 0)
1333 return get_errno(getsockname(fd, NULL, NULL));
1335 if (get_user_u32(addrlen, target_addrlen_addr))
1336 return -TARGET_EFAULT;
/* Bound the guest-supplied length before using it for an alloca. */
1338 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1339 return -TARGET_EINVAL;
1341 addr = alloca(addrlen);
1343 ret = get_errno(getsockname(fd, addr, &addrlen));
1344 if (!is_error(ret)) {
/* Copy the local address and its actual length back to the guest. */
1345 host_to_target_sockaddr(target_addr, addr, addrlen);
1346 if (put_user_u32(addrlen, target_addrlen_addr))
1347 ret = -TARGET_EFAULT;
1352 /* do_socketpair() Must return target values and target errnos. */
/* Emulates socketpair(2): creates the pair on the host and stores the
 * two descriptors as consecutive 32-bit ints in guest memory. */
1353 static abi_long do_socketpair(int domain, int type, int protocol,
1354 abi_ulong target_tab_addr)
1359 ret = get_errno(socketpair(domain, type, protocol, tab));
1360 if (!is_error(ret)) {
/* Write both fds; either failing write turns the result into EFAULT. */
1361 if (put_user_s32(tab[0], target_tab_addr)
1362 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1363 ret = -TARGET_EFAULT;
1368 /* do_sendto() Must return target values and target errnos. */
/* Emulates sendto(2)/send(2): locks the guest payload for reading and,
 * when a destination address is supplied, converts it to host form.
 * target_addr == 0 selects the plain send() path (see do_socketcall). */
1369 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1370 abi_ulong target_addr, socklen_t addrlen)
1376 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1377 return -TARGET_EINVAL;
1379 host_msg = lock_user(VERIFY_READ, msg, len, 1)
1381 return -TARGET_EFAULT;
1383 addr = alloca(addrlen);
1384 target_to_host_sockaddr(addr, target_addr, addrlen);
1385 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1387 ret = get_errno(send(fd, host_msg, len, flags));
/* Payload was only read; unlock with length 0 (nothing copied back). */
1389 unlock_user(host_msg, msg, 0);
1393 /* do_recvfrom() Must return target values and target errnos. */
/* Emulates recvfrom(2)/recv(2): locks the guest buffer for writing,
 * receives on the host socket, then (when an address was requested)
 * copies the source address and updated addrlen back to the guest.
 * target_addr == 0 selects the plain recv() path. */
1394 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1395 abi_ulong target_addr,
1396 abi_ulong target_addrlen)
1403 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1405 return -TARGET_EFAULT;
1407 if (get_user_u32(addrlen, target_addrlen)) {
1408 ret = -TARGET_EFAULT;
/* Bound the guest-supplied length before using it for an alloca. */
1411 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR) {
1412 ret = -TARGET_EINVAL;
1415 addr = alloca(addrlen);
1416 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1418 addr = NULL; /* To keep compiler quiet. */
1419 ret = get_errno(recv(fd, host_msg, len, flags));
1421 if (!is_error(ret)) {
1423 host_to_target_sockaddr(target_addr, addr, addrlen);
1424 if (put_user_u32(addrlen, target_addrlen)) {
1425 ret = -TARGET_EFAULT;
/* Success: copy the received bytes back into guest memory. */
1429 unlock_user(host_msg, msg, len);
/* Failure: release the buffer without copying anything back. */
1432 unlock_user(host_msg, msg, 0);
1437 #ifdef TARGET_NR_socketcall
1438 /* do_socketcall() Must return target values and target errnos. */
/* Demultiplexes the legacy socketcall(2) syscall: 'num' selects the
 * socket operation and 'vptr' points to an array of abi_ulong
 * arguments in guest memory.  Each case reads its arguments with
 * get_user_* and dispatches to the matching do_* helper or host call. */
1439 static abi_long do_socketcall(int num, abi_ulong vptr)
/* Stride of one argument slot in the guest argument array. */
1442 const int n = sizeof(abi_ulong);
1447 int domain, type, protocol;
1449 if (get_user_s32(domain, vptr)
1450 || get_user_s32(type, vptr + n)
1451 || get_user_s32(protocol, vptr + 2 * n))
1452 return -TARGET_EFAULT;
1454 ret = do_socket(domain, type, protocol);
1460 abi_ulong target_addr;
1463 if (get_user_s32(sockfd, vptr)
1464 || get_user_ual(target_addr, vptr + n)
1465 || get_user_u32(addrlen, vptr + 2 * n))
1466 return -TARGET_EFAULT;
1468 ret = do_bind(sockfd, target_addr, addrlen);
1471 case SOCKOP_connect:
1474 abi_ulong target_addr;
1477 if (get_user_s32(sockfd, vptr)
1478 || get_user_ual(target_addr, vptr + n)
1479 || get_user_u32(addrlen, vptr + 2 * n))
1480 return -TARGET_EFAULT;
1482 ret = do_connect(sockfd, target_addr, addrlen);
1487 int sockfd, backlog;
1489 if (get_user_s32(sockfd, vptr)
1490 || get_user_s32(backlog, vptr + n))
1491 return -TARGET_EFAULT;
/* listen() takes no pointers, so it can go straight to the host. */
1493 ret = get_errno(listen(sockfd, backlog));
1499 abi_ulong target_addr, target_addrlen;
1501 if (get_user_s32(sockfd, vptr)
1502 || get_user_ual(target_addr, vptr + n)
1503 || get_user_u32(target_addrlen, vptr + 2 * n))
1504 return -TARGET_EFAULT;
1506 ret = do_accept(sockfd, target_addr, target_addrlen);
1509 case SOCKOP_getsockname:
1512 abi_ulong target_addr, target_addrlen;
1514 if (get_user_s32(sockfd, vptr)
1515 || get_user_ual(target_addr, vptr + n)
1516 || get_user_u32(target_addrlen, vptr + 2 * n))
1517 return -TARGET_EFAULT;
1519 ret = do_getsockname(sockfd, target_addr, target_addrlen);
1522 case SOCKOP_getpeername:
1525 abi_ulong target_addr, target_addrlen;
1527 if (get_user_s32(sockfd, vptr)
1528 || get_user_ual(target_addr, vptr + n)
1529 || get_user_u32(target_addrlen, vptr + 2 * n))
1530 return -TARGET_EFAULT;
1532 ret = do_getpeername(sockfd, target_addr, target_addrlen);
1535 case SOCKOP_socketpair:
1537 int domain, type, protocol;
1540 if (get_user_s32(domain, vptr)
1541 || get_user_s32(type, vptr + n)
1542 || get_user_s32(protocol, vptr + 2 * n)
1543 || get_user_ual(tab, vptr + 3 * n))
1544 return -TARGET_EFAULT;
1546 ret = do_socketpair(domain, type, protocol, tab);
/* send: no destination address -> pass 0,0 so do_sendto uses send(). */
1556 if (get_user_s32(sockfd, vptr)
1557 || get_user_ual(msg, vptr + n)
1558 || get_user_ual(len, vptr + 2 * n)
1559 || get_user_s32(flags, vptr + 3 * n))
1560 return -TARGET_EFAULT;
1562 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
/* recv: no source address -> pass 0,0 so do_recvfrom uses recv(). */
1572 if (get_user_s32(sockfd, vptr)
1573 || get_user_ual(msg, vptr + n)
1574 || get_user_ual(len, vptr + 2 * n)
1575 || get_user_s32(flags, vptr + 3 * n))
1576 return -TARGET_EFAULT;
1578 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
1590 if (get_user_s32(sockfd, vptr)
1591 || get_user_ual(msg, vptr + n)
1592 || get_user_ual(len, vptr + 2 * n)
1593 || get_user_s32(flags, vptr + 3 * n)
1594 || get_user_ual(addr, vptr + 4 * n)
1595 || get_user_u32(addrlen, vptr + 5 * n))
1596 return -TARGET_EFAULT;
1598 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
1601 case SOCKOP_recvfrom:
1610 if (get_user_s32(sockfd, vptr)
1611 || get_user_ual(msg, vptr + n)
1612 || get_user_ual(len, vptr + 2 * n)
1613 || get_user_s32(flags, vptr + 3 * n)
1614 || get_user_ual(addr, vptr + 4 * n)
1615 || get_user_u32(addrlen, vptr + 5 * n))
1616 return -TARGET_EFAULT;
1618 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
1621 case SOCKOP_shutdown:
1625 if (get_user_s32(sockfd, vptr)
1626 || get_user_s32(how, vptr + n))
1627 return -TARGET_EFAULT;
1629 ret = get_errno(shutdown(sockfd, how));
/* sendmsg and recvmsg share one handler; the op code picks direction. */
1632 case SOCKOP_sendmsg:
1633 case SOCKOP_recvmsg:
1636 abi_ulong target_msg;
1639 if (get_user_s32(fd, vptr)
1640 || get_user_ual(target_msg, vptr + n)
1641 || get_user_s32(flags, vptr + 2 * n))
1642 return -TARGET_EFAULT;
1644 ret = do_sendrecvmsg(fd, target_msg, flags,
1645 (num == SOCKOP_sendmsg));
1648 case SOCKOP_setsockopt:
1656 if (get_user_s32(sockfd, vptr)
1657 || get_user_s32(level, vptr + n)
1658 || get_user_s32(optname, vptr + 2 * n)
1659 || get_user_ual(optval, vptr + 3 * n)
1660 || get_user_u32(optlen, vptr + 4 * n))
1661 return -TARGET_EFAULT;
1663 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
1666 case SOCKOP_getsockopt:
1674 if (get_user_s32(sockfd, vptr)
1675 || get_user_s32(level, vptr + n)
1676 || get_user_s32(optname, vptr + 2 * n)
1677 || get_user_ual(optval, vptr + 3 * n)
1678 || get_user_u32(optlen, vptr + 4 * n))
1679 return -TARGET_EFAULT;
1681 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
/* Unknown socketcall operation number. */
1685 gemu_log("Unsupported socketcall: %d\n", num);
1686 ret = -TARGET_ENOSYS;
1693 #ifdef TARGET_NR_ipc
1694 #define N_SHM_REGIONS 32
/* Table tracking guest shmat() mappings (start address / size) so that
 * shmdt can clear page flags; start == 0 marks a free slot. */
1696 static struct shm_region {
1699 } shm_regions[N_SHM_REGIONS];
/* Guest-layout mirror of the kernel's struct ipc_perm (SysV IPC
 * permission block), using target-sized fields. */
1702 struct target_ipc_perm
1709 unsigned short int mode;
1710 unsigned short int __pad1;
1711 unsigned short int __seq;
1712 unsigned short int __pad2;
1713 abi_ulong __unused1;
1714 abi_ulong __unused2;
/* Guest-layout mirror of struct semid_ds (semaphore set descriptor). */
1717 struct target_semid_ds
1719 struct target_ipc_perm sem_perm;
1720 abi_ulong sem_otime;
1721 abi_ulong __unused1;
1722 abi_ulong sem_ctime;
1723 abi_ulong __unused2;
1724 abi_ulong sem_nsems;
1725 abi_ulong __unused3;
1726 abi_ulong __unused4;
/* Copy the ipc_perm embedded in a guest semid_ds at target_addr into
 * *host_ip, byte-swapping each field.  Returns 0 on success,
 * -TARGET_EFAULT if the guest struct cannot be locked. */
1729 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
1730 abi_ulong target_addr)
1732 struct target_ipc_perm *target_ip;
1733 struct target_semid_ds *target_sd;
1735 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
1736 return -TARGET_EFAULT;
/* NOTE: target_addr is treated as a semid_ds; the perm block is its
 * first member. */
1737 target_ip=&(target_sd->sem_perm);
1738 host_ip->__key = tswapl(target_ip->__key);
1739 host_ip->uid = tswapl(target_ip->uid);
1740 host_ip->gid = tswapl(target_ip->gid);
1741 host_ip->cuid = tswapl(target_ip->cuid);
1742 host_ip->cgid = tswapl(target_ip->cgid);
1743 host_ip->mode = tswapl(target_ip->mode);
1744 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_ipc_perm: write *host_ip into the perm
 * block of the guest semid_ds at target_addr, byte-swapping fields. */
1748 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
1749 struct ipc_perm *host_ip)
1751 struct target_ipc_perm *target_ip;
1752 struct target_semid_ds *target_sd;
1754 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
1755 return -TARGET_EFAULT;
1756 target_ip = &(target_sd->sem_perm);
1757 target_ip->__key = tswapl(host_ip->__key);
1758 target_ip->uid = tswapl(host_ip->uid);
1759 target_ip->gid = tswapl(host_ip->gid);
1760 target_ip->cuid = tswapl(host_ip->cuid);
1761 target_ip->cgid = tswapl(host_ip->cgid);
1762 target_ip->mode = tswapl(host_ip->mode);
/* Final arg 1: copy the modified struct back to guest memory. */
1763 unlock_user_struct(target_sd, target_addr, 1);
/* Convert a guest semid_ds at target_addr into *host_sd (perm block
 * plus nsems/otime/ctime), byte-swapping each field. */
1767 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
1768 abi_ulong target_addr)
1770 struct target_semid_ds *target_sd;
1772 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
1773 return -TARGET_EFAULT;
/* NOTE(review): return value of target_to_host_ipc_perm is ignored
 * here — an EFAULT in the perm copy would go unnoticed. */
1774 target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr);
1775 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
1776 host_sd->sem_otime = tswapl(target_sd->sem_otime);
1777 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
1778 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_semid_ds: write *host_sd into the guest
 * semid_ds at target_addr, byte-swapping each field. */
1782 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
1783 struct semid_ds *host_sd)
1785 struct target_semid_ds *target_sd;
1787 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
1788 return -TARGET_EFAULT;
/* NOTE(review): return value of host_to_target_ipc_perm is ignored. */
1789 host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm));
1790 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
1791 target_sd->sem_otime = tswapl(host_sd->sem_otime);
1792 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
1793 unlock_user_struct(target_sd, target_addr, 1);
1799 struct semid_ds *buf;
1800 unsigned short *array;
/* Guest-layout counterpart of union semun, used by semctl emulation. */
1803 union target_semun {
1806 unsigned short int *array;
/* Build a host union semun from the guest value at target_addr,
 * interpreting it according to the semctl command: a semid_ds pointer,
 * an int value, or a pointer to an unsigned short array. */
1809 static inline abi_long target_to_host_semun(int cmd,
1810 union semun *host_su,
1811 abi_ulong target_addr,
1812 struct semid_ds *ds)
1814 union target_semun *target_su;
/* semid_ds variant (e.g. IPC_SET): convert the pointed-to struct. */
1819 if (!lock_user_struct(VERIFY_READ, target_su, target_addr, 1))
1820 return -TARGET_EFAULT;
1821 target_to_host_semid_ds(ds,target_su->buf);
1823 unlock_user_struct(target_su, target_addr, 0);
/* int-value variant (e.g. SETVAL). */
1827 if (!lock_user_struct(VERIFY_READ, target_su, target_addr, 1))
1828 return -TARGET_EFAULT;
1829 host_su->val = tswapl(target_su->val);
1830 unlock_user_struct(target_su, target_addr, 0);
/* array variant (e.g. SETALL); NOTE(review): only the first element
 * appears to be converted here. */
1834 if (!lock_user_struct(VERIFY_READ, target_su, target_addr, 1))
1835 return -TARGET_EFAULT;
1836 *host_su->array = tswap16(*target_su->array);
1837 unlock_user_struct(target_su, target_addr, 0);
1840 gemu_log("semun operation not fully supported: %d\n", (int)cmd);
/* Write a host union semun result back into guest memory at
 * target_addr, by semctl command variant (struct / int / array).
 * BUGFIX: all three lock_user_struct() checks were missing the '!' —
 * they returned -TARGET_EFAULT on *success* and fell through to
 * dereference an unlocked pointer on failure.  The corrected form
 * matches the sibling target_to_host_semun() above. */
1845 static inline abi_long host_to_target_semun(int cmd,
1846 abi_ulong target_addr,
1847 union semun *host_su,
1848 struct semid_ds *ds)
1850 union target_semun *target_su;
/* semid_ds variant (e.g. IPC_STAT result). */
1855 if (!lock_user_struct(VERIFY_WRITE, target_su, target_addr, 0))
1856 return -TARGET_EFAULT;
1857 host_to_target_semid_ds(target_su->buf,ds);
1858 unlock_user_struct(target_su, target_addr, 1);
/* int-value variant (e.g. GETVAL result). */
1862 if (!lock_user_struct(VERIFY_WRITE, target_su, target_addr, 0))
1863 return -TARGET_EFAULT;
1864 target_su->val = tswapl(host_su->val);
1865 unlock_user_struct(target_su, target_addr, 1);
/* array variant; NOTE(review): only the first element is converted. */
1869 if (!lock_user_struct(VERIFY_WRITE, target_su, target_addr, 0))
1870 return -TARGET_EFAULT;
1871 *target_su->array = tswap16(*host_su->array);
1872 unlock_user_struct(target_su, target_addr, 1);
1875 gemu_log("semun operation not fully supported: %d\n", (int)cmd);
/* Emulates semctl(2): for each supported command, convert the guest
 * semun argument to host form, issue the host semctl, and convert the
 * result back.  The low byte of 'third' carries the command. */
1880 static inline abi_long do_semctl(int first, int second, int third,
1884 struct semid_ds dsarg;
1885 int cmd = third&0xff;
/* Each supported command follows the same convert / call / convert-back
 * pattern. */
1890 target_to_host_semun(cmd,&arg,ptr,&dsarg);
1891 ret = get_errno(semctl(first, second, cmd, arg));
1892 host_to_target_semun(cmd,ptr,&arg,&dsarg);
1895 target_to_host_semun(cmd,&arg,ptr,&dsarg);
1896 ret = get_errno(semctl(first, second, cmd, arg));
1897 host_to_target_semun(cmd,ptr,&arg,&dsarg);
1900 target_to_host_semun(cmd,&arg,ptr,&dsarg);
1901 ret = get_errno(semctl(first, second, cmd, arg));
1902 host_to_target_semun(cmd,ptr,&arg,&dsarg);
1905 target_to_host_semun(cmd,&arg,ptr,&dsarg);
1906 ret = get_errno(semctl(first, second, cmd, arg));
1907 host_to_target_semun(cmd,ptr,&arg,&dsarg);
1910 target_to_host_semun(cmd,&arg,ptr,&dsarg);
1911 ret = get_errno(semctl(first, second, cmd, arg));
1912 host_to_target_semun(cmd,ptr,&arg,&dsarg);
1915 target_to_host_semun(cmd,&arg,ptr,&dsarg);
1916 ret = get_errno(semctl(first, second, cmd, arg));
1917 host_to_target_semun(cmd,ptr,&arg,&dsarg);
/* Default: pass the argument through untranslated. */
1920 ret = get_errno(semctl(first, second, cmd, arg));
/* Guest-layout mirror of struct msqid_ds (message queue descriptor).
 * On 32-bit ABIs each 64-bit time field carries an extra padding word. */
1926 struct target_msqid_ds
1928 struct target_ipc_perm msg_perm;
1929 abi_ulong msg_stime;
1930 #if TARGET_ABI_BITS == 32
1931 abi_ulong __unused1;
1933 abi_ulong msg_rtime;
1934 #if TARGET_ABI_BITS == 32
1935 abi_ulong __unused2;
1937 abi_ulong msg_ctime;
1938 #if TARGET_ABI_BITS == 32
1939 abi_ulong __unused3;
1941 abi_ulong __msg_cbytes;
1943 abi_ulong msg_qbytes;
1944 abi_ulong msg_lspid;
1945 abi_ulong msg_lrpid;
1946 abi_ulong __unused4;
1947 abi_ulong __unused5;
/* Convert a guest msqid_ds at target_addr into *host_md (perm block
 * plus queue statistics), byte-swapping each field. */
1950 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
1951 abi_ulong target_addr)
1953 struct target_msqid_ds *target_md;
1955 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
1956 return -TARGET_EFAULT;
/* NOTE(review): this early return leaves target_md locked. */
1957 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
1958 return -TARGET_EFAULT;
1959 host_md->msg_stime = tswapl(target_md->msg_stime);
1960 host_md->msg_rtime = tswapl(target_md->msg_rtime);
1961 host_md->msg_ctime = tswapl(target_md->msg_ctime);
1962 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
1963 host_md->msg_qnum = tswapl(target_md->msg_qnum);
1964 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
1965 host_md->msg_lspid = tswapl(target_md->msg_lspid);
1966 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
1967 unlock_user_struct(target_md, target_addr, 0);
/* Inverse of target_to_host_msqid_ds: write *host_md into the guest
 * msqid_ds at target_addr, byte-swapping each field. */
1971 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
1972 struct msqid_ds *host_md)
1974 struct target_msqid_ds *target_md;
1976 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
1977 return -TARGET_EFAULT;
/* NOTE(review): this early return leaves target_md locked. */
1978 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
1979 return -TARGET_EFAULT;
1980 target_md->msg_stime = tswapl(host_md->msg_stime);
1981 target_md->msg_rtime = tswapl(host_md->msg_rtime);
1982 target_md->msg_ctime = tswapl(host_md->msg_ctime);
1983 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
1984 target_md->msg_qnum = tswapl(host_md->msg_qnum);
1985 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
1986 target_md->msg_lspid = tswapl(host_md->msg_lspid);
1987 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
1988 unlock_user_struct(target_md, target_addr, 1);
/* Guest-layout mirror of struct msginfo (IPC_INFO/MSG_INFO result). */
1992 struct target_msginfo {
2000 unsigned short int msgseg;
/* Write a host struct msginfo into guest memory at target_addr using
 * the __put_user field-by-field conversion helpers. */
2003 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2004 struct msginfo *host_msginfo)
2006 struct target_msginfo *target_msginfo;
2007 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2008 return -TARGET_EFAULT;
2009 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2010 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2011 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2012 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2013 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2014 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2015 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2016 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2017 unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulates msgctl(2): converts the guest argument according to the
 * command class — msqid_ds for STAT/SET, NULL for RMID-style commands,
 * msginfo for the INFO commands — and converts results back. */
2021 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2023 struct msqid_ds dsarg;
2024 struct msginfo msginfo;
2025 abi_long ret = -TARGET_EINVAL;
/* msqid_ds-taking commands: convert in, call, convert back out. */
2033 if (target_to_host_msqid_ds(&dsarg,ptr))
2034 return -TARGET_EFAULT;
2035 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2036 if (host_to_target_msqid_ds(ptr,&dsarg))
2037 return -TARGET_EFAULT;
/* Commands taking no buffer argument. */
2040 ret = get_errno(msgctl(msgid, cmd, NULL));
/* Info commands: kernel fills a struct msginfo (cast through the
 * msqid_ds pointer type as the kernel ABI expects). */
2044 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2045 if (host_to_target_msginfo(ptr, &msginfo))
2046 return -TARGET_EFAULT;
/* Guest-layout mirror of struct msgbuf (mtype + flexible mtext). */
2053 struct target_msgbuf {
/* Emulates msgsnd(2): copies the guest message into a host msgbuf
 * (swapping mtype) and sends it on the host queue. */
2058 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2059 unsigned int msgsz, int msgflg)
2061 struct target_msgbuf *target_mb;
2062 struct msgbuf *host_mb;
2065 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2066 return -TARGET_EFAULT;
/* NOTE(review): malloc result is used unchecked; OOM would crash. */
2067 host_mb = malloc(msgsz+sizeof(long));
2068 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
/* mtext is raw bytes; no byte-swapping needed. */
2069 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2070 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2072 unlock_user_struct(target_mb, msgp, 0);
/* Emulates msgrcv(2): receives into a host msgbuf, then copies mtext
 * and the byte-swapped mtype back to the guest message buffer.  On
 * success 'ret' is the number of bytes received. */
2077 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2078 unsigned int msgsz, abi_long msgtyp,
2081 struct target_msgbuf *target_mb;
2083 struct msgbuf *host_mb;
2086 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2087 return -TARGET_EFAULT;
/* NOTE(review): malloc result is used unchecked; OOM would crash. */
2089 host_mb = malloc(msgsz+sizeof(long));
2090 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
/* mtext in the guest buffer starts right after the abi_ulong mtype. */
2093 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2094 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2095 if (!target_mtext) {
2096 ret = -TARGET_EFAULT;
2099 memcpy(target_mb->mtext, host_mb->mtext, ret);
2100 unlock_user(target_mtext, target_mtext_addr, ret);
2103 target_mb->mtype = tswapl(host_mb->mtype);
2108 unlock_user_struct(target_mb, msgp, 1);
2112 #ifdef TARGET_NR_ipc
2113 /* ??? This only works with linear mappings. */
2114 /* do_ipc() must return target values and target errnos. */
/* Demultiplexer for the legacy ipc(2) syscall: dispatches the SysV
 * sem/msg/shm operations to the host or to the do_* helpers above.
 * BUGFIX (shmdt path): page_set_flags() takes (start, end, flags) —
 * as the shmat path below demonstrates with
 * page_set_flags(raddr, raddr + shm_info.shm_segsz, ...) — but the
 * detach path passed the region *size* as the end address, clearing
 * the wrong page range.  It now passes ptr + size. */
2115 static abi_long do_ipc(unsigned int call, int first,
2116 int second, int third,
2117 abi_long ptr, abi_long fifth)
2121 struct shmid_ds shm_info;
/* High 16 bits of 'call' carry the ipc interface version. */
2124 version = call >> 16;
2129 ret = get_errno(semop(first,(struct sembuf *)g2h(ptr), second));
2133 ret = get_errno(semget(first, second, third));
2137 ret = do_semctl(first, second, third, ptr);
2140 case IPCOP_semtimedop:
2141 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2142 ret = -TARGET_ENOSYS;
2146 ret = get_errno(msgget(first, second));
2150 ret = do_msgsnd(first, ptr, second, third);
2154 ret = do_msgctl(first, second, ptr);
/* Old-style msgrcv packs msgp and msgtyp in a kludge struct. */
2161 struct target_ipc_kludge {
2166 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2167 ret = -TARGET_EFAULT;
2171 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2173 unlock_user_struct(tmp, ptr, 0);
2177 ret = do_msgrcv(first, ptr, second, fifth, third);
2185 /* SHM_* flags are the same on all linux platforms */
2186 host_addr = shmat(first, (void *)g2h(ptr), second);
2187 if (host_addr == (void *)-1) {
2188 ret = get_errno((long)host_addr);
2191 raddr = h2g((unsigned long)host_addr);
2192 /* find out the length of the shared memory segment */
2194 ret = get_errno(shmctl(first, IPC_STAT, &shm_info));
2195 if (is_error(ret)) {
2196 /* can't get length, bail out */
/* Mark the freshly mapped guest range readable (and writable
 * unless attached read-only). */
2200 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2201 PAGE_VALID | PAGE_READ |
2202 ((second & SHM_RDONLY)? 0: PAGE_WRITE));
/* Remember the mapping so shmdt can clear the flags later. */
2203 for (i = 0; i < N_SHM_REGIONS; ++i) {
2204 if (shm_regions[i].start == 0) {
2205 shm_regions[i].start = raddr;
2206 shm_regions[i].size = shm_info.shm_segsz;
2210 if (put_user_ual(raddr, third))
2211 return -TARGET_EFAULT;
2216 for (i = 0; i < N_SHM_REGIONS; ++i) {
2217 if (shm_regions[i].start == ptr) {
2218 shm_regions[i].start = 0;
2219 page_set_flags(ptr, ptr + shm_regions[i].size, 0);
2223 ret = get_errno(shmdt((void *)g2h(ptr)));
2227 /* IPC_* flag values are the same on all linux platforms */
2228 ret = get_errno(shmget(first, second, third));
2231 /* IPC_* and SHM_* command values are the same on all linux platforms */
2237 ret = get_errno(shmctl(first, second, NULL));
2245 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2246 ret = -TARGET_ENOSYS;
2253 /* kernel structure types definitions */
2256 #define STRUCT(name, list...) STRUCT_ ## name,
2257 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2259 #include "syscall_types.h"
2262 #undef STRUCT_SPECIAL
2264 #define STRUCT(name, list...) static const argtype struct_ ## name ## _def[] = { list, TYPE_NULL };
2265 #define STRUCT_SPECIAL(name)
2266 #include "syscall_types.h"
2268 #undef STRUCT_SPECIAL
/* One row of the ioctl translation table: target command number, the
 * matching host command, and a thunk type description of the argument. */
2270 typedef struct IOCTLEntry {
2271 unsigned int target_cmd;
2272 unsigned int host_cmd;
2275 const argtype arg_type[5];
/* Direction flags for the ioctl argument (read from / written to guest). */
2278 #define IOC_R 0x0001
2279 #define IOC_W 0x0002
2280 #define IOC_RW (IOC_R | IOC_W)
/* Upper bound for any argument struct converted via buf_temp below. */
2282 #define MAX_STRUCT_SIZE 4096
/* Table is generated from ioctls.h via the IOCTL() macro. */
2284 static IOCTLEntry ioctl_entries[] = {
2285 #define IOCTL(cmd, access, types...) \
2286 { TARGET_ ## cmd, cmd, #cmd, access, { types } },
2291 /* ??? Implement proper locking for ioctls. */
2292 /* do_ioctl() Must return target values and target errnos. */
/* Emulates ioctl(2): looks up 'cmd' in ioctl_entries, then converts
 * the argument according to the entry's thunk type and access flags
 * (IOC_R = kernel writes back to guest, IOC_W = guest supplies data). */
2293 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
2295 const IOCTLEntry *ie;
2296 const argtype *arg_type;
/* Scratch buffer for the host-format argument struct. */
2298 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* target_cmd == 0 marks the end-of-table sentinel: unknown ioctl. */
2304 if (ie->target_cmd == 0) {
2305 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
2306 return -TARGET_ENOSYS;
2308 if (ie->target_cmd == cmd)
2312 arg_type = ie->arg_type;
2314 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
2316 switch(arg_type[0]) {
/* No argument. */
2319 ret = get_errno(ioctl(fd, ie->host_cmd));
/* Scalar argument passed through unchanged. */
2324 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* Pointer-to-struct argument: thunk-convert by access direction. */
2328 target_size = thunk_type_size(arg_type, 0);
2329 switch(ie->access) {
/* Read-only for the guest: host fills buf_temp, convert out. */
2331 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2332 if (!is_error(ret)) {
2333 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2335 return -TARGET_EFAULT;
2336 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2337 unlock_user(argptr, arg, target_size);
/* Write-only: convert guest struct in, then call. */
2341 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2343 return -TARGET_EFAULT;
2344 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2345 unlock_user(argptr, arg, 0);
2346 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* Read-write: convert in, call, convert back out. */
2350 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2352 return -TARGET_EFAULT;
2353 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2354 unlock_user(argptr, arg, 0);
2355 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2356 if (!is_error(ret)) {
2357 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2359 return -TARGET_EFAULT;
2360 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2361 unlock_user(argptr, arg, target_size);
2367 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
2368 (long)cmd, arg_type[0]);
2369 ret = -TARGET_ENOSYS;
/* termios c_iflag translation table (target mask/bits <-> host
 * mask/bits); used by the termios converters below. */
2375 static const bitmask_transtbl iflag_tbl[] = {
2376 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
2377 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
2378 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
2379 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
2380 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
2381 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
2382 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
2383 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
2384 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
2385 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
2386 { TARGET_IXON, TARGET_IXON, IXON, IXON },
2387 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
2388 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
2389 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* termios c_oflag translation table; multi-bit delay fields (NLDLY,
 * CRDLY, TABDLY, ...) list one entry per possible value. */
2393 static const bitmask_transtbl oflag_tbl[] = {
2394 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
2395 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
2396 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
2397 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
2398 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
2399 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
2400 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
2401 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
2402 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
2403 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
2404 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
2405 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
2406 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
2407 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
2408 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
2409 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
2410 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
2411 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
2412 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
2413 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
2414 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
2415 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
2416 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
2417 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* termios c_cflag translation table: baud rates (CBAUD field),
 * character size (CSIZE field) and the single-bit control flags. */
2421 static const bitmask_transtbl cflag_tbl[] = {
2422 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
2423 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
2424 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
2425 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
2426 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
2427 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
2428 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
2429 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
2430 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
2431 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
2432 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
2433 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
2434 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
2435 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
2436 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
2437 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
2438 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
2439 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
2440 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
2441 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
2442 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
2443 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
2444 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
2445 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
2446 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
2447 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
2448 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
2449 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
2450 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
2451 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
2452 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* termios c_lflag translation table (local-mode flags). */
2456 static const bitmask_transtbl lflag_tbl[] = {
2457 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
2458 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
2459 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
2460 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
2461 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
2462 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
2463 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
2464 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
2465 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
2466 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
2467 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
2468 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
2469 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
2470 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
2471 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/* Thunk converter: translate a guest struct target_termios into a host
 * struct host_termios — flag words go through the bitmask tables above,
 * control characters are remapped index-by-index. */
2475 static void target_to_host_termios (void *dst, const void *src)
2477 struct host_termios *host = dst;
2478 const struct target_termios *target = src;
2481 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
2483 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
2485 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
2487 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
2488 host->c_line = target->c_line;
/* Control characters: target indices may differ from host indices. */
2490 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
2491 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
2492 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
2493 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
2494 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
2495 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
2496 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
2497 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
2498 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
2499 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
2500 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
2501 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
2502 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
2503 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
2504 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
2505 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
2506 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Inverse thunk converter: translate a host struct host_termios into
 * the guest's struct target_termios layout. */
2509 static void host_to_target_termios (void *dst, const void *src)
2511 struct target_termios *target = dst;
2512 const struct host_termios *host = src;
2515 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
2517 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
2519 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
2521 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
2522 target->c_line = host->c_line;
/* Control characters: host indices may differ from target indices. */
2524 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
2525 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
2526 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
2527 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
2528 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
2529 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
2530 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
2531 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
2532 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
2533 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
2534 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
2535 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
2536 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
2537 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
2538 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
2539 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
2540 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor wiring the two termios converters above into the
 * generic struct-conversion machinery (used by the ioctl table). */
2543 static const StructEntry struct_termios_def = {
2544 .convert = { host_to_target_termios, target_to_host_termios },
2545 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
2546 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* mmap(2) flags translation table (target <-> host MAP_* bits). */
2549 static bitmask_transtbl mmap_flags_tbl[] = {
2550 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
2551 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
2552 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
2553 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
2554 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
2555 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
2556 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
2557 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
/* open(2)/fcntl(2) flags translation table; O_ACCMODE is a multi-bit
 * field, hence the separate O_WRONLY/O_RDWR rows.  O_DIRECT is only
 * translated when the host defines it. */
2561 static bitmask_transtbl fcntl_flags_tbl[] = {
2562 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
2563 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
2564 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
2565 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
2566 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
2567 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
2568 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
2569 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
2570 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
2571 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
2572 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
2573 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
2574 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
2575 #if defined(O_DIRECT)
2576 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
2581 #if defined(TARGET_I386)
2583 /* NOTE: there is really one LDT for all the threads */
2584 static uint8_t *ldt_table;
/* modify_ldt() read operation: copy the emulated LDT descriptor table
   out to guest memory at 'ptr', clamped to the table size.  Returns
   -TARGET_EFAULT if the guest buffer cannot be locked. */
2586 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
2593 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
2594 if (size > bytecount)
2596 p = lock_user(VERIFY_WRITE, ptr, size, 0);
2598 return -TARGET_EFAULT;
2599 /* ??? Should this be byteswapped? */
2600 memcpy(p, ldt_table, size);
2601 unlock_user(p, ptr, size);
2605 /* XXX: add locking support */
/* modify_ldt() write operation: install one descriptor into the
   emulated LDT.  'oldmode' selects the legacy modify_ldt(1) semantics
   (caller passes 1) versus the newer modify_ldt(0x11) semantics.
   Copies a target_modify_ldt_ldt_s from guest memory, validates it,
   lazily allocates the LDT backing store on first use, and writes the
   two 32-bit halves of the x86 descriptor.  Returns 0 on success or a
   -TARGET_* errno. */
2606 static abi_long write_ldt(CPUX86State *env,
2607 abi_ulong ptr, unsigned long bytecount, int oldmode)
2609 struct target_modify_ldt_ldt_s ldt_info;
2610 struct target_modify_ldt_ldt_s *target_ldt_info;
2611 int seg_32bit, contents, read_exec_only, limit_in_pages;
2612 int seg_not_present, useable, lm;
2613 uint32_t *lp, entry_1, entry_2;
/* The guest must pass exactly one descriptor record. */
2615 if (bytecount != sizeof(ldt_info))
2616 return -TARGET_EINVAL;
2617 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
2618 return -TARGET_EFAULT;
/* Byte-swap each field from guest to host order. */
2619 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
2620 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
2621 ldt_info.limit = tswap32(target_ldt_info->limit);
2622 ldt_info.flags = tswap32(target_ldt_info->flags);
2623 unlock_user_struct(target_ldt_info, ptr, 0);
2625 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
2626 return -TARGET_EINVAL;
/* Unpack the kernel's user_desc flag bitfield (same layout as Linux). */
2627 seg_32bit = ldt_info.flags & 1;
2628 contents = (ldt_info.flags >> 1) & 3;
2629 read_exec_only = (ldt_info.flags >> 3) & 1;
2630 limit_in_pages = (ldt_info.flags >> 4) & 1;
2631 seg_not_present = (ldt_info.flags >> 5) & 1;
2632 useable = (ldt_info.flags >> 6) & 1;
2636 lm = (ldt_info.flags >> 7) & 1;
/* contents == 3 would be a conforming code segment; reject invalid
   combinations the same way the kernel does. */
2638 if (contents == 3) {
2640 return -TARGET_EINVAL;
2641 if (seg_not_present == 0)
2642 return -TARGET_EINVAL;
2644 /* allocate the LDT lazily, on first write */
2646 env->ldt.base = target_mmap(0,
2647 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
2648 PROT_READ|PROT_WRITE,
2649 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
2650 if (env->ldt.base == -1)
2651 return -TARGET_ENOMEM;
2652 memset(g2h(env->ldt.base), 0,
2653 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
2654 env->ldt.limit = 0xffff;
2655 ldt_table = g2h(env->ldt.base);
2658 /* NOTE: same code as Linux kernel */
2659 /* Allow LDTs to be cleared by the user. */
2660 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
2663 read_exec_only == 1 &&
2665 limit_in_pages == 0 &&
2666 seg_not_present == 1 &&
/* Assemble the two 32-bit words of the x86 segment descriptor:
   entry_1 = base[15:0] << 16 | limit[15:0];
   entry_2 = base[31:24], base[23:16], limit[19:16] plus type/flag bits. */
2674 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
2675 (ldt_info.limit & 0x0ffff);
2676 entry_2 = (ldt_info.base_addr & 0xff000000) |
2677 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
2678 (ldt_info.limit & 0xf0000) |
2679 ((read_exec_only ^ 1) << 9) |
2681 ((seg_not_present ^ 1) << 15) |
2683 (limit_in_pages << 23) |
/* The AVL ("useable") bit is only honoured in non-oldmode calls. */
2687 entry_2 |= (useable << 20);
2689 /* Install the new entry: each LDT slot is 8 bytes (two words),
   stored in guest byte order. */
2691 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
2692 lp[0] = tswap32(entry_1);
2693 lp[1] = tswap32(entry_2);
2697 /* specific and weird i386 syscalls */
/* Dispatcher for the modify_ldt(2) syscall: func selects read (0),
   legacy write (1, oldmode) or new-style write (0x11).  Unknown func
   values yield -TARGET_ENOSYS. */
2698 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
2699 unsigned long bytecount)
2705 ret = read_ldt(ptr, bytecount);
2708 ret = write_ldt(env, ptr, bytecount, 1);
2711 ret = write_ldt(env, ptr, bytecount, 0);
2714 ret = -TARGET_ENOSYS;
2720 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2): install a TLS descriptor into the GDT.
   Mirrors the descriptor-building logic of write_ldt() above, but
   writes into the per-CPU GDT slots TARGET_GDT_ENTRY_TLS_MIN..MAX.
   An entry_number of -1 asks us to pick a free slot and report it
   back to the guest through the locked user_desc struct. */
2721 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
2723 uint64_t *gdt_table = g2h(env->gdt.base);
2724 struct target_modify_ldt_ldt_s ldt_info;
2725 struct target_modify_ldt_ldt_s *target_ldt_info;
2726 int seg_32bit, contents, read_exec_only, limit_in_pages;
2727 int seg_not_present, useable, lm;
2728 uint32_t *lp, entry_1, entry_2;
/* VERIFY_WRITE because we may write back the chosen entry_number. */
2731 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
2732 if (!target_ldt_info)
2733 return -TARGET_EFAULT;
2734 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
2735 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
2736 ldt_info.limit = tswap32(target_ldt_info->limit);
2737 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 means "allocate a free TLS slot for me". */
2738 if (ldt_info.entry_number == -1) {
2739 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
2740 if (gdt_table[i] == 0) {
2741 ldt_info.entry_number = i;
/* Report the allocated slot back to the guest. */
2742 target_ldt_info->entry_number = tswap32(i);
2747 unlock_user_struct(target_ldt_info, ptr, 1);
2749 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
2750 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
2751 return -TARGET_EINVAL;
/* Unpack the user_desc flag bitfield (same layout as write_ldt). */
2752 seg_32bit = ldt_info.flags & 1;
2753 contents = (ldt_info.flags >> 1) & 3;
2754 read_exec_only = (ldt_info.flags >> 3) & 1;
2755 limit_in_pages = (ldt_info.flags >> 4) & 1;
2756 seg_not_present = (ldt_info.flags >> 5) & 1;
2757 useable = (ldt_info.flags >> 6) & 1;
2761 lm = (ldt_info.flags >> 7) & 1;
2764 if (contents == 3) {
2765 if (seg_not_present == 0)
2766 return -TARGET_EINVAL;
2769 /* NOTE: same code as Linux kernel */
2770 /* Allow LDTs to be cleared by the user. */
2771 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
2772 if ((contents == 0 &&
2773 read_exec_only == 1 &&
2775 limit_in_pages == 0 &&
2776 seg_not_present == 1 &&
/* Build the two descriptor words; see write_ldt() for the layout. */
2784 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
2785 (ldt_info.limit & 0x0ffff);
2786 entry_2 = (ldt_info.base_addr & 0xff000000) |
2787 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
2788 (ldt_info.limit & 0xf0000) |
2789 ((read_exec_only ^ 1) << 9) |
2791 ((seg_not_present ^ 1) << 15) |
2793 (limit_in_pages << 23) |
2798 /* Install the new entry: each GDT slot is one uint64_t, written
   as two 32-bit words in guest byte order. */
2800 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
2801 lp[0] = tswap32(entry_1);
2802 lp[1] = tswap32(entry_2);
/* Emulate get_thread_area(2): read a TLS descriptor back out of the
   GDT.  The inverse of do_set_thread_area() — decodes the descriptor
   words into base/limit/flags and stores them into the guest's
   user_desc struct at 'ptr'. */
2806 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
2808 struct target_modify_ldt_ldt_s *target_ldt_info;
2809 uint64_t *gdt_table = g2h(env->gdt.base);
2810 uint32_t base_addr, limit, flags;
2811 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
2812 int seg_not_present, useable, lm;
2813 uint32_t *lp, entry_1, entry_2;
2815 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
2816 if (!target_ldt_info)
2817 return -TARGET_EFAULT;
/* Only the TLS range of the GDT may be queried. */
2818 idx = tswap32(target_ldt_info->entry_number);
2819 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
2820 idx > TARGET_GDT_ENTRY_TLS_MAX) {
2821 unlock_user_struct(target_ldt_info, ptr, 1);
2822 return -TARGET_EINVAL;
/* Fetch the two 32-bit halves of the descriptor. */
2824 lp = (uint32_t *)(gdt_table + idx);
2825 entry_1 = tswap32(lp[0]);
2826 entry_2 = tswap32(lp[1]);
/* Decode: the stored bits are the complement of read_exec_only and
   seg_not_present (see the '^ 1' in do_set_thread_area). */
2828 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
2829 contents = (entry_2 >> 10) & 3;
2830 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
2831 seg_32bit = (entry_2 >> 22) & 1;
2832 limit_in_pages = (entry_2 >> 23) & 1;
2833 useable = (entry_2 >> 20) & 1;
2837 lm = (entry_2 >> 21) & 1;
/* Repack into the user_desc flag bitfield layout. */
2839 flags = (seg_32bit << 0) | (contents << 1) |
2840 (read_exec_only << 3) | (limit_in_pages << 4) |
2841 (seg_not_present << 5) | (useable << 6) | (lm << 7);
2842 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
2843 base_addr = (entry_1 >> 16) |
2844 (entry_2 & 0xff000000) |
2845 ((entry_2 & 0xff) << 16);
/* Write the decoded fields back in guest byte order. */
2846 target_ldt_info->base_addr = tswapl(base_addr);
2847 target_ldt_info->limit = tswap32(limit);
2848 target_ldt_info->flags = tswap32(flags);
2849 unlock_user_struct(target_ldt_info, ptr, 1);
2852 #endif /* TARGET_I386 && TARGET_ABI32 */
2854 #ifndef TARGET_ABI32
/* x86-64 arch_prctl(2) emulation: set or get the FS/GS segment base.
   SET loads a null selector then writes the base directly; GET stores
   the current base to the guest address 'addr'. */
2855 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
2862 case TARGET_ARCH_SET_GS:
2863 case TARGET_ARCH_SET_FS:
2864 if (code == TARGET_ARCH_SET_GS)
2868 cpu_x86_load_seg(env, idx, 0);
2869 env->segs[idx].base = addr;
2871 case TARGET_ARCH_GET_GS:
2872 case TARGET_ARCH_GET_FS:
2873 if (code == TARGET_ARCH_GET_GS)
2877 val = env->segs[idx].base;
2878 if (put_user(val, addr, abi_ulong))
2879 return -TARGET_EFAULT;
/* Any other code is rejected, matching the kernel. */
2882 ret = -TARGET_EINVAL;
2889 #endif /* defined(TARGET_I386) */
2891 #if defined(USE_NPTL)
2893 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
/* Serializes thread creation: the parent holds this across do_fork()
   so the child can wait for TLS setup to finish before running. */
2895 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake state shared between do_fork() and the new thread. */
2898 pthread_mutex_t mutex;
2899 pthread_cond_t cond;
2902 abi_ulong child_tidptr;
2903 abi_ulong parent_tidptr;
/* Start routine for a CLONE_VM child when NPTL emulation is enabled.
   Publishes the child's tid, unblocks signals, signals readiness to
   the parent, then waits on clone_lock until the parent has finished
   initialization before entering the guest CPU loop. */
2907 static void *clone_func(void *arg)
2909 new_thread_info *info = arg;
2914 info->tid = gettid();
/* Honour CLONE_CHILD_SETTID / CLONE_PARENT_SETTID semantics. */
2915 if (info->child_tidptr)
2916 put_user_u32(info->tid, info->child_tidptr);
2917 if (info->parent_tidptr)
2918 put_user_u32(info->tid, info->parent_tidptr);
2919 /* Enable signals. */
2920 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
2921 /* Signal to the parent that we're ready. */
2922 pthread_mutex_lock(&info->mutex);
2923 pthread_cond_broadcast(&info->cond);
2924 pthread_mutex_unlock(&info->mutex);
2925 /* Wait until the parent has finished initializing the tls state. */
2926 pthread_mutex_lock(&clone_lock);
2927 pthread_mutex_unlock(&clone_lock);
2933 /* this stack is the equivalent of the kernel stack associated with a
/* Non-NPTL fallback: the child is created with the raw clone()
   syscall, so a small fixed-size stack suffices. */
2935 #define NEW_STACK_SIZE 8192
2937 static int clone_func(void *arg)
2939 CPUState *env = arg;
2946 /* do_fork() Must return host values and target errnos (unlike most
2947 do_*() functions). */
/* Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests a new thread
   (implemented with pthreads under USE_NPTL, raw clone() otherwise);
   without CLONE_VM a plain fork() is performed.  newsp, newtls and the
   tid pointers carry the usual clone() semantics. */
2948 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
2949 abi_ulong parent_tidptr, target_ulong newtls,
2950 abi_ulong child_tidptr)
2956 #if defined(USE_NPTL)
2957 unsigned int nptl_flags;
2961 /* Emulate vfork() with fork() */
2962 if (flags & CLONE_VFORK)
2963 flags &= ~(CLONE_VFORK | CLONE_VM);
2965 if (flags & CLONE_VM) {
2966 #if defined(USE_NPTL)
2967 new_thread_info info;
2968 pthread_attr_t attr;
/* Task state and the new thread's stack share one allocation. */
2970 ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
2971 init_task_state(ts);
2972 new_stack = ts->stack;
2973 /* we create a new CPU instance. */
2974 new_env = cpu_copy(env);
2975 /* Init regs that differ from the parent. */
2976 cpu_clone_regs(new_env, newsp);
2977 new_env->opaque = ts;
2978 #if defined(USE_NPTL)
/* Strip the NPTL-specific flags; they are handled here, not by
   the host clone/pthread_create. */
2980 flags &= ~CLONE_NPTL_FLAGS2;
2982 if (nptl_flags & CLONE_CHILD_CLEARTID) {
2983 ts->child_tidptr = child_tidptr;
2986 if (nptl_flags & CLONE_SETTLS)
2987 cpu_set_tls (new_env, newtls);
2989 /* Grab a mutex so that thread setup appears atomic. */
2990 pthread_mutex_lock(&clone_lock);
2992 memset(&info, 0, sizeof(info));
2993 pthread_mutex_init(&info.mutex, NULL);
2994 pthread_mutex_lock(&info.mutex);
2995 pthread_cond_init(&info.cond, NULL);
2997 if (nptl_flags & CLONE_CHILD_SETTID)
2998 info.child_tidptr = child_tidptr;
2999 if (nptl_flags & CLONE_PARENT_SETTID)
3000 info.parent_tidptr = parent_tidptr;
3002 ret = pthread_attr_init(&attr);
3003 ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
3004 /* It is not safe to deliver signals until the child has finished
3005 initializing, so temporarily block all signals. */
3006 sigfillset(&sigmask);
3007 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3009 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3010 /* TODO: Free new CPU state if thread creation failed. */
/* Restore this thread's signal mask now the child exists. */
3012 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3013 pthread_attr_destroy(&attr);
3015 /* Wait for the child to initialize. */
3016 pthread_cond_wait(&info.cond, &info.mutex);
3018 if (flags & CLONE_PARENT_SETTID)
3019 put_user_u32(ret, parent_tidptr);
3023 pthread_mutex_unlock(&info.mutex);
3024 pthread_cond_destroy(&info.cond);
3025 pthread_mutex_destroy(&info.mutex);
/* Releasing clone_lock lets the child proceed (see clone_func). */
3026 pthread_mutex_unlock(&clone_lock);
3028 if (flags & CLONE_NPTL_FLAGS2)
3030 /* This is probably going to die very quickly, but do it anyway. */
3032 ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3034 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3038 /* if no CLONE_VM, we consider it is a fork */
3039 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3044 /* Child Process. */
3045 cpu_clone_regs(env, newsp);
3047 #if defined(USE_NPTL)
3048 /* There is a race condition here. The parent process could
3049 theoretically read the TID in the child process before the child
3050 tid is set. This would require using either ptrace
3051 (not implemented) or having *_tidptr to point at a shared memory
3052 mapping. We can't repeat the spinlock hack used above because
3053 the child process gets its own copy of the lock. */
3054 if (flags & CLONE_CHILD_SETTID)
3055 put_user_u32(gettid(), child_tidptr);
3056 if (flags & CLONE_PARENT_SETTID)
3057 put_user_u32(gettid(), parent_tidptr);
3058 ts = (TaskState *)env->opaque;
3059 if (flags & CLONE_SETTLS)
3060 cpu_set_tls (env, newtls);
3061 if (flags & CLONE_CHILD_CLEARTID)
3062 ts->child_tidptr = child_tidptr;
/* Emulate fcntl(2).  Lock commands (F_GETLK/F_SETLK[W] and their 64-bit
   variants) marshal a struct flock between guest and host layouts;
   F_GETFL/F_SETFL translate the flag bits via fcntl_flags_tbl; other
   commands pass 'arg' straight through. */
3071 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
3074 struct target_flock *target_fl;
3075 struct flock64 fl64;
3076 struct target_flock64 *target_fl64;
3080 case TARGET_F_GETLK:
3081 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3082 return -TARGET_EFAULT;
/* Guest -> host copy of the lock description. */
3083 fl.l_type = tswap16(target_fl->l_type);
3084 fl.l_whence = tswap16(target_fl->l_whence);
3085 fl.l_start = tswapl(target_fl->l_start);
3086 fl.l_len = tswapl(target_fl->l_len);
3087 fl.l_pid = tswapl(target_fl->l_pid);
3088 unlock_user_struct(target_fl, arg, 0);
3089 ret = get_errno(fcntl(fd, cmd, &fl));
/* F_GETLK writes the conflicting lock back into the struct. */
3091 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
3092 return -TARGET_EFAULT;
3093 target_fl->l_type = tswap16(fl.l_type);
3094 target_fl->l_whence = tswap16(fl.l_whence);
3095 target_fl->l_start = tswapl(fl.l_start);
3096 target_fl->l_len = tswapl(fl.l_len);
3097 target_fl->l_pid = tswapl(fl.l_pid);
3098 unlock_user_struct(target_fl, arg, 1);
3102 case TARGET_F_SETLK:
3103 case TARGET_F_SETLKW:
3104 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3105 return -TARGET_EFAULT;
3106 fl.l_type = tswap16(target_fl->l_type);
3107 fl.l_whence = tswap16(target_fl->l_whence);
3108 fl.l_start = tswapl(target_fl->l_start);
3109 fl.l_len = tswapl(target_fl->l_len);
3110 fl.l_pid = tswapl(target_fl->l_pid);
3111 unlock_user_struct(target_fl, arg, 0);
3112 ret = get_errno(fcntl(fd, cmd, &fl));
3115 case TARGET_F_GETLK64:
3116 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3117 return -TARGET_EFAULT;
/* NOTE(review): the '>> 1' applied to l_type here (and to 'cmd'
   below), and tswap16 on the 32-bit l_pid field, look suspicious —
   verify against the TARGET_F_*64 / target_flock64 definitions. */
3118 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3119 fl64.l_whence = tswap16(target_fl64->l_whence);
3120 fl64.l_start = tswapl(target_fl64->l_start);
3121 fl64.l_len = tswapl(target_fl64->l_len);
3122 fl64.l_pid = tswap16(target_fl64->l_pid);
3123 unlock_user_struct(target_fl64, arg, 0);
3124 ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
3126 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
3127 return -TARGET_EFAULT;
3128 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
3129 target_fl64->l_whence = tswap16(fl64.l_whence);
3130 target_fl64->l_start = tswapl(fl64.l_start);
3131 target_fl64->l_len = tswapl(fl64.l_len);
3132 target_fl64->l_pid = tswapl(fl64.l_pid);
3133 unlock_user_struct(target_fl64, arg, 1);
3136 case TARGET_F_SETLK64:
3137 case TARGET_F_SETLKW64:
3138 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3139 return -TARGET_EFAULT;
3140 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3141 fl64.l_whence = tswap16(target_fl64->l_whence);
3142 fl64.l_start = tswapl(target_fl64->l_start);
3143 fl64.l_len = tswapl(target_fl64->l_len);
3144 fl64.l_pid = tswap16(target_fl64->l_pid);
3145 unlock_user_struct(target_fl64, arg, 0);
3146 ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
/* F_GETFL: translate host flag bits back to target encoding. */
3150 ret = get_errno(fcntl(fd, cmd, arg));
3152 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
/* F_SETFL: translate target flag bits to host encoding first. */
3157 ret = get_errno(fcntl(fd, cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
/* Default: pass the argument through unchanged. */
3161 ret = get_errno(fcntl(fd, cmd, arg));
/* 16-bit uid/gid emulation helpers (USE_UID16 targets): clamp a host
   32-bit id into the 16-bit range and widen a 16-bit id back,
   preserving the -1 "no change" sentinel. */
3169 static inline int high2lowuid(int uid)
3177 static inline int high2lowgid(int gid)
3185 static inline int low2highuid(int uid)
/* (uint16_t)-1 must stay -1 when widened, not become 65535. */
3187 if ((int16_t)uid == -1)
3193 static inline int low2highgid(int gid)
3195 if ((int16_t)gid == -1)
3201 #endif /* USE_UID16 */
/* One-time initialization of the syscall layer: registers all struct
   thunk descriptors, patches target ioctl numbers whose size field is
   a placeholder, and builds the target->host errno reverse table. */
3203 void syscall_init(void)
3206 const argtype *arg_type;
/* Register every struct listed in syscall_types.h with the thunk
   machinery (normal structs via STRUCT, pre-built ones via
   STRUCT_SPECIAL). */
3210 #define STRUCT(name, list...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3211 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3212 #include "syscall_types.h"
3214 #undef STRUCT_SPECIAL
3216 /* we patch the ioctl size if necessary. We rely on the fact that
3217 no ioctl has all the bits at '1' in the size field */
3219 while (ie->target_cmd != 0) {
/* An all-ones size field marks "fill in from the thunk type". */
3220 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
3221 TARGET_IOC_SIZEMASK) {
3222 arg_type = ie->arg_type;
3223 if (arg_type[0] != TYPE_PTR) {
3224 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
3229 size = thunk_type_size(arg_type, 0);
3230 ie->target_cmd = (ie->target_cmd &
3231 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
3232 (size << TARGET_IOC_SIZESHIFT);
3235 /* Build target_to_host_errno_table[] table from
3236 * host_to_target_errno_table[]. */
3237 for (i=0; i < ERRNO_TABLE_SIZE; i++)
3238 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
3240 /* automatic consistency check if same arch */
3241 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3242 (defined(__x86_64__) && defined(TARGET_X86_64))
3243 if (unlikely(ie->target_cmd != ie->host_cmd)) {
3244 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3245 ie->name, ie->target_cmd, ie->host_cmd);
3252 #if TARGET_ABI_BITS == 32
/* Combine the two 32-bit halves of a 64-bit value passed in a syscall
   register pair; which register holds the high word depends on the
   target's endianness. */
3253 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
3255 #ifdef TARGET_WORDS_BIGENDIAN
3256 return ((uint64_t)word0 << 32) | word1;
3258 return ((uint64_t)word1 << 32) | word0;
3261 #else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the value fits in a single register, so only word0
   is meaningful. */
3262 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
3266 #endif /* TARGET_ABI_BITS != 32 */
3268 #ifdef TARGET_NR_truncate64
/* truncate64(2): reassemble the 64-bit length from its register pair
   (arg2/arg3) and call the host syscall.  ARM EABI aligns 64-bit
   arguments to even register pairs, hence the eabi adjustment. */
3269 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
3275 if (((CPUARMState *)cpu_env)->eabi)
3281 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
3285 #ifdef TARGET_NR_ftruncate64
/* ftruncate64(2): same register-pair reassembly as target_truncate64,
   operating on a file descriptor instead of a path. */
3286 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
3292 if (((CPUARMState *)cpu_env)->eabi)
3298 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Copy a struct timespec from guest memory at target_addr into
   *host_ts, byte-swapping each field.  Returns -TARGET_EFAULT if the
   guest struct cannot be locked. */
3302 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
3303 abi_ulong target_addr)
3305 struct target_timespec *target_ts;
3307 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
3308 return -TARGET_EFAULT;
3309 host_ts->tv_sec = tswapl(target_ts->tv_sec);
3310 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
3311 unlock_user_struct(target_ts, target_addr, 0);
/* Copy *host_ts out to the guest struct timespec at target_addr,
   byte-swapping each field.  Returns -TARGET_EFAULT on lock failure. */
3315 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
3316 struct timespec *host_ts)
3318 struct target_timespec *target_ts;
3320 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
3321 return -TARGET_EFAULT;
3322 target_ts->tv_sec = tswapl(host_ts->tv_sec);
3323 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
3324 unlock_user_struct(target_ts, target_addr, 1);
3328 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Copy a host struct stat out to the guest's stat64 layout at
   target_addr.  ARM EABI targets use their own packed
   target_eabi_stat64 layout; everything else uses target_stat (when
   the target long is 64-bit) or target_stat64. */
3329 static inline abi_long host_to_target_stat64(void *cpu_env,
3330 abi_ulong target_addr,
3331 struct stat *host_st)
3334 if (((CPUARMState *)cpu_env)->eabi) {
3335 struct target_eabi_stat64 *target_st;
3337 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3338 return -TARGET_EFAULT;
3339 memset(target_st, 0, sizeof(struct target_eabi_stat64));
3340 __put_user(host_st->st_dev, &target_st->st_dev);
3341 __put_user(host_st->st_ino, &target_st->st_ino);
3342 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* Some ABIs carry the inode in a second __st_ino field as well. */
3343 __put_user(host_st->st_ino, &target_st->__st_ino);
3345 __put_user(host_st->st_mode, &target_st->st_mode);
3346 __put_user(host_st->st_nlink, &target_st->st_nlink);
3347 __put_user(host_st->st_uid, &target_st->st_uid);
3348 __put_user(host_st->st_gid, &target_st->st_gid);
3349 __put_user(host_st->st_rdev, &target_st->st_rdev);
3350 __put_user(host_st->st_size, &target_st->st_size);
3351 __put_user(host_st->st_blksize, &target_st->st_blksize);
3352 __put_user(host_st->st_blocks, &target_st->st_blocks);
3353 __put_user(host_st->st_atime, &target_st->target_st_atime);
3354 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3355 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3356 unlock_user_struct(target_st, target_addr, 1);
/* Generic (non-EABI) path. */
3360 #if TARGET_LONG_BITS == 64
3361 struct target_stat *target_st;
3363 struct target_stat64 *target_st;
3366 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3367 return -TARGET_EFAULT;
3368 memset(target_st, 0, sizeof(*target_st));
3369 __put_user(host_st->st_dev, &target_st->st_dev);
3370 __put_user(host_st->st_ino, &target_st->st_ino);
3371 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3372 __put_user(host_st->st_ino, &target_st->__st_ino);
3374 __put_user(host_st->st_mode, &target_st->st_mode);
3375 __put_user(host_st->st_nlink, &target_st->st_nlink);
3376 __put_user(host_st->st_uid, &target_st->st_uid);
3377 __put_user(host_st->st_gid, &target_st->st_gid);
3378 __put_user(host_st->st_rdev, &target_st->st_rdev);
3379 /* XXX: better use of kernel struct */
3380 __put_user(host_st->st_size, &target_st->st_size);
3381 __put_user(host_st->st_blksize, &target_st->st_blksize);
3382 __put_user(host_st->st_blocks, &target_st->st_blocks);
3383 __put_user(host_st->st_atime, &target_st->target_st_atime);
3384 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3385 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3386 unlock_user_struct(target_st, target_addr, 1);
3393 #if defined(USE_NPTL)
3394 /* ??? Using host futex calls even when target atomic operations
3395 are not really atomic probably breaks things. However implementing
3396 futexes locally would make futexes shared between multiple processes
3397 tricky. However they're probably useless because guest atomic
3398 operations won't work either. */
/* Emulate futex(2) by forwarding to the host futex on the
   guest-memory address (g2h translation).  The timeout, when present,
   is converted from the guest timespec.  Unsupported ops return
   -TARGET_ENOSYS. */
3399 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
3400 target_ulong uaddr2, int val3)
3402 struct timespec ts, *pts;
3404 /* ??? We assume FUTEX_* constants are the same on both host
3410 target_to_host_timespec(pts, timeout);
/* NOTE(review): FUTEX_WAIT swaps 'val' (compared against the
   guest-order word in memory) while FUTEX_WAKE passes it raw (it is
   a count there) — confirm this asymmetry is intended. */
3414 return get_errno(sys_futex(g2h(uaddr), FUTEX_WAIT, tswap32(val),
3417 return get_errno(sys_futex(g2h(uaddr), FUTEX_WAKE, val, NULL, NULL, 0));
3419 return get_errno(sys_futex(g2h(uaddr), FUTEX_FD, val, NULL, NULL, 0));
3421 return get_errno(sys_futex(g2h(uaddr), FUTEX_REQUEUE, val,
3422 NULL, g2h(uaddr2), 0));
3423 case FUTEX_CMP_REQUEUE:
3424 return get_errno(sys_futex(g2h(uaddr), FUTEX_CMP_REQUEUE, val,
3425 NULL, g2h(uaddr2), tswap32(val3)));
3427 return -TARGET_ENOSYS;
/* Return the (emulated) kernel version packed one byte per component,
   i.e. 2.6.17 -> 0x020611.  Uses the user-supplied -r release string
   when set, otherwise the real uname() release; the result is cached
   in a static. */
3432 int get_osversion(void)
3434 static int osversion;
3435 struct new_utsname buf;
3440 if (qemu_uname_release && *qemu_uname_release) {
3441 s = qemu_uname_release;
3443 if (sys_uname(&buf))
/* Parse up to three dot-separated numeric components. */
3448 for (i = 0; i < 3; i++) {
3450 while (*s >= '0' && *s <= '9') {
/* Shift each parsed component into the next byte of the result. */
3455 tmp = (tmp << 8) + n;
3463 /* do_syscall() should always have a single exit point at the end so
3464 that actions, such as logging of syscall results, can be performed.
3465 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
3466 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
3467 abi_long arg2, abi_long arg3, abi_long arg4,
3468 abi_long arg5, abi_long arg6)
3476 gemu_log("syscall %d", num);
3479 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
3482 case TARGET_NR_exit:
3484 /* In old applications this may be used to implement _exit(2).
3485 However in threaded applictions it is used for thread termination,
3486 and _exit_group is used for application termination.
3487 Do thread termination if we have more then one thread. */
3488 /* FIXME: This probably breaks if a signal arrives. We should probably
3489 be disabling signals. */
3490 if (first_cpu->next_cpu) {
3497 while (p && p != (CPUState *)cpu_env) {
3498 lastp = &p->next_cpu;
3501 /* If we didn't find the CPU for this thread then something is
3505 /* Remove the CPU from the list. */
3506 *lastp = p->next_cpu;
3508 TaskState *ts = ((CPUState *)cpu_env)->opaque;
3509 if (ts->child_tidptr) {
3510 put_user_u32(0, ts->child_tidptr);
3511 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
3514 /* TODO: Free CPU state. */
3521 gdb_exit(cpu_env, arg1);
3523 ret = 0; /* avoid warning */
3525 case TARGET_NR_read:
3529 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
3531 ret = get_errno(read(arg1, p, arg3));
3532 unlock_user(p, arg2, ret);
3535 case TARGET_NR_write:
3536 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
3538 ret = get_errno(write(arg1, p, arg3));
3539 unlock_user(p, arg2, 0);
3541 case TARGET_NR_open:
3542 if (!(p = lock_user_string(arg1)))
3544 ret = get_errno(open(path(p),
3545 target_to_host_bitmask(arg2, fcntl_flags_tbl),
3547 unlock_user(p, arg1, 0);
3549 #if defined(TARGET_NR_openat) && defined(__NR_openat)
3550 case TARGET_NR_openat:
3551 if (!(p = lock_user_string(arg2)))
3553 ret = get_errno(sys_openat(arg1,
3555 target_to_host_bitmask(arg3, fcntl_flags_tbl),
3557 unlock_user(p, arg2, 0);
3560 case TARGET_NR_close:
3561 ret = get_errno(close(arg1));
3566 case TARGET_NR_fork:
3567 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
3569 #ifdef TARGET_NR_waitpid
3570 case TARGET_NR_waitpid:
3573 ret = get_errno(waitpid(arg1, &status, arg3));
3574 if (!is_error(ret) && arg2
3575 && put_user_s32(status, arg2))
3580 #ifdef TARGET_NR_waitid
3581 case TARGET_NR_waitid:
3585 ret = get_errno(waitid(arg1, arg2, &info, arg4));
3586 if (!is_error(ret) && arg3 && info.si_pid != 0) {
3587 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
3589 host_to_target_siginfo(p, &info);
3590 unlock_user(p, arg3, sizeof(target_siginfo_t));
3595 #ifdef TARGET_NR_creat /* not on alpha */
3596 case TARGET_NR_creat:
3597 if (!(p = lock_user_string(arg1)))
3599 ret = get_errno(creat(p, arg2));
3600 unlock_user(p, arg1, 0);
3603 case TARGET_NR_link:
3606 p = lock_user_string(arg1);
3607 p2 = lock_user_string(arg2);
3609 ret = -TARGET_EFAULT;
3611 ret = get_errno(link(p, p2));
3612 unlock_user(p2, arg2, 0);
3613 unlock_user(p, arg1, 0);
3616 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
3617 case TARGET_NR_linkat:
3622 p = lock_user_string(arg2);
3623 p2 = lock_user_string(arg4);
3625 ret = -TARGET_EFAULT;
3627 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
3628 unlock_user(p, arg2, 0);
3629 unlock_user(p2, arg4, 0);
3633 case TARGET_NR_unlink:
3634 if (!(p = lock_user_string(arg1)))
3636 ret = get_errno(unlink(p));
3637 unlock_user(p, arg1, 0);
3639 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
3640 case TARGET_NR_unlinkat:
3641 if (!(p = lock_user_string(arg2)))
3643 ret = get_errno(sys_unlinkat(arg1, p, arg3));
3644 unlock_user(p, arg2, 0);
3647 case TARGET_NR_execve:
3649 char **argp, **envp;
3652 abi_ulong guest_argp;
3653 abi_ulong guest_envp;
3659 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
3660 if (get_user_ual(addr, gp))
3668 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
3669 if (get_user_ual(addr, gp))
3676 argp = alloca((argc + 1) * sizeof(void *));
3677 envp = alloca((envc + 1) * sizeof(void *));
3679 for (gp = guest_argp, q = argp; gp;
3680 gp += sizeof(abi_ulong), q++) {
3681 if (get_user_ual(addr, gp))
3685 if (!(*q = lock_user_string(addr)))
3690 for (gp = guest_envp, q = envp; gp;
3691 gp += sizeof(abi_ulong), q++) {
3692 if (get_user_ual(addr, gp))
3696 if (!(*q = lock_user_string(addr)))
3701 if (!(p = lock_user_string(arg1)))
3703 ret = get_errno(execve(p, argp, envp));
3704 unlock_user(p, arg1, 0);
3709 ret = -TARGET_EFAULT;
3712 for (gp = guest_argp, q = argp; *q;
3713 gp += sizeof(abi_ulong), q++) {
3714 if (get_user_ual(addr, gp)
3717 unlock_user(*q, addr, 0);
3719 for (gp = guest_envp, q = envp; *q;
3720 gp += sizeof(abi_ulong), q++) {
3721 if (get_user_ual(addr, gp)
3724 unlock_user(*q, addr, 0);
3728 case TARGET_NR_chdir:
3729 if (!(p = lock_user_string(arg1)))
3731 ret = get_errno(chdir(p));
3732 unlock_user(p, arg1, 0);
3734 #ifdef TARGET_NR_time
3735 case TARGET_NR_time:
3738 ret = get_errno(time(&host_time));
3741 && put_user_sal(host_time, arg1))
3746 case TARGET_NR_mknod:
3747 if (!(p = lock_user_string(arg1)))
3749 ret = get_errno(mknod(p, arg2, arg3));
3750 unlock_user(p, arg1, 0);
3752 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
3753 case TARGET_NR_mknodat:
3754 if (!(p = lock_user_string(arg2)))
3756 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
3757 unlock_user(p, arg2, 0);
3760 case TARGET_NR_chmod:
3761 if (!(p = lock_user_string(arg1)))
3763 ret = get_errno(chmod(p, arg2));
3764 unlock_user(p, arg1, 0);
3766 #ifdef TARGET_NR_break
3767 case TARGET_NR_break:
3770 #ifdef TARGET_NR_oldstat
3771 case TARGET_NR_oldstat:
3774 case TARGET_NR_lseek:
3775 ret = get_errno(lseek(arg1, arg2, arg3));
3777 #ifdef TARGET_NR_getxpid
3778 case TARGET_NR_getxpid:
3780 case TARGET_NR_getpid:
3782 ret = get_errno(getpid());
3784 case TARGET_NR_mount:
3786 /* need to look at the data field */
3788 p = lock_user_string(arg1);
3789 p2 = lock_user_string(arg2);
3790 p3 = lock_user_string(arg3);
3791 if (!p || !p2 || !p3)
3792 ret = -TARGET_EFAULT;
3794 /* FIXME - arg5 should be locked, but it isn't clear how to
3795 * do that since it's not guaranteed to be a NULL-terminated
3798 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
3799 unlock_user(p, arg1, 0);
3800 unlock_user(p2, arg2, 0);
3801 unlock_user(p3, arg3, 0);
3804 #ifdef TARGET_NR_umount
3805 case TARGET_NR_umount:
3806 if (!(p = lock_user_string(arg1)))
3808 ret = get_errno(umount(p));
3809 unlock_user(p, arg1, 0);
3812 #ifdef TARGET_NR_stime /* not on alpha */
3813 case TARGET_NR_stime:
3816 if (get_user_sal(host_time, arg1))
3818 ret = get_errno(stime(&host_time));
3822 case TARGET_NR_ptrace:
3824 #ifdef TARGET_NR_alarm /* not on alpha */
3825 case TARGET_NR_alarm:
3829 #ifdef TARGET_NR_oldfstat
3830 case TARGET_NR_oldfstat:
3833 #ifdef TARGET_NR_pause /* not on alpha */
3834 case TARGET_NR_pause:
3835 ret = get_errno(pause());
3838 #ifdef TARGET_NR_utime
3839 case TARGET_NR_utime:
3841 struct utimbuf tbuf, *host_tbuf;
3842 struct target_utimbuf *target_tbuf;
3844 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
3846 tbuf.actime = tswapl(target_tbuf->actime);
3847 tbuf.modtime = tswapl(target_tbuf->modtime);
3848 unlock_user_struct(target_tbuf, arg2, 0);
3853 if (!(p = lock_user_string(arg1)))
3855 ret = get_errno(utime(p, host_tbuf));
3856 unlock_user(p, arg1, 0);
3860 case TARGET_NR_utimes:
3862 struct timeval *tvp, tv[2];
3864 if (copy_from_user_timeval(&tv[0], arg2)
3865 || copy_from_user_timeval(&tv[1],
3866 arg2 + sizeof(struct target_timeval)))
3872 if (!(p = lock_user_string(arg1)))
3874 ret = get_errno(utimes(p, tvp));
3875 unlock_user(p, arg1, 0);
3878 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
3879 case TARGET_NR_futimesat:
3881 struct timeval *tvp, tv[2];
3883 if (copy_from_user_timeval(&tv[0], arg3)
3884 || copy_from_user_timeval(&tv[1],
3885 arg3 + sizeof(struct target_timeval)))
3891 if (!(p = lock_user_string(arg2)))
3893 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
3894 unlock_user(p, arg2, 0);
3898 #ifdef TARGET_NR_stty
3899 case TARGET_NR_stty:
3902 #ifdef TARGET_NR_gtty
3903 case TARGET_NR_gtty:
3906 case TARGET_NR_access:
3907 if (!(p = lock_user_string(arg1)))
3909 ret = get_errno(access(p, arg2));
3910 unlock_user(p, arg1, 0);
3912 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
3913 case TARGET_NR_faccessat:
3914 if (!(p = lock_user_string(arg2)))
3916 ret = get_errno(sys_faccessat(arg1, p, arg3, arg4));
3917 unlock_user(p, arg2, 0);
3920 #ifdef TARGET_NR_nice /* not on alpha */
3921 case TARGET_NR_nice:
3922 ret = get_errno(nice(arg1));
3925 #ifdef TARGET_NR_ftime
3926 case TARGET_NR_ftime:
3929 case TARGET_NR_sync:
3933 case TARGET_NR_kill:
3934 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
3936 case TARGET_NR_rename:
3939 p = lock_user_string(arg1);
3940 p2 = lock_user_string(arg2);
3942 ret = -TARGET_EFAULT;
3944 ret = get_errno(rename(p, p2));
3945 unlock_user(p2, arg2, 0);
3946 unlock_user(p, arg1, 0);
3949 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
3950 case TARGET_NR_renameat:
3953 p = lock_user_string(arg2);
3954 p2 = lock_user_string(arg4);
3956 ret = -TARGET_EFAULT;
3958 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
3959 unlock_user(p2, arg4, 0);
3960 unlock_user(p, arg2, 0);
3964 case TARGET_NR_mkdir:
3965 if (!(p = lock_user_string(arg1)))
3967 ret = get_errno(mkdir(p, arg2));
3968 unlock_user(p, arg1, 0);
3970 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
3971 case TARGET_NR_mkdirat:
3972 if (!(p = lock_user_string(arg2)))
3974 ret = get_errno(sys_mkdirat(arg1, p, arg3));
3975 unlock_user(p, arg2, 0);
3978 case TARGET_NR_rmdir:
3979 if (!(p = lock_user_string(arg1)))
3981 ret = get_errno(rmdir(p));
3982 unlock_user(p, arg1, 0);
3985 ret = get_errno(dup(arg1));
3987 case TARGET_NR_pipe:
3990 ret = get_errno(pipe(host_pipe));
3991 if (!is_error(ret)) {
3992 #if defined(TARGET_MIPS)
3993 CPUMIPSState *env = (CPUMIPSState*)cpu_env;
3994 env->active_tc.gpr[3] = host_pipe[1];
3996 #elif defined(TARGET_SH4)
3997 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
4000 if (put_user_s32(host_pipe[0], arg1)
4001 || put_user_s32(host_pipe[1], arg1 + sizeof(host_pipe[0])))
4007 case TARGET_NR_times:
4009 struct target_tms *tmsp;
4011 ret = get_errno(times(&tms));
4013 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
4016 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
4017 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
4018 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
4019 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
4022 ret = host_to_target_clock_t(ret);
4025 #ifdef TARGET_NR_prof
4026 case TARGET_NR_prof:
4029 #ifdef TARGET_NR_signal
4030 case TARGET_NR_signal:
4033 case TARGET_NR_acct:
4035 ret = get_errno(acct(NULL));
4037 if (!(p = lock_user_string(arg1)))
4039 ret = get_errno(acct(path(p)));
4040 unlock_user(p, arg1, 0);
4043 #ifdef TARGET_NR_umount2 /* not on alpha */
4044 case TARGET_NR_umount2:
4045 if (!(p = lock_user_string(arg1)))
4047 ret = get_errno(umount2(p, arg2));
4048 unlock_user(p, arg1, 0);
4051 #ifdef TARGET_NR_lock
4052 case TARGET_NR_lock:
4055 case TARGET_NR_ioctl:
4056 ret = do_ioctl(arg1, arg2, arg3);
4058 case TARGET_NR_fcntl:
4059 ret = do_fcntl(arg1, arg2, arg3);
4061 #ifdef TARGET_NR_mpx
4065 case TARGET_NR_setpgid:
4066 ret = get_errno(setpgid(arg1, arg2));
4068 #ifdef TARGET_NR_ulimit
4069 case TARGET_NR_ulimit:
4072 #ifdef TARGET_NR_oldolduname
4073 case TARGET_NR_oldolduname:
4076 case TARGET_NR_umask:
4077 ret = get_errno(umask(arg1));
4079 case TARGET_NR_chroot:
4080 if (!(p = lock_user_string(arg1)))
4082 ret = get_errno(chroot(p));
4083 unlock_user(p, arg1, 0);
4085 case TARGET_NR_ustat:
4087 case TARGET_NR_dup2:
4088 ret = get_errno(dup2(arg1, arg2));
4090 #ifdef TARGET_NR_getppid /* not on alpha */
4091 case TARGET_NR_getppid:
4092 ret = get_errno(getppid());
4095 case TARGET_NR_getpgrp:
4096 ret = get_errno(getpgrp());
4098 case TARGET_NR_setsid:
4099 ret = get_errno(setsid());
4101 #ifdef TARGET_NR_sigaction
4102 case TARGET_NR_sigaction:
4104 #if !defined(TARGET_MIPS)
4105 struct target_old_sigaction *old_act;
4106 struct target_sigaction act, oact, *pact;
4108 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4110 act._sa_handler = old_act->_sa_handler;
4111 target_siginitset(&act.sa_mask, old_act->sa_mask);
4112 act.sa_flags = old_act->sa_flags;
4113 act.sa_restorer = old_act->sa_restorer;
4114 unlock_user_struct(old_act, arg2, 0);
4119 ret = get_errno(do_sigaction(arg1, pact, &oact));
4120 if (!is_error(ret) && arg3) {
4121 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4123 old_act->_sa_handler = oact._sa_handler;
4124 old_act->sa_mask = oact.sa_mask.sig[0];
4125 old_act->sa_flags = oact.sa_flags;
4126 old_act->sa_restorer = oact.sa_restorer;
4127 unlock_user_struct(old_act, arg3, 1);
4130 struct target_sigaction act, oact, *pact, *old_act;
4133 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4135 act._sa_handler = old_act->_sa_handler;
4136 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
4137 act.sa_flags = old_act->sa_flags;
4138 unlock_user_struct(old_act, arg2, 0);
4144 ret = get_errno(do_sigaction(arg1, pact, &oact));
4146 if (!is_error(ret) && arg3) {
4147 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4149 old_act->_sa_handler = oact._sa_handler;
4150 old_act->sa_flags = oact.sa_flags;
4151 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
4152 old_act->sa_mask.sig[1] = 0;
4153 old_act->sa_mask.sig[2] = 0;
4154 old_act->sa_mask.sig[3] = 0;
4155 unlock_user_struct(old_act, arg3, 1);
4161 case TARGET_NR_rt_sigaction:
4163 struct target_sigaction *act;
4164 struct target_sigaction *oact;
4167 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
4172 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
4173 ret = -TARGET_EFAULT;
4174 goto rt_sigaction_fail;
4178 ret = get_errno(do_sigaction(arg1, act, oact));
4181 unlock_user_struct(act, arg2, 0);
4183 unlock_user_struct(oact, arg3, 1);
4186 #ifdef TARGET_NR_sgetmask /* not on alpha */
4187 case TARGET_NR_sgetmask:
4190 abi_ulong target_set;
4191 sigprocmask(0, NULL, &cur_set);
4192 host_to_target_old_sigset(&target_set, &cur_set);
4197 #ifdef TARGET_NR_ssetmask /* not on alpha */
4198 case TARGET_NR_ssetmask:
4200 sigset_t set, oset, cur_set;
4201 abi_ulong target_set = arg1;
4202 sigprocmask(0, NULL, &cur_set);
4203 target_to_host_old_sigset(&set, &target_set);
4204 sigorset(&set, &set, &cur_set);
4205 sigprocmask(SIG_SETMASK, &set, &oset);
4206 host_to_target_old_sigset(&target_set, &oset);
4211 #ifdef TARGET_NR_sigprocmask
4212 case TARGET_NR_sigprocmask:
4215 sigset_t set, oldset, *set_ptr;
4219 case TARGET_SIG_BLOCK:
4222 case TARGET_SIG_UNBLOCK:
4225 case TARGET_SIG_SETMASK:
4229 ret = -TARGET_EINVAL;
4232 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4234 target_to_host_old_sigset(&set, p);
4235 unlock_user(p, arg2, 0);
4241 ret = get_errno(sigprocmask(arg1, set_ptr, &oldset));
4242 if (!is_error(ret) && arg3) {
4243 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4245 host_to_target_old_sigset(p, &oldset);
4246 unlock_user(p, arg3, sizeof(target_sigset_t));
4251 case TARGET_NR_rt_sigprocmask:
4254 sigset_t set, oldset, *set_ptr;
4258 case TARGET_SIG_BLOCK:
4261 case TARGET_SIG_UNBLOCK:
4264 case TARGET_SIG_SETMASK:
4268 ret = -TARGET_EINVAL;
4271 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4273 target_to_host_sigset(&set, p);
4274 unlock_user(p, arg2, 0);
4280 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
4281 if (!is_error(ret) && arg3) {
4282 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4284 host_to_target_sigset(p, &oldset);
4285 unlock_user(p, arg3, sizeof(target_sigset_t));
4289 #ifdef TARGET_NR_sigpending
4290 case TARGET_NR_sigpending:
4293 ret = get_errno(sigpending(&set));
4294 if (!is_error(ret)) {
4295 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4297 host_to_target_old_sigset(p, &set);
4298 unlock_user(p, arg1, sizeof(target_sigset_t));
4303 case TARGET_NR_rt_sigpending:
4306 ret = get_errno(sigpending(&set));
4307 if (!is_error(ret)) {
4308 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4310 host_to_target_sigset(p, &set);
4311 unlock_user(p, arg1, sizeof(target_sigset_t));
4315 #ifdef TARGET_NR_sigsuspend
4316 case TARGET_NR_sigsuspend:
4319 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4321 target_to_host_old_sigset(&set, p);
4322 unlock_user(p, arg1, 0);
4323 ret = get_errno(sigsuspend(&set));
4327 case TARGET_NR_rt_sigsuspend:
4330 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4332 target_to_host_sigset(&set, p);
4333 unlock_user(p, arg1, 0);
4334 ret = get_errno(sigsuspend(&set));
4337 case TARGET_NR_rt_sigtimedwait:
4340 struct timespec uts, *puts;
4343 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4345 target_to_host_sigset(&set, p);
4346 unlock_user(p, arg1, 0);
4349 target_to_host_timespec(puts, arg3);
4353 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
4354 if (!is_error(ret) && arg2) {
4355 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
4357 host_to_target_siginfo(p, &uinfo);
4358 unlock_user(p, arg2, sizeof(target_siginfo_t));
4362 case TARGET_NR_rt_sigqueueinfo:
4365 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
4367 target_to_host_siginfo(&uinfo, p);
4368 unlock_user(p, arg1, 0);
4369 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
4372 #ifdef TARGET_NR_sigreturn
4373 case TARGET_NR_sigreturn:
4374 /* NOTE: ret is eax, so not transcoding must be done */
4375 ret = do_sigreturn(cpu_env);
4378 case TARGET_NR_rt_sigreturn:
4379 /* NOTE: ret is eax, so not transcoding must be done */
4380 ret = do_rt_sigreturn(cpu_env);
4382 case TARGET_NR_sethostname:
4383 if (!(p = lock_user_string(arg1)))
4385 ret = get_errno(sethostname(p, arg2));
4386 unlock_user(p, arg1, 0);
4388 case TARGET_NR_setrlimit:
4390 /* XXX: convert resource ? */
4391 int resource = arg1;
4392 struct target_rlimit *target_rlim;
4394 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
4396 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4397 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4398 unlock_user_struct(target_rlim, arg2, 0);
4399 ret = get_errno(setrlimit(resource, &rlim));
4402 case TARGET_NR_getrlimit:
4404 /* XXX: convert resource ? */
4405 int resource = arg1;
4406 struct target_rlimit *target_rlim;
4409 ret = get_errno(getrlimit(resource, &rlim));
4410 if (!is_error(ret)) {
4411 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
4413 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4414 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4415 unlock_user_struct(target_rlim, arg2, 1);
4419 case TARGET_NR_getrusage:
4421 struct rusage rusage;
4422 ret = get_errno(getrusage(arg1, &rusage));
4423 if (!is_error(ret)) {
4424 host_to_target_rusage(arg2, &rusage);
4428 case TARGET_NR_gettimeofday:
4431 ret = get_errno(gettimeofday(&tv, NULL));
4432 if (!is_error(ret)) {
4433 if (copy_to_user_timeval(arg1, &tv))
4438 case TARGET_NR_settimeofday:
4441 if (copy_from_user_timeval(&tv, arg1))
4443 ret = get_errno(settimeofday(&tv, NULL));
4446 #ifdef TARGET_NR_select
4447 case TARGET_NR_select:
4449 struct target_sel_arg_struct *sel;
4450 abi_ulong inp, outp, exp, tvp;
4453 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
4455 nsel = tswapl(sel->n);
4456 inp = tswapl(sel->inp);
4457 outp = tswapl(sel->outp);
4458 exp = tswapl(sel->exp);
4459 tvp = tswapl(sel->tvp);
4460 unlock_user_struct(sel, arg1, 0);
4461 ret = do_select(nsel, inp, outp, exp, tvp);
4465 case TARGET_NR_symlink:
4468 p = lock_user_string(arg1);
4469 p2 = lock_user_string(arg2);
4471 ret = -TARGET_EFAULT;
4473 ret = get_errno(symlink(p, p2));
4474 unlock_user(p2, arg2, 0);
4475 unlock_user(p, arg1, 0);
4478 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
4479 case TARGET_NR_symlinkat:
4482 p = lock_user_string(arg1);
4483 p2 = lock_user_string(arg3);
4485 ret = -TARGET_EFAULT;
4487 ret = get_errno(sys_symlinkat(p, arg2, p2));
4488 unlock_user(p2, arg3, 0);
4489 unlock_user(p, arg1, 0);
4493 #ifdef TARGET_NR_oldlstat
4494 case TARGET_NR_oldlstat:
4497 case TARGET_NR_readlink:
4500 p = lock_user_string(arg1);
4501 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
4503 ret = -TARGET_EFAULT;
4505 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
4506 char real[PATH_MAX];
4507 temp = realpath(exec_path,real);
4508 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
4509 snprintf((char *)p2, arg3, "%s", real);
4512 ret = get_errno(readlink(path(p), p2, arg3));
4514 unlock_user(p2, arg2, ret);
4515 unlock_user(p, arg1, 0);
4518 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
4519 case TARGET_NR_readlinkat:
4522 p = lock_user_string(arg2);
4523 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
4525 ret = -TARGET_EFAULT;
4527 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
4528 unlock_user(p2, arg3, ret);
4529 unlock_user(p, arg2, 0);
4533 #ifdef TARGET_NR_uselib
4534 case TARGET_NR_uselib:
4537 #ifdef TARGET_NR_swapon
4538 case TARGET_NR_swapon:
4539 if (!(p = lock_user_string(arg1)))
4541 ret = get_errno(swapon(p, arg2));
4542 unlock_user(p, arg1, 0);
4545 case TARGET_NR_reboot:
4547 #ifdef TARGET_NR_readdir
4548 case TARGET_NR_readdir:
4551 #ifdef TARGET_NR_mmap
4552 case TARGET_NR_mmap:
4553 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS)
4556 abi_ulong v1, v2, v3, v4, v5, v6;
4557 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
4565 unlock_user(v, arg1, 0);
4566 ret = get_errno(target_mmap(v1, v2, v3,
4567 target_to_host_bitmask(v4, mmap_flags_tbl),
4571 ret = get_errno(target_mmap(arg1, arg2, arg3,
4572 target_to_host_bitmask(arg4, mmap_flags_tbl),
4578 #ifdef TARGET_NR_mmap2
4579 case TARGET_NR_mmap2:
4581 #define MMAP_SHIFT 12
4583 ret = get_errno(target_mmap(arg1, arg2, arg3,
4584 target_to_host_bitmask(arg4, mmap_flags_tbl),
4586 arg6 << MMAP_SHIFT));
4589 case TARGET_NR_munmap:
4590 ret = get_errno(target_munmap(arg1, arg2));
4592 case TARGET_NR_mprotect:
4593 ret = get_errno(target_mprotect(arg1, arg2, arg3));
4595 #ifdef TARGET_NR_mremap
4596 case TARGET_NR_mremap:
4597 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
4600 /* ??? msync/mlock/munlock are broken for softmmu. */
4601 #ifdef TARGET_NR_msync
4602 case TARGET_NR_msync:
4603 ret = get_errno(msync(g2h(arg1), arg2, arg3));
4606 #ifdef TARGET_NR_mlock
4607 case TARGET_NR_mlock:
4608 ret = get_errno(mlock(g2h(arg1), arg2));
4611 #ifdef TARGET_NR_munlock
4612 case TARGET_NR_munlock:
4613 ret = get_errno(munlock(g2h(arg1), arg2));
4616 #ifdef TARGET_NR_mlockall
4617 case TARGET_NR_mlockall:
4618 ret = get_errno(mlockall(arg1));
4621 #ifdef TARGET_NR_munlockall
4622 case TARGET_NR_munlockall:
4623 ret = get_errno(munlockall());
4626 case TARGET_NR_truncate:
4627 if (!(p = lock_user_string(arg1)))
4629 ret = get_errno(truncate(p, arg2));
4630 unlock_user(p, arg1, 0);
4632 case TARGET_NR_ftruncate:
4633 ret = get_errno(ftruncate(arg1, arg2));
4635 case TARGET_NR_fchmod:
4636 ret = get_errno(fchmod(arg1, arg2));
4638 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
4639 case TARGET_NR_fchmodat:
4640 if (!(p = lock_user_string(arg2)))
4642 ret = get_errno(sys_fchmodat(arg1, p, arg3, arg4));
4643 unlock_user(p, arg2, 0);
4646 case TARGET_NR_getpriority:
4647 /* libc does special remapping of the return value of
4648 * sys_getpriority() so it's just easiest to call
4649 * sys_getpriority() directly rather than through libc. */
4650 ret = sys_getpriority(arg1, arg2);
4652 case TARGET_NR_setpriority:
4653 ret = get_errno(setpriority(arg1, arg2, arg3));
4655 #ifdef TARGET_NR_profil
4656 case TARGET_NR_profil:
4659 case TARGET_NR_statfs:
4660 if (!(p = lock_user_string(arg1)))
4662 ret = get_errno(statfs(path(p), &stfs));
4663 unlock_user(p, arg1, 0);
4665 if (!is_error(ret)) {
4666 struct target_statfs *target_stfs;
4668 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
4670 __put_user(stfs.f_type, &target_stfs->f_type);
4671 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
4672 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
4673 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
4674 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
4675 __put_user(stfs.f_files, &target_stfs->f_files);
4676 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
4677 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
4678 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
4679 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
4680 unlock_user_struct(target_stfs, arg2, 1);
4683 case TARGET_NR_fstatfs:
4684 ret = get_errno(fstatfs(arg1, &stfs));
4685 goto convert_statfs;
4686 #ifdef TARGET_NR_statfs64
4687 case TARGET_NR_statfs64:
4688 if (!(p = lock_user_string(arg1)))
4690 ret = get_errno(statfs(path(p), &stfs));
4691 unlock_user(p, arg1, 0);
4693 if (!is_error(ret)) {
4694 struct target_statfs64 *target_stfs;
4696 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
4698 __put_user(stfs.f_type, &target_stfs->f_type);
4699 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
4700 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
4701 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
4702 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
4703 __put_user(stfs.f_files, &target_stfs->f_files);
4704 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
4705 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
4706 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
4707 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
4708 unlock_user_struct(target_stfs, arg3, 1);
4711 case TARGET_NR_fstatfs64:
4712 ret = get_errno(fstatfs(arg1, &stfs));
4713 goto convert_statfs64;
4715 #ifdef TARGET_NR_ioperm
4716 case TARGET_NR_ioperm:
4719 #ifdef TARGET_NR_socketcall
4720 case TARGET_NR_socketcall:
4721 ret = do_socketcall(arg1, arg2);
4724 #ifdef TARGET_NR_accept
4725 case TARGET_NR_accept:
4726 ret = do_accept(arg1, arg2, arg3);
4729 #ifdef TARGET_NR_bind
4730 case TARGET_NR_bind:
4731 ret = do_bind(arg1, arg2, arg3);
4734 #ifdef TARGET_NR_connect
4735 case TARGET_NR_connect:
4736 ret = do_connect(arg1, arg2, arg3);
4739 #ifdef TARGET_NR_getpeername
4740 case TARGET_NR_getpeername:
4741 ret = do_getpeername(arg1, arg2, arg3);
4744 #ifdef TARGET_NR_getsockname
4745 case TARGET_NR_getsockname:
4746 ret = do_getsockname(arg1, arg2, arg3);
4749 #ifdef TARGET_NR_getsockopt
4750 case TARGET_NR_getsockopt:
4751 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
4754 #ifdef TARGET_NR_listen
4755 case TARGET_NR_listen:
4756 ret = get_errno(listen(arg1, arg2));
4759 #ifdef TARGET_NR_recv
4760 case TARGET_NR_recv:
4761 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
4764 #ifdef TARGET_NR_recvfrom
4765 case TARGET_NR_recvfrom:
4766 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
4769 #ifdef TARGET_NR_recvmsg
4770 case TARGET_NR_recvmsg:
4771 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
4774 #ifdef TARGET_NR_send
4775 case TARGET_NR_send:
4776 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
4779 #ifdef TARGET_NR_sendmsg
4780 case TARGET_NR_sendmsg:
4781 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
4784 #ifdef TARGET_NR_sendto
4785 case TARGET_NR_sendto:
4786 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
4789 #ifdef TARGET_NR_shutdown
4790 case TARGET_NR_shutdown:
4791 ret = get_errno(shutdown(arg1, arg2));
4794 #ifdef TARGET_NR_socket
4795 case TARGET_NR_socket:
4796 ret = do_socket(arg1, arg2, arg3);
4799 #ifdef TARGET_NR_socketpair
4800 case TARGET_NR_socketpair:
4801 ret = do_socketpair(arg1, arg2, arg3, arg4);
4804 #ifdef TARGET_NR_setsockopt
4805 case TARGET_NR_setsockopt:
4806 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
4810 case TARGET_NR_syslog:
4811 if (!(p = lock_user_string(arg2)))
4813 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
4814 unlock_user(p, arg2, 0);
4817 case TARGET_NR_setitimer:
4819 struct itimerval value, ovalue, *pvalue;
4823 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
4824 || copy_from_user_timeval(&pvalue->it_value,
4825 arg2 + sizeof(struct target_timeval)))
4830 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
4831 if (!is_error(ret) && arg3) {
4832 if (copy_to_user_timeval(arg3,
4833 &ovalue.it_interval)
4834 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
4840 case TARGET_NR_getitimer:
4842 struct itimerval value;
4844 ret = get_errno(getitimer(arg1, &value));
4845 if (!is_error(ret) && arg2) {
4846 if (copy_to_user_timeval(arg2,
4848 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
4854 case TARGET_NR_stat:
4855 if (!(p = lock_user_string(arg1)))
4857 ret = get_errno(stat(path(p), &st));
4858 unlock_user(p, arg1, 0);
4860 case TARGET_NR_lstat:
4861 if (!(p = lock_user_string(arg1)))
4863 ret = get_errno(lstat(path(p), &st));
4864 unlock_user(p, arg1, 0);
4866 case TARGET_NR_fstat:
4868 ret = get_errno(fstat(arg1, &st));
4870 if (!is_error(ret)) {
4871 struct target_stat *target_st;
4873 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
4875 __put_user(st.st_dev, &target_st->st_dev);
4876 __put_user(st.st_ino, &target_st->st_ino);
4877 __put_user(st.st_mode, &target_st->st_mode);
4878 __put_user(st.st_uid, &target_st->st_uid);
4879 __put_user(st.st_gid, &target_st->st_gid);
4880 __put_user(st.st_nlink, &target_st->st_nlink);
4881 __put_user(st.st_rdev, &target_st->st_rdev);
4882 __put_user(st.st_size, &target_st->st_size);
4883 __put_user(st.st_blksize, &target_st->st_blksize);
4884 __put_user(st.st_blocks, &target_st->st_blocks);
4885 __put_user(st.st_atime, &target_st->target_st_atime);
4886 __put_user(st.st_mtime, &target_st->target_st_mtime);
4887 __put_user(st.st_ctime, &target_st->target_st_ctime);
4888 unlock_user_struct(target_st, arg2, 1);
4892 #ifdef TARGET_NR_olduname
4893 case TARGET_NR_olduname:
4896 #ifdef TARGET_NR_iopl
4897 case TARGET_NR_iopl:
4900 case TARGET_NR_vhangup:
4901 ret = get_errno(vhangup());
4903 #ifdef TARGET_NR_idle
4904 case TARGET_NR_idle:
4907 #ifdef TARGET_NR_syscall
4908 case TARGET_NR_syscall:
4909 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
4912 case TARGET_NR_wait4:
4915 abi_long status_ptr = arg2;
4916 struct rusage rusage, *rusage_ptr;
4917 abi_ulong target_rusage = arg4;
4919 rusage_ptr = &rusage;
4922 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
4923 if (!is_error(ret)) {
4925 if (put_user_s32(status, status_ptr))
4929 host_to_target_rusage(target_rusage, &rusage);
4933 #ifdef TARGET_NR_swapoff
4934 case TARGET_NR_swapoff:
4935 if (!(p = lock_user_string(arg1)))
4937 ret = get_errno(swapoff(p));
4938 unlock_user(p, arg1, 0);
4941 case TARGET_NR_sysinfo:
4943 struct target_sysinfo *target_value;
4944 struct sysinfo value;
4945 ret = get_errno(sysinfo(&value));
4946 if (!is_error(ret) && arg1)
4948 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
4950 __put_user(value.uptime, &target_value->uptime);
4951 __put_user(value.loads[0], &target_value->loads[0]);
4952 __put_user(value.loads[1], &target_value->loads[1]);
4953 __put_user(value.loads[2], &target_value->loads[2]);
4954 __put_user(value.totalram, &target_value->totalram);
4955 __put_user(value.freeram, &target_value->freeram);
4956 __put_user(value.sharedram, &target_value->sharedram);
4957 __put_user(value.bufferram, &target_value->bufferram);
4958 __put_user(value.totalswap, &target_value->totalswap);
4959 __put_user(value.freeswap, &target_value->freeswap);
4960 __put_user(value.procs, &target_value->procs);
4961 __put_user(value.totalhigh, &target_value->totalhigh);
4962 __put_user(value.freehigh, &target_value->freehigh);
4963 __put_user(value.mem_unit, &target_value->mem_unit);
4964 unlock_user_struct(target_value, arg1, 1);
4968 #ifdef TARGET_NR_ipc
4970 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
4974 #ifdef TARGET_NR_msgctl
4975 case TARGET_NR_msgctl:
4976 ret = do_msgctl(arg1, arg2, arg3);
4979 #ifdef TARGET_NR_msgget
4980 case TARGET_NR_msgget:
4981 ret = get_errno(msgget(arg1, arg2));
4984 #ifdef TARGET_NR_msgrcv
4985 case TARGET_NR_msgrcv:
4986 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
4989 #ifdef TARGET_NR_msgsnd
4990 case TARGET_NR_msgsnd:
4991 ret = do_msgsnd(arg1, arg2, arg3, arg4);
4994 case TARGET_NR_fsync:
4995 ret = get_errno(fsync(arg1));
4997 case TARGET_NR_clone:
4998 #if defined(TARGET_SH4)
4999 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
5000 #elif defined(TARGET_CRIS)
5001 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
5003 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
5006 #ifdef __NR_exit_group
5007 /* new thread calls */
5008 case TARGET_NR_exit_group:
5012 gdb_exit(cpu_env, arg1);
5013 ret = get_errno(exit_group(arg1));
5016 case TARGET_NR_setdomainname:
5017 if (!(p = lock_user_string(arg1)))
5019 ret = get_errno(setdomainname(p, arg2));
5020 unlock_user(p, arg1, 0);
5022 case TARGET_NR_uname:
5023 /* no need to transcode because we use the linux syscall */
5025 struct new_utsname * buf;
5027 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
5029 ret = get_errno(sys_uname(buf));
5030 if (!is_error(ret)) {
5031 /* Overrite the native machine name with whatever is being
5033 strcpy (buf->machine, UNAME_MACHINE);
5034 /* Allow the user to override the reported release. */
5035 if (qemu_uname_release && *qemu_uname_release)
5036 strcpy (buf->release, qemu_uname_release);
5038 unlock_user_struct(buf, arg1, 1);
5042 case TARGET_NR_modify_ldt:
5043 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
5045 #if !defined(TARGET_X86_64)
5046 case TARGET_NR_vm86old:
5048 case TARGET_NR_vm86:
5049 ret = do_vm86(cpu_env, arg1, arg2);
5053 case TARGET_NR_adjtimex:
5055 #ifdef TARGET_NR_create_module
5056 case TARGET_NR_create_module:
5058 case TARGET_NR_init_module:
5059 case TARGET_NR_delete_module:
5060 #ifdef TARGET_NR_get_kernel_syms
5061 case TARGET_NR_get_kernel_syms:
5064 case TARGET_NR_quotactl:
5066 case TARGET_NR_getpgid:
5067 ret = get_errno(getpgid(arg1));
5069 case TARGET_NR_fchdir:
5070 ret = get_errno(fchdir(arg1));
5072 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5073 case TARGET_NR_bdflush:
5076 #ifdef TARGET_NR_sysfs
5077 case TARGET_NR_sysfs:
5080 case TARGET_NR_personality:
5081 ret = get_errno(personality(arg1));
5083 #ifdef TARGET_NR_afs_syscall
5084 case TARGET_NR_afs_syscall:
5087 #ifdef TARGET_NR__llseek /* Not on alpha */
5088 case TARGET_NR__llseek:
5090 #if defined (__x86_64__)
5091 ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5));
5092 if (put_user_s64(ret, arg4))
5096 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
5097 if (put_user_s64(res, arg4))
5103 case TARGET_NR_getdents:
5104 #if TARGET_ABI_BITS != 32
5106 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5108 struct target_dirent *target_dirp;
5109 struct linux_dirent *dirp;
5110 abi_long count = arg3;
5112 dirp = malloc(count);
5114 ret = -TARGET_ENOMEM;
5118 ret = get_errno(sys_getdents(arg1, dirp, count));
5119 if (!is_error(ret)) {
5120 struct linux_dirent *de;
5121 struct target_dirent *tde;
5123 int reclen, treclen;
5124 int count1, tnamelen;
5128 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5132 reclen = de->d_reclen;
5133 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
5134 tde->d_reclen = tswap16(treclen);
5135 tde->d_ino = tswapl(de->d_ino);
5136 tde->d_off = tswapl(de->d_off);
5137 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
5140 /* XXX: may not be correct */
5141 pstrcpy(tde->d_name, tnamelen, de->d_name);
5142 de = (struct linux_dirent *)((char *)de + reclen);
5144 tde = (struct target_dirent *)((char *)tde + treclen);
5148 unlock_user(target_dirp, arg2, ret);
5154 struct linux_dirent *dirp;
5155 abi_long count = arg3;
5157 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5159 ret = get_errno(sys_getdents(arg1, dirp, count));
5160 if (!is_error(ret)) {
5161 struct linux_dirent *de;
5166 reclen = de->d_reclen;
5169 de->d_reclen = tswap16(reclen);
5170 tswapls(&de->d_ino);
5171 tswapls(&de->d_off);
5172 de = (struct linux_dirent *)((char *)de + reclen);
5176 unlock_user(dirp, arg2, ret);
5180 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5181 case TARGET_NR_getdents64:
5183 struct linux_dirent64 *dirp;
5184 abi_long count = arg3;
5185 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5187 ret = get_errno(sys_getdents64(arg1, dirp, count));
5188 if (!is_error(ret)) {
5189 struct linux_dirent64 *de;
5194 reclen = de->d_reclen;
5197 de->d_reclen = tswap16(reclen);
5198 tswap64s((uint64_t *)&de->d_ino);
5199 tswap64s((uint64_t *)&de->d_off);
5200 de = (struct linux_dirent64 *)((char *)de + reclen);
5204 unlock_user(dirp, arg2, ret);
5207 #endif /* TARGET_NR_getdents64 */
5208 #ifdef TARGET_NR__newselect
5209 case TARGET_NR__newselect:
5210 ret = do_select(arg1, arg2, arg3, arg4, arg5);
5213 #ifdef TARGET_NR_poll
5214 case TARGET_NR_poll:
5216 struct target_pollfd *target_pfd;
5217 unsigned int nfds = arg2;
5222 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
5225 pfd = alloca(sizeof(struct pollfd) * nfds);
5226 for(i = 0; i < nfds; i++) {
5227 pfd[i].fd = tswap32(target_pfd[i].fd);
5228 pfd[i].events = tswap16(target_pfd[i].events);
5230 ret = get_errno(poll(pfd, nfds, timeout));
5231 if (!is_error(ret)) {
5232 for(i = 0; i < nfds; i++) {
5233 target_pfd[i].revents = tswap16(pfd[i].revents);
5235 ret += nfds * (sizeof(struct target_pollfd)
5236 - sizeof(struct pollfd));
5238 unlock_user(target_pfd, arg1, ret);
5242 case TARGET_NR_flock:
5243 /* NOTE: the flock constant seems to be the same for every
5245 ret = get_errno(flock(arg1, arg2));
5247 case TARGET_NR_readv:
5252 vec = alloca(count * sizeof(struct iovec));
5253 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
5255 ret = get_errno(readv(arg1, vec, count));
5256 unlock_iovec(vec, arg2, count, 1);
5259 case TARGET_NR_writev:
5264 vec = alloca(count * sizeof(struct iovec));
5265 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
5267 ret = get_errno(writev(arg1, vec, count));
5268 unlock_iovec(vec, arg2, count, 0);
5271 case TARGET_NR_getsid:
5272 ret = get_errno(getsid(arg1));
5274 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5275 case TARGET_NR_fdatasync:
5276 ret = get_errno(fdatasync(arg1));
5279 case TARGET_NR__sysctl:
5280 /* We don't implement this, but ENOTDIR is always a safe
5282 ret = -TARGET_ENOTDIR;
5284 case TARGET_NR_sched_setparam:
5286 struct sched_param *target_schp;
5287 struct sched_param schp;
5289 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
5291 schp.sched_priority = tswap32(target_schp->sched_priority);
5292 unlock_user_struct(target_schp, arg2, 0);
5293 ret = get_errno(sched_setparam(arg1, &schp));
5296 case TARGET_NR_sched_getparam:
5298 struct sched_param *target_schp;
5299 struct sched_param schp;
5300 ret = get_errno(sched_getparam(arg1, &schp));
5301 if (!is_error(ret)) {
5302 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
5304 target_schp->sched_priority = tswap32(schp.sched_priority);
5305 unlock_user_struct(target_schp, arg2, 1);
5309 case TARGET_NR_sched_setscheduler:
5311 struct sched_param *target_schp;
5312 struct sched_param schp;
5313 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
5315 schp.sched_priority = tswap32(target_schp->sched_priority);
5316 unlock_user_struct(target_schp, arg3, 0);
5317 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
5320 case TARGET_NR_sched_getscheduler:
5321 ret = get_errno(sched_getscheduler(arg1));
5323 case TARGET_NR_sched_yield:
5324 ret = get_errno(sched_yield());
5326 case TARGET_NR_sched_get_priority_max:
5327 ret = get_errno(sched_get_priority_max(arg1));
5329 case TARGET_NR_sched_get_priority_min:
5330 ret = get_errno(sched_get_priority_min(arg1));
5332 case TARGET_NR_sched_rr_get_interval:
5335 ret = get_errno(sched_rr_get_interval(arg1, &ts));
5336 if (!is_error(ret)) {
5337 host_to_target_timespec(arg2, &ts);
5341 case TARGET_NR_nanosleep:
5343 struct timespec req, rem;
5344 target_to_host_timespec(&req, arg1);
5345 ret = get_errno(nanosleep(&req, &rem));
5346 if (is_error(ret) && arg2) {
5347 host_to_target_timespec(arg2, &rem);
5351 #ifdef TARGET_NR_query_module
5352 case TARGET_NR_query_module:
5355 #ifdef TARGET_NR_nfsservctl
5356 case TARGET_NR_nfsservctl:
5359 case TARGET_NR_prctl:
5362 case PR_GET_PDEATHSIG:
5365 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
5366 if (!is_error(ret) && arg2
5367 && put_user_ual(deathsig, arg2))
5372 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
5376 #ifdef TARGET_NR_arch_prctl
5377 case TARGET_NR_arch_prctl:
5378 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
5379 ret = do_arch_prctl(cpu_env, arg1, arg2);
5385 #ifdef TARGET_NR_pread
5386 case TARGET_NR_pread:
5388 if (((CPUARMState *)cpu_env)->eabi)
5391 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5393 ret = get_errno(pread(arg1, p, arg3, arg4));
5394 unlock_user(p, arg2, ret);
5396 case TARGET_NR_pwrite:
5398 if (((CPUARMState *)cpu_env)->eabi)
5401 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5403 ret = get_errno(pwrite(arg1, p, arg3, arg4));
5404 unlock_user(p, arg2, 0);
5407 #ifdef TARGET_NR_pread64
5408 case TARGET_NR_pread64:
5409 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5411 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
5412 unlock_user(p, arg2, ret);
5414 case TARGET_NR_pwrite64:
5415 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5417 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
5418 unlock_user(p, arg2, 0);
5421 case TARGET_NR_getcwd:
5422 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
5424 ret = get_errno(sys_getcwd1(p, arg2));
5425 unlock_user(p, arg1, ret);
5427 case TARGET_NR_capget:
5429 case TARGET_NR_capset:
5431 case TARGET_NR_sigaltstack:
5432 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
5433 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
5434 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
5439 case TARGET_NR_sendfile:
5441 #ifdef TARGET_NR_getpmsg
5442 case TARGET_NR_getpmsg:
5445 #ifdef TARGET_NR_putpmsg
5446 case TARGET_NR_putpmsg:
5449 #ifdef TARGET_NR_vfork
5450 case TARGET_NR_vfork:
5451 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
5455 #ifdef TARGET_NR_ugetrlimit
5456 case TARGET_NR_ugetrlimit:
5459 ret = get_errno(getrlimit(arg1, &rlim));
5460 if (!is_error(ret)) {
5461 struct target_rlimit *target_rlim;
5462 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5464 target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
5465 target_rlim->rlim_max = tswapl(rlim.rlim_max);
5466 unlock_user_struct(target_rlim, arg2, 1);
5471 #ifdef TARGET_NR_truncate64
5472 case TARGET_NR_truncate64:
5473 if (!(p = lock_user_string(arg1)))
5475 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
5476 unlock_user(p, arg1, 0);
5479 #ifdef TARGET_NR_ftruncate64
5480 case TARGET_NR_ftruncate64:
5481 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
5484 #ifdef TARGET_NR_stat64
5485 case TARGET_NR_stat64:
5486 if (!(p = lock_user_string(arg1)))
5488 ret = get_errno(stat(path(p), &st));
5489 unlock_user(p, arg1, 0);
5491 ret = host_to_target_stat64(cpu_env, arg2, &st);
5494 #ifdef TARGET_NR_lstat64
5495 case TARGET_NR_lstat64:
5496 if (!(p = lock_user_string(arg1)))
5498 ret = get_errno(lstat(path(p), &st));
5499 unlock_user(p, arg1, 0);
5501 ret = host_to_target_stat64(cpu_env, arg2, &st);
5504 #ifdef TARGET_NR_fstat64
5505 case TARGET_NR_fstat64:
5506 ret = get_errno(fstat(arg1, &st));
5508 ret = host_to_target_stat64(cpu_env, arg2, &st);
5511 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
5512 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
5513 #ifdef TARGET_NR_fstatat64
5514 case TARGET_NR_fstatat64:
5516 #ifdef TARGET_NR_newfstatat
5517 case TARGET_NR_newfstatat:
5519 if (!(p = lock_user_string(arg2)))
5521 #ifdef __NR_fstatat64
5522 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
5524 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
5527 ret = host_to_target_stat64(cpu_env, arg3, &st);
5531 case TARGET_NR_lchown:
5532 if (!(p = lock_user_string(arg1)))
5534 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
5535 unlock_user(p, arg1, 0);
5537 case TARGET_NR_getuid:
5538 ret = get_errno(high2lowuid(getuid()));
5540 case TARGET_NR_getgid:
5541 ret = get_errno(high2lowgid(getgid()));
5543 case TARGET_NR_geteuid:
5544 ret = get_errno(high2lowuid(geteuid()));
5546 case TARGET_NR_getegid:
5547 ret = get_errno(high2lowgid(getegid()));
5549 case TARGET_NR_setreuid:
5550 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
5552 case TARGET_NR_setregid:
5553 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
5555 case TARGET_NR_getgroups:
5557 int gidsetsize = arg1;
5558 uint16_t *target_grouplist;
5562 grouplist = alloca(gidsetsize * sizeof(gid_t));
5563 ret = get_errno(getgroups(gidsetsize, grouplist));
5564 if (gidsetsize == 0)
5566 if (!is_error(ret)) {
5567 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
5568 if (!target_grouplist)
5570 for(i = 0;i < ret; i++)
5571 target_grouplist[i] = tswap16(grouplist[i]);
5572 unlock_user(target_grouplist, arg2, gidsetsize * 2);
5576 case TARGET_NR_setgroups:
5578 int gidsetsize = arg1;
5579 uint16_t *target_grouplist;
5583 grouplist = alloca(gidsetsize * sizeof(gid_t));
5584 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
5585 if (!target_grouplist) {
5586 ret = -TARGET_EFAULT;
5589 for(i = 0;i < gidsetsize; i++)
5590 grouplist[i] = tswap16(target_grouplist[i]);
5591 unlock_user(target_grouplist, arg2, 0);
5592 ret = get_errno(setgroups(gidsetsize, grouplist));
5595 case TARGET_NR_fchown:
5596 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
5598 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
5599 case TARGET_NR_fchownat:
5600 if (!(p = lock_user_string(arg2)))
5602 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
5603 unlock_user(p, arg2, 0);
5606 #ifdef TARGET_NR_setresuid
5607 case TARGET_NR_setresuid:
5608 ret = get_errno(setresuid(low2highuid(arg1),
5610 low2highuid(arg3)));
5613 #ifdef TARGET_NR_getresuid
5614 case TARGET_NR_getresuid:
5616 uid_t ruid, euid, suid;
5617 ret = get_errno(getresuid(&ruid, &euid, &suid));
5618 if (!is_error(ret)) {
5619 if (put_user_u16(high2lowuid(ruid), arg1)
5620 || put_user_u16(high2lowuid(euid), arg2)
5621 || put_user_u16(high2lowuid(suid), arg3))
5627 #ifdef TARGET_NR_getresgid
5628 case TARGET_NR_setresgid:
5629 ret = get_errno(setresgid(low2highgid(arg1),
5631 low2highgid(arg3)));
5634 #ifdef TARGET_NR_getresgid
5635 case TARGET_NR_getresgid:
5637 gid_t rgid, egid, sgid;
5638 ret = get_errno(getresgid(&rgid, &egid, &sgid));
5639 if (!is_error(ret)) {
5640 if (put_user_u16(high2lowgid(rgid), arg1)
5641 || put_user_u16(high2lowgid(egid), arg2)
5642 || put_user_u16(high2lowgid(sgid), arg3))
5648 case TARGET_NR_chown:
5649 if (!(p = lock_user_string(arg1)))
5651 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
5652 unlock_user(p, arg1, 0);
5654 case TARGET_NR_setuid:
5655 ret = get_errno(setuid(low2highuid(arg1)));
5657 case TARGET_NR_setgid:
5658 ret = get_errno(setgid(low2highgid(arg1)));
5660 case TARGET_NR_setfsuid:
5661 ret = get_errno(setfsuid(arg1));
5663 case TARGET_NR_setfsgid:
5664 ret = get_errno(setfsgid(arg1));
5666 #endif /* USE_UID16 */
5668 #ifdef TARGET_NR_lchown32
5669 case TARGET_NR_lchown32:
5670 if (!(p = lock_user_string(arg1)))
5672 ret = get_errno(lchown(p, arg2, arg3));
5673 unlock_user(p, arg1, 0);
5676 #ifdef TARGET_NR_getuid32
5677 case TARGET_NR_getuid32:
5678 ret = get_errno(getuid());
5682 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
5683 /* Alpha specific */
5684 case TARGET_NR_getxuid:
5688 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
5690 ret = get_errno(getuid());
5693 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
5694 /* Alpha specific */
5695 case TARGET_NR_getxgid:
5699 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
5701 ret = get_errno(getgid());
5705 #ifdef TARGET_NR_getgid32
5706 case TARGET_NR_getgid32:
5707 ret = get_errno(getgid());
5710 #ifdef TARGET_NR_geteuid32
5711 case TARGET_NR_geteuid32:
5712 ret = get_errno(geteuid());
5715 #ifdef TARGET_NR_getegid32
5716 case TARGET_NR_getegid32:
5717 ret = get_errno(getegid());
5720 #ifdef TARGET_NR_setreuid32
5721 case TARGET_NR_setreuid32:
5722 ret = get_errno(setreuid(arg1, arg2));
5725 #ifdef TARGET_NR_setregid32
5726 case TARGET_NR_setregid32:
5727 ret = get_errno(setregid(arg1, arg2));
5730 #ifdef TARGET_NR_getgroups32
5731 case TARGET_NR_getgroups32:
5733 int gidsetsize = arg1;
5734 uint32_t *target_grouplist;
5738 grouplist = alloca(gidsetsize * sizeof(gid_t));
5739 ret = get_errno(getgroups(gidsetsize, grouplist));
5740 if (gidsetsize == 0)
5742 if (!is_error(ret)) {
5743 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
5744 if (!target_grouplist) {
5745 ret = -TARGET_EFAULT;
5748 for(i = 0;i < ret; i++)
5749 target_grouplist[i] = tswap32(grouplist[i]);
5750 unlock_user(target_grouplist, arg2, gidsetsize * 4);
5755 #ifdef TARGET_NR_setgroups32
5756 case TARGET_NR_setgroups32:
5758 int gidsetsize = arg1;
5759 uint32_t *target_grouplist;
5763 grouplist = alloca(gidsetsize * sizeof(gid_t));
5764 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
5765 if (!target_grouplist) {
5766 ret = -TARGET_EFAULT;
5769 for(i = 0;i < gidsetsize; i++)
5770 grouplist[i] = tswap32(target_grouplist[i]);
5771 unlock_user(target_grouplist, arg2, 0);
5772 ret = get_errno(setgroups(gidsetsize, grouplist));
5776 #ifdef TARGET_NR_fchown32
5777 case TARGET_NR_fchown32:
5778 ret = get_errno(fchown(arg1, arg2, arg3));
5781 #ifdef TARGET_NR_setresuid32
5782 case TARGET_NR_setresuid32:
5783 ret = get_errno(setresuid(arg1, arg2, arg3));
5786 #ifdef TARGET_NR_getresuid32
5787 case TARGET_NR_getresuid32:
5789 uid_t ruid, euid, suid;
5790 ret = get_errno(getresuid(&ruid, &euid, &suid));
5791 if (!is_error(ret)) {
5792 if (put_user_u32(ruid, arg1)
5793 || put_user_u32(euid, arg2)
5794 || put_user_u32(suid, arg3))
5800 #ifdef TARGET_NR_setresgid32
5801 case TARGET_NR_setresgid32:
5802 ret = get_errno(setresgid(arg1, arg2, arg3));
5805 #ifdef TARGET_NR_getresgid32
5806 case TARGET_NR_getresgid32:
5808 gid_t rgid, egid, sgid;
5809 ret = get_errno(getresgid(&rgid, &egid, &sgid));
5810 if (!is_error(ret)) {
5811 if (put_user_u32(rgid, arg1)
5812 || put_user_u32(egid, arg2)
5813 || put_user_u32(sgid, arg3))
5819 #ifdef TARGET_NR_chown32
5820 case TARGET_NR_chown32:
5821 if (!(p = lock_user_string(arg1)))
5823 ret = get_errno(chown(p, arg2, arg3));
5824 unlock_user(p, arg1, 0);
5827 #ifdef TARGET_NR_setuid32
5828 case TARGET_NR_setuid32:
5829 ret = get_errno(setuid(arg1));
5832 #ifdef TARGET_NR_setgid32
5833 case TARGET_NR_setgid32:
5834 ret = get_errno(setgid(arg1));
5837 #ifdef TARGET_NR_setfsuid32
5838 case TARGET_NR_setfsuid32:
5839 ret = get_errno(setfsuid(arg1));
5842 #ifdef TARGET_NR_setfsgid32
5843 case TARGET_NR_setfsgid32:
5844 ret = get_errno(setfsgid(arg1));
5848 case TARGET_NR_pivot_root:
5850 #ifdef TARGET_NR_mincore
5851 case TARGET_NR_mincore:
5854 ret = -TARGET_EFAULT;
5855 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
5857 if (!(p = lock_user_string(arg3)))
5859 ret = get_errno(mincore(a, arg2, p));
5860 unlock_user(p, arg3, ret);
5862 unlock_user(a, arg1, 0);
5866 #ifdef TARGET_NR_arm_fadvise64_64
5867 case TARGET_NR_arm_fadvise64_64:
5870 * arm_fadvise64_64 looks like fadvise64_64 but
5871 * with different argument order
5879 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
5880 #ifdef TARGET_NR_fadvise64_64
5881 case TARGET_NR_fadvise64_64:
5883 /* This is a hint, so ignoring and returning success is ok. */
5887 #ifdef TARGET_NR_madvise
5888 case TARGET_NR_madvise:
5889 /* A straight passthrough may not be safe because qemu sometimes
5890 turns private file-backed mappings into anonymous mappings.
5891 This will break MADV_DONTNEED.
5892 This is a hint, so ignoring and returning success is ok. */
5896 #if TARGET_ABI_BITS == 32
5897 case TARGET_NR_fcntl64:
5901 struct target_flock64 *target_fl;
5903 struct target_eabi_flock64 *target_efl;
5907 case TARGET_F_GETLK64:
5910 case TARGET_F_SETLK64:
5913 case TARGET_F_SETLKW64:
5922 case TARGET_F_GETLK64:
5924 if (((CPUARMState *)cpu_env)->eabi) {
5925 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
5927 fl.l_type = tswap16(target_efl->l_type);
5928 fl.l_whence = tswap16(target_efl->l_whence);
5929 fl.l_start = tswap64(target_efl->l_start);
5930 fl.l_len = tswap64(target_efl->l_len);
5931 fl.l_pid = tswapl(target_efl->l_pid);
5932 unlock_user_struct(target_efl, arg3, 0);
5936 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
5938 fl.l_type = tswap16(target_fl->l_type);
5939 fl.l_whence = tswap16(target_fl->l_whence);
5940 fl.l_start = tswap64(target_fl->l_start);
5941 fl.l_len = tswap64(target_fl->l_len);
5942 fl.l_pid = tswapl(target_fl->l_pid);
5943 unlock_user_struct(target_fl, arg3, 0);
5945 ret = get_errno(fcntl(arg1, cmd, &fl));
5948 if (((CPUARMState *)cpu_env)->eabi) {
5949 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
5951 target_efl->l_type = tswap16(fl.l_type);
5952 target_efl->l_whence = tswap16(fl.l_whence);
5953 target_efl->l_start = tswap64(fl.l_start);
5954 target_efl->l_len = tswap64(fl.l_len);
5955 target_efl->l_pid = tswapl(fl.l_pid);
5956 unlock_user_struct(target_efl, arg3, 1);
5960 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
5962 target_fl->l_type = tswap16(fl.l_type);
5963 target_fl->l_whence = tswap16(fl.l_whence);
5964 target_fl->l_start = tswap64(fl.l_start);
5965 target_fl->l_len = tswap64(fl.l_len);
5966 target_fl->l_pid = tswapl(fl.l_pid);
5967 unlock_user_struct(target_fl, arg3, 1);
5972 case TARGET_F_SETLK64:
5973 case TARGET_F_SETLKW64:
5975 if (((CPUARMState *)cpu_env)->eabi) {
5976 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
5978 fl.l_type = tswap16(target_efl->l_type);
5979 fl.l_whence = tswap16(target_efl->l_whence);
5980 fl.l_start = tswap64(target_efl->l_start);
5981 fl.l_len = tswap64(target_efl->l_len);
5982 fl.l_pid = tswapl(target_efl->l_pid);
5983 unlock_user_struct(target_efl, arg3, 0);
5987 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
5989 fl.l_type = tswap16(target_fl->l_type);
5990 fl.l_whence = tswap16(target_fl->l_whence);
5991 fl.l_start = tswap64(target_fl->l_start);
5992 fl.l_len = tswap64(target_fl->l_len);
5993 fl.l_pid = tswapl(target_fl->l_pid);
5994 unlock_user_struct(target_fl, arg3, 0);
5996 ret = get_errno(fcntl(arg1, cmd, &fl));
5999 ret = do_fcntl(arg1, cmd, arg3);
6005 #ifdef TARGET_NR_cacheflush
6006 case TARGET_NR_cacheflush:
6007 /* self-modifying code is handled automatically, so nothing needed */
6011 #ifdef TARGET_NR_security
6012 case TARGET_NR_security:
6015 #ifdef TARGET_NR_getpagesize
6016 case TARGET_NR_getpagesize:
6017 ret = TARGET_PAGE_SIZE;
6020 case TARGET_NR_gettid:
6021 ret = get_errno(gettid());
6023 #ifdef TARGET_NR_readahead
6024 case TARGET_NR_readahead:
6025 #if TARGET_ABI_BITS == 32
6027 if (((CPUARMState *)cpu_env)->eabi)
6034 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
6036 ret = get_errno(readahead(arg1, arg2, arg3));
6040 #ifdef TARGET_NR_setxattr
6041 case TARGET_NR_setxattr:
6042 case TARGET_NR_lsetxattr:
6043 case TARGET_NR_fsetxattr:
6044 case TARGET_NR_getxattr:
6045 case TARGET_NR_lgetxattr:
6046 case TARGET_NR_fgetxattr:
6047 case TARGET_NR_listxattr:
6048 case TARGET_NR_llistxattr:
6049 case TARGET_NR_flistxattr:
6050 case TARGET_NR_removexattr:
6051 case TARGET_NR_lremovexattr:
6052 case TARGET_NR_fremovexattr:
6053 goto unimplemented_nowarn;
6055 #ifdef TARGET_NR_set_thread_area
6056 case TARGET_NR_set_thread_area:
6057 #if defined(TARGET_MIPS)
6058 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
6061 #elif defined(TARGET_CRIS)
6063 ret = -TARGET_EINVAL;
6065 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
6069 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6070 ret = do_set_thread_area(cpu_env, arg1);
6073 goto unimplemented_nowarn;
6076 #ifdef TARGET_NR_get_thread_area
6077 case TARGET_NR_get_thread_area:
6078 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6079 ret = do_get_thread_area(cpu_env, arg1);
6081 goto unimplemented_nowarn;
6084 #ifdef TARGET_NR_getdomainname
6085 case TARGET_NR_getdomainname:
6086 goto unimplemented_nowarn;
6089 #ifdef TARGET_NR_clock_gettime
6090 case TARGET_NR_clock_gettime:
6093 ret = get_errno(clock_gettime(arg1, &ts));
6094 if (!is_error(ret)) {
6095 host_to_target_timespec(arg2, &ts);
6100 #ifdef TARGET_NR_clock_getres
6101 case TARGET_NR_clock_getres:
6104 ret = get_errno(clock_getres(arg1, &ts));
6105 if (!is_error(ret)) {
6106 host_to_target_timespec(arg2, &ts);
6111 #ifdef TARGET_NR_clock_nanosleep
6112 case TARGET_NR_clock_nanosleep:
6115 target_to_host_timespec(&ts, arg3);
6116 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
6118 host_to_target_timespec(arg4, &ts);
6123 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
6124 case TARGET_NR_set_tid_address:
6125 ret = get_errno(set_tid_address((int *)g2h(arg1)));
6129 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
6130 case TARGET_NR_tkill:
6131 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
6135 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
6136 case TARGET_NR_tgkill:
6137 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
6138 target_to_host_signal(arg3)));
6142 #ifdef TARGET_NR_set_robust_list
6143 case TARGET_NR_set_robust_list:
6144 goto unimplemented_nowarn;
6147 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
6148 case TARGET_NR_utimensat:
6150 struct timespec ts[2];
6151 target_to_host_timespec(ts, arg3);
6152 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
6154 ret = get_errno(sys_utimensat(arg1, NULL, ts, arg4));
6156 if (!(p = lock_user_string(arg2))) {
6157 ret = -TARGET_EFAULT;
6160 ret = get_errno(sys_utimensat(arg1, path(p), ts, arg4));
6161 unlock_user(p, arg2, 0);
6166 #if defined(USE_NPTL)
6167 case TARGET_NR_futex:
6168 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
6171 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
6172 case TARGET_NR_inotify_init:
6173 ret = get_errno(sys_inotify_init());
6176 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
6177 case TARGET_NR_inotify_add_watch:
6178 p = lock_user_string(arg2);
6179 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
6180 unlock_user(p, arg2, 0);
6183 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
6184 case TARGET_NR_inotify_rm_watch:
6185 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
6189 #ifdef TARGET_NR_mq_open
6190 case TARGET_NR_mq_open:
6192 struct mq_attr posix_mq_attr;
6194 p = lock_user_string(arg1 - 1);
6196 copy_from_user_mq_attr (&posix_mq_attr, arg4);
6197 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
6198 unlock_user (p, arg1, 0);
6202 case TARGET_NR_mq_unlink:
6203 p = lock_user_string(arg1 - 1);
6204 ret = get_errno(mq_unlink(p));
6205 unlock_user (p, arg1, 0);
6208 case TARGET_NR_mq_timedsend:
6212 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6214 target_to_host_timespec(&ts, arg5);
6215 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
6216 host_to_target_timespec(arg5, &ts);
6219 ret = get_errno(mq_send(arg1, p, arg3, arg4));
6220 unlock_user (p, arg2, arg3);
6224 case TARGET_NR_mq_timedreceive:
6229 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6231 target_to_host_timespec(&ts, arg5);
6232 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
6233 host_to_target_timespec(arg5, &ts);
6236 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
6237 unlock_user (p, arg2, arg3);
6239 put_user_u32(prio, arg4);
6243 /* Not implemented for now... */
6244 /* case TARGET_NR_mq_notify: */
6247 case TARGET_NR_mq_getsetattr:
6249 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
6252 ret = mq_getattr(arg1, &posix_mq_attr_out);
6253 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
6256 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
6257 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
6266 gemu_log("qemu: Unsupported syscall: %d\n", num);
6267 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
6268 unimplemented_nowarn:
6270 ret = -TARGET_ENOSYS;
6275 gemu_log(" = %ld\n", ret);
6278 print_syscall_ret(num, ret);
6281 ret = -TARGET_EFAULT;