4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
33 #include <sys/types.h>
39 #include <sys/mount.h>
40 #include <sys/prctl.h>
41 #include <sys/resource.h>
46 #include <sys/socket.h>
49 #include <sys/times.h>
52 #include <sys/statfs.h>
54 #include <sys/sysinfo.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <qemu-common.h>
63 #define termios host_termios
64 #define winsize host_winsize
65 #define termio host_termio
66 #define sgttyb host_sgttyb /* same as target */
67 #define tchars host_tchars /* same as target */
68 #define ltchars host_ltchars /* same as target */
70 #include <linux/termios.h>
71 #include <linux/unistd.h>
72 #include <linux/utsname.h>
73 #include <linux/cdrom.h>
74 #include <linux/hdreg.h>
75 #include <linux/soundcard.h>
77 #include <linux/mtio.h>
78 #include "linux_loop.h"
81 #include "qemu-common.h"
84 #include <linux/futex.h>
85 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
86 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
88 /* XXX: Hardcode the above values. */
89 #define CLONE_NPTL_FLAGS2 0
94 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
95 || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
96 /* 16 bit uid wrappers emulation */
100 //#include <linux/msdos_fs.h>
101 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
102 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/*
 * Boilerplate generators: _syscallN(type, name, type1, arg1, ...) expands
 * to a small static wrapper `name` that forwards its N arguments straight
 * to the libc syscall() entry point using the matching __NR_##name number.
 * NOTE(review): the opening/closing brace lines of these macro bodies are
 * missing from this extract — restore them before compiling.
 */
113 #define _syscall0(type,name) \
114 static type name (void) \
116 return syscall(__NR_##name); \
119 #define _syscall1(type,name,type1,arg1) \
120 static type name (type1 arg1) \
122 return syscall(__NR_##name, arg1); \
125 #define _syscall2(type,name,type1,arg1,type2,arg2) \
126 static type name (type1 arg1,type2 arg2) \
128 return syscall(__NR_##name, arg1, arg2); \
131 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
132 static type name (type1 arg1,type2 arg2,type3 arg3) \
134 return syscall(__NR_##name, arg1, arg2, arg3); \
137 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
138 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
140 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
143 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
145 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
147 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
151 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
152 type5,arg5,type6,arg6) \
153 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
156 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/*
 * Aliases mapping the sys_* wrapper names (generated below via _syscallN)
 * onto the host's raw syscall numbers.  The sys_ prefix keeps the wrappers
 * from colliding with libc functions of the same name.
 */
160 #define __NR_sys_exit __NR_exit
161 #define __NR_sys_uname __NR_uname
162 #define __NR_sys_faccessat __NR_faccessat
163 #define __NR_sys_fchmodat __NR_fchmodat
164 #define __NR_sys_fchownat __NR_fchownat
165 #define __NR_sys_fstatat64 __NR_fstatat64
166 #define __NR_sys_futimesat __NR_futimesat
167 #define __NR_sys_getcwd1 __NR_getcwd
168 #define __NR_sys_getdents __NR_getdents
169 #define __NR_sys_getdents64 __NR_getdents64
170 #define __NR_sys_getpriority __NR_getpriority
171 #define __NR_sys_linkat __NR_linkat
172 #define __NR_sys_mkdirat __NR_mkdirat
173 #define __NR_sys_mknodat __NR_mknodat
174 #define __NR_sys_newfstatat __NR_newfstatat
175 #define __NR_sys_openat __NR_openat
176 #define __NR_sys_readlinkat __NR_readlinkat
177 #define __NR_sys_renameat __NR_renameat
178 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
179 #define __NR_sys_symlinkat __NR_symlinkat
180 #define __NR_sys_syslog __NR_syslog
181 #define __NR_sys_tgkill __NR_tgkill
182 #define __NR_sys_tkill __NR_tkill
183 #define __NR_sys_unlinkat __NR_unlinkat
184 #define __NR_sys_utimensat __NR_utimensat
185 #define __NR_sys_futex __NR_futex
186 #define __NR_sys_inotify_init __NR_inotify_init
187 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
188 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/* 64-bit hosts have no separate _llseek; route it to plain lseek. */
190 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
191 #define __NR__llseek __NR_lseek
/*
 * Instantiations of the _syscallN generators: one direct host wrapper per
 * syscall that QEMU needs to issue on the guest's behalf.  Most are guarded
 * so they are only emitted when both the target wants the syscall
 * (TARGET_NR_*) and the host kernel provides it (__NR_*).
 * NOTE(review): the matching #endif lines and several function bodies are
 * missing from this extract — restore before compiling.
 */
195 _syscall0(int, gettid)
197 /* This is a replacement for the host gettid() and must return a host
199 static int gettid(void) {
203 _syscall1(int,sys_exit,int,status)
204 _syscall1(int,sys_uname,struct new_utsname *,buf)
205 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
206 _syscall4(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode,int,flags)
208 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
209 _syscall4(int,sys_fchmodat,int,dirfd,const char *,pathname,
210 mode_t,mode,int,flags)
212 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
213 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
214 uid_t,owner,gid_t,group,int,flags)
216 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
217 defined(__NR_fstatat64)
218 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
219 struct stat *,buf,int,flags)
221 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
222 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
223 const struct timeval *,times)
225 _syscall2(int,sys_getcwd1,char *,buf,size_t,size)
226 #if TARGET_ABI_BITS == 32
227 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
229 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
230 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
232 _syscall2(int, sys_getpriority, int, which, int, who);
233 #if !defined (__x86_64__)
234 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
235 loff_t *, res, uint, wh);
237 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
238 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
239 int,newdirfd,const char *,newpath,int,flags)
241 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
242 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
244 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
245 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
246 mode_t,mode,dev_t,dev)
248 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
249 defined(__NR_newfstatat)
250 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
251 struct stat *,buf,int,flags)
253 #if defined(TARGET_NR_openat) && defined(__NR_openat)
254 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
256 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
257 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
258 char *,buf,size_t,bufsize)
260 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
261 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
262 int,newdirfd,const char *,newpath)
264 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
265 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
266 _syscall3(int,sys_symlinkat,const char *,oldpath,
267 int,newdirfd,const char *,newpath)
269 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
270 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
271 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
273 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
274 _syscall2(int,sys_tkill,int,tid,int,sig)
276 #ifdef __NR_exit_group
277 _syscall1(int,exit_group,int,error_code)
279 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
280 _syscall1(int,set_tid_address,int *,tidptr)
282 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
283 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
285 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
286 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
287 const struct timespec *,tsp,int,flags)
289 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
290 _syscall0(int,sys_inotify_init)
292 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
293 _syscall3(int,sys_inotify_add_watch,int,fd,const char *,pathname,uint32_t,mask)
295 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
296 _syscall2(int,sys_inotify_rm_watch,int,fd,uint32_t,wd)
/* futex wrapper is only needed when NPTL thread emulation is enabled. */
298 #if defined(USE_NPTL)
299 #if defined(TARGET_NR_futex) && defined(__NR_futex)
300 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
301 const struct timespec *,timeout,int *,uaddr2,int,val3)
/* Host libc functions used below that may lack prototypes in the
 * (old) system headers this file targets. */
305 extern int personality(int);
306 extern int flock(int, int);
307 extern int setfsuid(int);
308 extern int setfsgid(int);
309 extern int setgroups(int, gid_t *);
/*
 * errno translation tables between host and target numbering.  Entries are
 * sparse: an index whose value is 0 means "same number on both sides".
 * ERRNO_TABLE_SIZE bounds the largest errno either side may use.
 * NOTE(review): the closing "};" of both tables and several #ifdef/#endif
 * pairs around the EKEY*/EOWNERDEAD entries are missing from this extract.
 */
311 #define ERRNO_TABLE_SIZE 1200
313 /* target_to_host_errno_table[] is initialized from
314 * host_to_target_errno_table[] in syscall_init(). */
315 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
319 * This list is the union of errno values overridden in asm-<arch>/errno.h
320 * minus the errnos that are not actually generic to all archs.
322 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
323 [EIDRM] = TARGET_EIDRM,
324 [ECHRNG] = TARGET_ECHRNG,
325 [EL2NSYNC] = TARGET_EL2NSYNC,
326 [EL3HLT] = TARGET_EL3HLT,
327 [EL3RST] = TARGET_EL3RST,
328 [ELNRNG] = TARGET_ELNRNG,
329 [EUNATCH] = TARGET_EUNATCH,
330 [ENOCSI] = TARGET_ENOCSI,
331 [EL2HLT] = TARGET_EL2HLT,
332 [EDEADLK] = TARGET_EDEADLK,
333 [ENOLCK] = TARGET_ENOLCK,
334 [EBADE] = TARGET_EBADE,
335 [EBADR] = TARGET_EBADR,
336 [EXFULL] = TARGET_EXFULL,
337 [ENOANO] = TARGET_ENOANO,
338 [EBADRQC] = TARGET_EBADRQC,
339 [EBADSLT] = TARGET_EBADSLT,
340 [EBFONT] = TARGET_EBFONT,
341 [ENOSTR] = TARGET_ENOSTR,
342 [ENODATA] = TARGET_ENODATA,
343 [ETIME] = TARGET_ETIME,
344 [ENOSR] = TARGET_ENOSR,
345 [ENONET] = TARGET_ENONET,
346 [ENOPKG] = TARGET_ENOPKG,
347 [EREMOTE] = TARGET_EREMOTE,
348 [ENOLINK] = TARGET_ENOLINK,
349 [EADV] = TARGET_EADV,
350 [ESRMNT] = TARGET_ESRMNT,
351 [ECOMM] = TARGET_ECOMM,
352 [EPROTO] = TARGET_EPROTO,
353 [EDOTDOT] = TARGET_EDOTDOT,
354 [EMULTIHOP] = TARGET_EMULTIHOP,
355 [EBADMSG] = TARGET_EBADMSG,
356 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
357 [EOVERFLOW] = TARGET_EOVERFLOW,
358 [ENOTUNIQ] = TARGET_ENOTUNIQ,
359 [EBADFD] = TARGET_EBADFD,
360 [EREMCHG] = TARGET_EREMCHG,
361 [ELIBACC] = TARGET_ELIBACC,
362 [ELIBBAD] = TARGET_ELIBBAD,
363 [ELIBSCN] = TARGET_ELIBSCN,
364 [ELIBMAX] = TARGET_ELIBMAX,
365 [ELIBEXEC] = TARGET_ELIBEXEC,
366 [EILSEQ] = TARGET_EILSEQ,
367 [ENOSYS] = TARGET_ENOSYS,
368 [ELOOP] = TARGET_ELOOP,
369 [ERESTART] = TARGET_ERESTART,
370 [ESTRPIPE] = TARGET_ESTRPIPE,
371 [ENOTEMPTY] = TARGET_ENOTEMPTY,
372 [EUSERS] = TARGET_EUSERS,
373 [ENOTSOCK] = TARGET_ENOTSOCK,
374 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
375 [EMSGSIZE] = TARGET_EMSGSIZE,
376 [EPROTOTYPE] = TARGET_EPROTOTYPE,
377 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
378 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
379 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
380 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
381 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
382 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
383 [EADDRINUSE] = TARGET_EADDRINUSE,
384 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
385 [ENETDOWN] = TARGET_ENETDOWN,
386 [ENETUNREACH] = TARGET_ENETUNREACH,
387 [ENETRESET] = TARGET_ENETRESET,
388 [ECONNABORTED] = TARGET_ECONNABORTED,
389 [ECONNRESET] = TARGET_ECONNRESET,
390 [ENOBUFS] = TARGET_ENOBUFS,
391 [EISCONN] = TARGET_EISCONN,
392 [ENOTCONN] = TARGET_ENOTCONN,
393 [EUCLEAN] = TARGET_EUCLEAN,
394 [ENOTNAM] = TARGET_ENOTNAM,
395 [ENAVAIL] = TARGET_ENAVAIL,
396 [EISNAM] = TARGET_EISNAM,
397 [EREMOTEIO] = TARGET_EREMOTEIO,
398 [ESHUTDOWN] = TARGET_ESHUTDOWN,
399 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
400 [ETIMEDOUT] = TARGET_ETIMEDOUT,
401 [ECONNREFUSED] = TARGET_ECONNREFUSED,
402 [EHOSTDOWN] = TARGET_EHOSTDOWN,
403 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
404 [EALREADY] = TARGET_EALREADY,
405 [EINPROGRESS] = TARGET_EINPROGRESS,
406 [ESTALE] = TARGET_ESTALE,
407 [ECANCELED] = TARGET_ECANCELED,
408 [ENOMEDIUM] = TARGET_ENOMEDIUM,
409 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
411 [ENOKEY] = TARGET_ENOKEY,
414 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
417 [EKEYREVOKED] = TARGET_EKEYREVOKED,
420 [EKEYREJECTED] = TARGET_EKEYREJECTED,
423 [EOWNERDEAD] = TARGET_EOWNERDEAD,
425 #ifdef ENOTRECOVERABLE
426 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
430 static inline int host_to_target_errno(int err)
432 if(host_to_target_errno_table[err])
433 return host_to_target_errno_table[err];
437 static inline int target_to_host_errno(int err)
439 if (target_to_host_errno_table[err])
440 return target_to_host_errno_table[err];
444 static inline abi_long get_errno(abi_long ret)
447 return -host_to_target_errno(errno);
452 static inline int is_error(abi_long ret)
454 return (abi_ulong)ret >= (abi_ulong)(-4096);
/* Return the host strerror() text for a *target* errno value by first
 * mapping it back to the host numbering.  (Braces restored — the extracted
 * body was unbraced.) */
char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
/* Current and initial guest program break; target_original_brk is the
 * floor below which do_brk() refuses to shrink. */
462 static abi_ulong target_brk;
463 static abi_ulong target_original_brk;
465 void target_set_brk(abi_ulong new_brk)
467 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
470 /* do_brk() must return target values and target errnos. */
/* Grows (never unmaps) the guest heap: requests below the current page
 * simply move the break; otherwise new anonymous pages are mapped at the
 * old break.  NOTE(review): several lines (local declarations, the
 * early-return paths, the final return) are missing from this extract. */
471 abi_long do_brk(abi_ulong new_brk)
474 abi_long mapped_addr;
/* Never shrink below the initial break. */
479 if (new_brk < target_original_brk)
482 brk_page = HOST_PAGE_ALIGN(target_brk);
484 /* If the new brk is less than this, set it and we're done... */
485 if (new_brk < brk_page) {
486 target_brk = new_brk;
490 /* We need to allocate more memory after the brk... */
491 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
492 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
493 PROT_READ|PROT_WRITE,
494 MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
/* Only commit the new break if the mapping actually succeeded. */
496 if (!is_error(mapped_addr))
497 target_brk = new_brk;
/* Rebuild a host fd_set from the guest's bitmap representation at
 * target_fds_addr: read nw abi_ulong words, test each bit, and FD_SET the
 * corresponding descriptor.  Returns -TARGET_EFAULT if the guest memory
 * cannot be locked.  NOTE(review): the FD_ZERO/FD_SET inner lines and the
 * success return are missing from this extract. */
502 static inline abi_long copy_from_user_fdset(fd_set *fds,
503 abi_ulong target_fds_addr,
507 abi_ulong b, *target_fds;
/* Round n up to whole abi_ulong words. */
509 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
510 if (!(target_fds = lock_user(VERIFY_READ,
512 sizeof(abi_ulong) * nw,
514 return -TARGET_EFAULT;
518 for (i = 0; i < nw; i++) {
519 /* grab the abi_ulong */
520 __get_user(b, &target_fds[i]);
521 for (j = 0; j < TARGET_ABI_BITS; j++) {
522 /* check the bit inside the abi_ulong */
529 unlock_user(target_fds, target_fds_addr, 0);
/* Inverse of copy_from_user_fdset: serialize a host fd_set into the
 * guest's abi_ulong bitmap at target_fds_addr, one bit per descriptor.
 * NOTE(review): the v=0 reset, loop-variable declarations and success
 * return are missing from this extract. */
534 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
540 abi_ulong *target_fds;
542 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
543 if (!(target_fds = lock_user(VERIFY_WRITE,
545 sizeof(abi_ulong) * nw,
547 return -TARGET_EFAULT;
550 for (i = 0; i < nw; i++) {
552 for (j = 0; j < TARGET_ABI_BITS; j++) {
553 v |= ((FD_ISSET(k, fds) != 0) << j);
556 __put_user(v, &target_fds[i]);
/* Flush the nw words we wrote back to guest memory. */
559 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
/* Rescale a clock_t tick count from the host's HZ to the target's.
 * When both sides tick at the same rate the value passes through
 * unchanged (that branch is missing from this extract, as is the
 * HOST_HZ definition normally guarded by the __alpha__ test below). */
564 #if defined(__alpha__)
570 static inline abi_long host_to_target_clock_t(long ticks)
572 #if HOST_HZ == TARGET_HZ
/* Widen to 64 bits before multiplying to avoid overflow. */
575 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
579 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
580 const struct rusage *rusage)
582 struct target_rusage *target_rusage;
584 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
585 return -TARGET_EFAULT;
586 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
587 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
588 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
589 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
590 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
591 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
592 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
593 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
594 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
595 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
596 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
597 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
598 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
599 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
600 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
601 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
602 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
603 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
604 unlock_user_struct(target_rusage, target_addr, 1);
609 static inline abi_long copy_from_user_timeval(struct timeval *tv,
610 abi_ulong target_tv_addr)
612 struct target_timeval *target_tv;
614 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
615 return -TARGET_EFAULT;
617 __get_user(tv->tv_sec, &target_tv->tv_sec);
618 __get_user(tv->tv_usec, &target_tv->tv_usec);
620 unlock_user_struct(target_tv, target_tv_addr, 0);
625 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
626 const struct timeval *tv)
628 struct target_timeval *target_tv;
630 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
631 return -TARGET_EFAULT;
633 __put_user(tv->tv_sec, &target_tv->tv_sec);
634 __put_user(tv->tv_usec, &target_tv->tv_usec);
636 unlock_user_struct(target_tv, target_tv_addr, 1);
641 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
642 abi_ulong target_mq_attr_addr)
644 struct target_mq_attr *target_mq_attr;
646 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
647 target_mq_attr_addr, 1))
648 return -TARGET_EFAULT;
650 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
651 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
652 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
653 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
655 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
660 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
661 const struct mq_attr *attr)
663 struct target_mq_attr *target_mq_attr;
665 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
666 target_mq_attr_addr, 0))
667 return -TARGET_EFAULT;
669 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
670 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
671 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
672 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
674 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
679 /* do_select() must return target values and target errnos. */
/* Marshal the three guest fd_sets and the optional timeval into host
 * structures, run host select(), then copy the (modified) sets and the
 * remaining timeout back out.  NOTE(review): the lines that set
 * rfds_ptr/wfds_ptr/efds_ptr (NULL when the guest passed a null address),
 * the ret declaration and the final return are missing from this
 * extract. */
680 static abi_long do_select(int n,
681 abi_ulong rfd_addr, abi_ulong wfd_addr,
682 abi_ulong efd_addr, abi_ulong target_tv_addr)
684 fd_set rfds, wfds, efds;
685 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
686 struct timeval tv, *tv_ptr;
690 if (copy_from_user_fdset(&rfds, rfd_addr, n))
691 return -TARGET_EFAULT;
697 if (copy_from_user_fdset(&wfds, wfd_addr, n))
698 return -TARGET_EFAULT;
704 if (copy_from_user_fdset(&efds, efd_addr, n))
705 return -TARGET_EFAULT;
/* A null timeout address means "block indefinitely". */
711 if (target_tv_addr) {
712 if (copy_from_user_timeval(&tv, target_tv_addr))
713 return -TARGET_EFAULT;
719 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
/* On success, write the kernel-modified sets and timeout back. */
721 if (!is_error(ret)) {
722 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
723 return -TARGET_EFAULT;
724 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
725 return -TARGET_EFAULT;
726 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
727 return -TARGET_EFAULT;
729 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
730 return -TARGET_EFAULT;
736 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
737 abi_ulong target_addr,
740 struct target_sockaddr *target_saddr;
742 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
744 return -TARGET_EFAULT;
745 memcpy(addr, target_saddr, len);
746 addr->sa_family = tswap16(target_saddr->sa_family);
747 unlock_user(target_saddr, target_addr, 0);
752 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
753 struct sockaddr *addr,
756 struct target_sockaddr *target_saddr;
758 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
760 return -TARGET_EFAULT;
761 memcpy(target_saddr, addr, len);
762 target_saddr->sa_family = tswap16(addr->sa_family);
763 unlock_user(target_saddr, target_addr, len);
768 /* ??? Should this also swap msgh->name? */
/* Convert the guest's ancillary-data (control message) chain into the
 * host layout in msgh->msg_control.  SCM_RIGHTS fd arrays get per-element
 * swaps; anything else is copied raw with a warning.  NOTE(review):
 * several lines (space initialization, early return for an empty chain,
 * the NULL check on lock_user, closing braces) are missing from this
 * extract. */
769 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
770 struct target_msghdr *target_msgh)
772 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
773 abi_long msg_controllen;
774 abi_ulong target_cmsg_addr;
775 struct target_cmsghdr *target_cmsg;
778 msg_controllen = tswapl(target_msgh->msg_controllen);
779 if (msg_controllen < sizeof (struct target_cmsghdr))
781 target_cmsg_addr = tswapl(target_msgh->msg_control);
782 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
784 return -TARGET_EFAULT;
786 while (cmsg && target_cmsg) {
787 void *data = CMSG_DATA(cmsg);
788 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length = recorded cmsg_len minus the (target) header size. */
790 int len = tswapl(target_cmsg->cmsg_len)
791 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
793 space += CMSG_SPACE(len);
794 if (space > msgh->msg_controllen) {
795 space -= CMSG_SPACE(len);
796 gemu_log("Host cmsg overflow\n");
800 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
801 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
802 cmsg->cmsg_len = CMSG_LEN(len);
804 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
805 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
806 memcpy(data, target_data, len);
/* SCM_RIGHTS: swap each passed file descriptor individually. */
808 int *fd = (int *)data;
809 int *target_fd = (int *)target_data;
810 int i, numfds = len / sizeof(int);
812 for (i = 0; i < numfds; i++)
813 fd[i] = tswap32(target_fd[i]);
816 cmsg = CMSG_NXTHDR(msgh, cmsg);
817 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
819 unlock_user(target_cmsg, target_cmsg_addr, 0);
/* Report how much host control space was actually filled. */
821 msgh->msg_controllen = space;
825 /* ??? Should this also swap msgh->name? */
/* Inverse of target_to_host_cmsg: convert the host's ancillary-data chain
 * back into the guest layout at target_msgh->msg_control.  NOTE(review):
 * the second parameter line, space initialization, lock_user NULL check
 * and closing braces are missing from this extract. */
826 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
829 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
830 abi_long msg_controllen;
831 abi_ulong target_cmsg_addr;
832 struct target_cmsghdr *target_cmsg;
835 msg_controllen = tswapl(target_msgh->msg_controllen);
836 if (msg_controllen < sizeof (struct target_cmsghdr))
838 target_cmsg_addr = tswapl(target_msgh->msg_control);
839 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
841 return -TARGET_EFAULT;
843 while (cmsg && target_cmsg) {
844 void *data = CMSG_DATA(cmsg);
845 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length = host cmsg_len minus the (host) header size. */
847 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
849 space += TARGET_CMSG_SPACE(len);
850 if (space > msg_controllen) {
851 space -= TARGET_CMSG_SPACE(len);
852 gemu_log("Target cmsg overflow\n");
856 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
857 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
858 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
860 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
861 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
862 memcpy(target_data, data, len);
/* SCM_RIGHTS: swap each passed file descriptor individually. */
864 int *fd = (int *)data;
865 int *target_fd = (int *)target_data;
866 int i, numfds = len / sizeof(int);
868 for (i = 0; i < numfds; i++)
869 target_fd[i] = tswap32(fd[i]);
872 cmsg = CMSG_NXTHDR(msgh, cmsg);
873 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
/* Flush exactly the bytes we produced back to guest memory. */
875 unlock_user(target_cmsg, target_cmsg_addr, space);
877 target_msgh->msg_controllen = tswapl(space);
881 /* do_setsockopt() Must return target values and target errnos. */
/* Dispatch on (level, optname): TCP and most SOL_SOCKET options take a
 * 32-bit int read from guest memory; IP multicast TTL/LOOP additionally
 * accept a single byte.  TARGET_SO_* names are remapped to the host SO_*
 * values before calling the host setsockopt().  NOTE(review): the switch
 * statements, `break`s, local declarations (val, ret) and the final
 * return are missing from this extract. */
882 static abi_long do_setsockopt(int sockfd, int level, int optname,
883 abi_ulong optval_addr, socklen_t optlen)
890 /* TCP options all take an 'int' value. */
891 if (optlen < sizeof(uint32_t))
892 return -TARGET_EINVAL;
894 if (get_user_u32(val, optval_addr))
895 return -TARGET_EFAULT;
896 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
903 case IP_ROUTER_ALERT:
907 case IP_MTU_DISCOVER:
913 case IP_MULTICAST_TTL:
914 case IP_MULTICAST_LOOP:
/* These IP options accept either a full int or a single byte. */
916 if (optlen >= sizeof(uint32_t)) {
917 if (get_user_u32(val, optval_addr))
918 return -TARGET_EFAULT;
919 } else if (optlen >= 1) {
920 if (get_user_u8(val, optval_addr))
921 return -TARGET_EFAULT;
923 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
929 case TARGET_SOL_SOCKET:
931 /* Options with 'int' argument. */
932 case TARGET_SO_DEBUG:
935 case TARGET_SO_REUSEADDR:
936 optname = SO_REUSEADDR;
941 case TARGET_SO_ERROR:
944 case TARGET_SO_DONTROUTE:
945 optname = SO_DONTROUTE;
947 case TARGET_SO_BROADCAST:
948 optname = SO_BROADCAST;
950 case TARGET_SO_SNDBUF:
953 case TARGET_SO_RCVBUF:
956 case TARGET_SO_KEEPALIVE:
957 optname = SO_KEEPALIVE;
959 case TARGET_SO_OOBINLINE:
960 optname = SO_OOBINLINE;
962 case TARGET_SO_NO_CHECK:
963 optname = SO_NO_CHECK;
965 case TARGET_SO_PRIORITY:
966 optname = SO_PRIORITY;
969 case TARGET_SO_BSDCOMPAT:
970 optname = SO_BSDCOMPAT;
973 case TARGET_SO_PASSCRED:
974 optname = SO_PASSCRED;
976 case TARGET_SO_TIMESTAMP:
977 optname = SO_TIMESTAMP;
979 case TARGET_SO_RCVLOWAT:
980 optname = SO_RCVLOWAT;
982 case TARGET_SO_RCVTIMEO:
983 optname = SO_RCVTIMEO;
985 case TARGET_SO_SNDTIMEO:
986 optname = SO_SNDTIMEO;
/* Common tail for all int-valued SOL_SOCKET options. */
992 if (optlen < sizeof(uint32_t))
993 return -TARGET_EINVAL;
995 if (get_user_u32(val, optval_addr))
996 return -TARGET_EFAULT;
997 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1001 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1002 ret = -TARGET_ENOPROTOOPT;
1007 /* do_getsockopt() Must return target values and target errnos. */
/* Mirror of do_setsockopt for reads: fetch the guest's buffer length,
 * call host getsockopt() into an int, then write back either 4 bytes or
 * 1 byte depending on the requested length.  Struct-valued options
 * (LINGER, timeouts, PEERCRED, PEERNAME) are not implemented.
 * NOTE(review): the switch statements, `break`s, local declarations
 * (len, lv, val, ret) and the final return are missing from this
 * extract. */
1008 static abi_long do_getsockopt(int sockfd, int level, int optname,
1009 abi_ulong optval_addr, abi_ulong optlen)
1016 case TARGET_SOL_SOCKET:
1019 case TARGET_SO_LINGER:
1020 case TARGET_SO_RCVTIMEO:
1021 case TARGET_SO_SNDTIMEO:
1022 case TARGET_SO_PEERCRED:
1023 case TARGET_SO_PEERNAME:
1024 /* These don't just return a single integer */
1031 /* TCP options all take an 'int' value. */
1033 if (get_user_u32(len, optlen))
1034 return -TARGET_EFAULT;
1036 return -TARGET_EINVAL;
1038 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1045 if (put_user_u32(val, optval_addr))
1046 return -TARGET_EFAULT;
1048 if (put_user_u8(val, optval_addr))
1049 return -TARGET_EFAULT;
1051 if (put_user_u32(len, optlen))
1052 return -TARGET_EFAULT;
1059 case IP_ROUTER_ALERT:
1063 case IP_MTU_DISCOVER:
1069 case IP_MULTICAST_TTL:
1070 case IP_MULTICAST_LOOP:
1071 if (get_user_u32(len, optlen))
1072 return -TARGET_EFAULT;
1074 return -TARGET_EINVAL;
1076 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* Short guest buffer: narrow small in-range values to one byte. */
1079 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1081 if (put_user_u32(len, optlen)
1082 || put_user_u8(val, optval_addr))
1083 return -TARGET_EFAULT;
1085 if (len > sizeof(int))
1087 if (put_user_u32(len, optlen)
1088 || put_user_u32(val, optval_addr))
1089 return -TARGET_EFAULT;
1093 ret = -TARGET_ENOPROTOOPT;
1099 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1101 ret = -TARGET_EOPNOTSUPP;
1108 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1109 * other lock functions have a return code of 0 for failure.
/* Translate a guest iovec array into host struct iovec entries, locking
 * each non-empty buffer into host-accessible memory.  NOTE(review): the
 * declarations of i/base, the NULL check on lock_user, closing braces and
 * the success return are missing from this extract. */
1111 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1112 int count, int copy)
1114 struct target_iovec *target_vec;
1118 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1120 return -TARGET_EFAULT;
1121 for(i = 0;i < count; i++) {
1122 base = tswapl(target_vec[i].iov_base);
1123 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1124 if (vec[i].iov_len != 0) {
1125 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1126 /* Don't check lock_user return value. We must call writev even
1127 if an element has an invalid base address. */
1129 /* zero length pointer is ignored */
1130 vec[i].iov_base = NULL;
1133 unlock_user (target_vec, target_addr, 0);
/* Release the buffers locked by lock_iovec; when `copy` is set, flush
 * each buffer's contents back to guest memory (the read direction).
 * NOTE(review): declarations of i/base, the NULL check on lock_user,
 * closing braces and the success return are missing from this extract. */
1137 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1138 int count, int copy)
1140 struct target_iovec *target_vec;
1144 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1146 return -TARGET_EFAULT;
1147 for(i = 0;i < count; i++) {
1148 if (target_vec[i].iov_base) {
1149 base = tswapl(target_vec[i].iov_base);
1150 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1153 unlock_user (target_vec, target_addr, 0);
1158 /* do_socket() Must return target values and target errnos. */
/* Create a host socket on the guest's behalf.  MIPS uses different
 * SOCK_* numbering, so its values are remapped first (the switch
 * skeleton, `break`s and #endif are missing from this extract). */
1159 static abi_long do_socket(int domain, int type, int protocol)
1161 #if defined(TARGET_MIPS)
1163 case TARGET_SOCK_DGRAM:
1166 case TARGET_SOCK_STREAM:
1169 case TARGET_SOCK_RAW:
1172 case TARGET_SOCK_RDM:
1175 case TARGET_SOCK_SEQPACKET:
1176 type = SOCK_SEQPACKET;
1178 case TARGET_SOCK_PACKET:
1183 if (domain == PF_NETLINK)
1184 return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1185 return get_errno(socket(domain, type, protocol));
/* Upper bound for sockaddr buffers, matching the kernel's own limit. */
1188 /* MAX_SOCK_ADDR from linux/net/socket.c */
1189 #define MAX_SOCK_ADDR 128
1191 /* do_bind() Must return target values and target errnos. */
1192 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1197 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1198 return -TARGET_EINVAL;
1200 addr = alloca(addrlen);
1202 target_to_host_sockaddr(addr, target_addr, addrlen);
1203 return get_errno(bind(sockfd, addr, addrlen));
1206 /* do_connect() Must return target values and target errnos. */
1207 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1212 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1213 return -TARGET_EINVAL;
1215 addr = alloca(addrlen);
1217 target_to_host_sockaddr(addr, target_addr, addrlen);
1218 return get_errno(connect(sockfd, addr, addrlen));
1221 /* do_sendrecvmsg() Must return target values and target errnos. */
/* Shared backend for the sendmsg/recvmsg syscalls (`send` selects the
 * direction): translate the guest msghdr (name, iovec array, control
 * data) into a host struct msghdr, call the host syscall, then convert
 * results back for recvmsg.  NOTE(review): local declarations (msg, vec,
 * count, ret), several closing braces and the final return are missing
 * from this extract. */
1222 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1223 int flags, int send)
1226 struct target_msghdr *msgp;
1230 abi_ulong target_vec;
1233 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1237 return -TARGET_EFAULT;
1238 if (msgp->msg_name) {
1239 msg.msg_namelen = tswap32(msgp->msg_namelen);
1240 msg.msg_name = alloca(msg.msg_namelen);
1241 target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1244 msg.msg_name = NULL;
1245 msg.msg_namelen = 0;
/* Over-allocate control space: host cmsg headers can be larger than
 * the target's, so 2x the guest length is used as a safe bound. */
1247 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1248 msg.msg_control = alloca(msg.msg_controllen);
1249 msg.msg_flags = tswap32(msgp->msg_flags);
1251 count = tswapl(msgp->msg_iovlen);
1252 vec = alloca(count * sizeof(struct iovec));
1253 target_vec = tswapl(msgp->msg_iov);
1254 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1255 msg.msg_iovlen = count;
1259 ret = target_to_host_cmsg(&msg, msgp);
1261 ret = get_errno(sendmsg(fd, &msg, flags));
1263 ret = get_errno(recvmsg(fd, &msg, flags));
1264 if (!is_error(ret)) {
1266 ret = host_to_target_cmsg(msgp, &msg);
/* Copy buffers back to the guest only for the receive direction. */
1271 unlock_iovec(vec, target_vec, count, !send);
1272 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1276 /* do_accept() Must return target values and target errnos. */
/* accept() wrapper: read the guest's addrlen, accept on a stack buffer,
 * then write the peer address and updated length back to the guest.
 * NOTE(review): local declarations (addrlen, addr, ret), a closing brace
 * and the final return are missing from this extract. */
1277 static abi_long do_accept(int fd, abi_ulong target_addr,
1278 abi_ulong target_addrlen_addr)
1284 if (get_user_u32(addrlen, target_addrlen_addr))
1285 return -TARGET_EFAULT;
1287 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1288 return -TARGET_EINVAL;
1290 addr = alloca(addrlen);
1292 ret = get_errno(accept(fd, addr, &addrlen));
1293 if (!is_error(ret)) {
1294 host_to_target_sockaddr(target_addr, addr, addrlen);
1295 if (put_user_u32(addrlen, target_addrlen_addr))
1296 ret = -TARGET_EFAULT;
1301 /* do_getpeername() Must return target values and target errnos. */
/* Same value-result addrlen protocol as do_accept(), wrapping the host
   getpeername(). */
1302 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1303 abi_ulong target_addrlen_addr)
1309 if (get_user_u32(addrlen, target_addrlen_addr))
1310 return -TARGET_EFAULT;
1312 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1313 return -TARGET_EINVAL;
1315 addr = alloca(addrlen);
1317 ret = get_errno(getpeername(fd, addr, &addrlen));
1318 if (!is_error(ret)) {
1319 host_to_target_sockaddr(target_addr, addr, addrlen);
1320 if (put_user_u32(addrlen, target_addrlen_addr))
1321 ret = -TARGET_EFAULT;
1326 /* do_getsockname() Must return target values and target errnos. */
/* Same value-result addrlen protocol as do_accept(), wrapping the host
   getsockname(). */
1327 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1328 abi_ulong target_addrlen_addr)
1334 if (get_user_u32(addrlen, target_addrlen_addr))
1335 return -TARGET_EFAULT;
1337 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1338 return -TARGET_EINVAL;
1340 addr = alloca(addrlen);
1342 ret = get_errno(getsockname(fd, addr, &addrlen));
1343 if (!is_error(ret)) {
1344 host_to_target_sockaddr(target_addr, addr, addrlen);
1345 if (put_user_u32(addrlen, target_addrlen_addr))
1346 ret = -TARGET_EFAULT;
1351 /* do_socketpair() Must return target values and target errnos. */
/* Creates a host socketpair and stores the two descriptors as
   consecutive 32-bit ints at the guest address. */
1352 static abi_long do_socketpair(int domain, int type, int protocol,
1353 abi_ulong target_tab_addr)
1358 ret = get_errno(socketpair(domain, type, protocol, tab));
1359 if (!is_error(ret)) {
1360 if (put_user_s32(tab[0], target_tab_addr)
1361 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1362 ret = -TARGET_EFAULT;
1367 /* do_sendto() Must return target values and target errnos. */
/* Backend for both sendto (target_addr != 0) and plain send
   (target_addr == 0): locks the guest payload and, when a destination
   is given, converts it before the host call. */
1368 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1369 abi_ulong target_addr, socklen_t addrlen)
1375 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1376 return -TARGET_EINVAL;
1378 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1380 return -TARGET_EFAULT;
1382 addr = alloca(addrlen);
1383 target_to_host_sockaddr(addr, target_addr, addrlen);
1384 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1386 ret = get_errno(send(fd, host_msg, len, flags));
/* Read-only lock: nothing was written back to guest memory. */
1388 unlock_user(host_msg, msg, 0);
1392 /* do_recvfrom() Must return target values and target errnos. */
/* Backend for both recvfrom (target_addr != 0) and plain recv
   (target_addr == 0). On success the source address and value-result
   addrlen are copied back to the guest. */
1393 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1394 abi_ulong target_addr,
1395 abi_ulong target_addrlen)
1402 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1404 return -TARGET_EFAULT;
1406 if (get_user_u32(addrlen, target_addrlen)) {
1407 ret = -TARGET_EFAULT;
1410 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR) {
1411 ret = -TARGET_EINVAL;
1414 addr = alloca(addrlen);
1415 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1417 addr = NULL; /* To keep compiler quiet. */
1418 ret = get_errno(recv(fd, host_msg, len, flags));
1420 if (!is_error(ret)) {
1422 host_to_target_sockaddr(target_addr, addr, addrlen);
1423 if (put_user_u32(addrlen, target_addrlen)) {
1424 ret = -TARGET_EFAULT;
/* Success path: mark the full buffer dirty so received bytes reach the guest. */
1428 unlock_user(host_msg, msg, len);
/* Error path: unlock without writing anything back. */
1431 unlock_user(host_msg, msg, 0);
1436 #ifdef TARGET_NR_socketcall
1437 /* do_socketcall() Must return target values and target errnos. */
/* Demultiplexer for the socketcall(2) super-syscall used by several
   32-bit ABIs: `num` selects the operation, `vptr` points at the
   argument vector in guest memory. Each case pulls its arguments out
   of the vector (stride n == sizeof(abi_ulong)) and dispatches to the
   matching do_* helper or directly to the host syscall. */
1438 static abi_long do_socketcall(int num, abi_ulong vptr)
1441 const int n = sizeof(abi_ulong);
1446 int domain, type, protocol;
1448 if (get_user_s32(domain, vptr)
1449 || get_user_s32(type, vptr + n)
1450 || get_user_s32(protocol, vptr + 2 * n))
1451 return -TARGET_EFAULT;
1453 ret = do_socket(domain, type, protocol);
1459 abi_ulong target_addr;
1462 if (get_user_s32(sockfd, vptr)
1463 || get_user_ual(target_addr, vptr + n)
1464 || get_user_u32(addrlen, vptr + 2 * n))
1465 return -TARGET_EFAULT;
1467 ret = do_bind(sockfd, target_addr, addrlen);
1470 case SOCKOP_connect:
1473 abi_ulong target_addr;
1476 if (get_user_s32(sockfd, vptr)
1477 || get_user_ual(target_addr, vptr + n)
1478 || get_user_u32(addrlen, vptr + 2 * n))
1479 return -TARGET_EFAULT;
1481 ret = do_connect(sockfd, target_addr, addrlen);
1486 int sockfd, backlog;
1488 if (get_user_s32(sockfd, vptr)
1489 || get_user_s32(backlog, vptr + n))
1490 return -TARGET_EFAULT;
/* listen() needs no argument conversion beyond the fetch. */
1492 ret = get_errno(listen(sockfd, backlog));
1498 abi_ulong target_addr, target_addrlen;
1500 if (get_user_s32(sockfd, vptr)
1501 || get_user_ual(target_addr, vptr + n)
1502 || get_user_u32(target_addrlen, vptr + 2 * n))
1503 return -TARGET_EFAULT;
1505 ret = do_accept(sockfd, target_addr, target_addrlen);
1508 case SOCKOP_getsockname:
1511 abi_ulong target_addr, target_addrlen;
1513 if (get_user_s32(sockfd, vptr)
1514 || get_user_ual(target_addr, vptr + n)
1515 || get_user_u32(target_addrlen, vptr + 2 * n))
1516 return -TARGET_EFAULT;
1518 ret = do_getsockname(sockfd, target_addr, target_addrlen);
1521 case SOCKOP_getpeername:
1524 abi_ulong target_addr, target_addrlen;
1526 if (get_user_s32(sockfd, vptr)
1527 || get_user_ual(target_addr, vptr + n)
1528 || get_user_u32(target_addrlen, vptr + 2 * n))
1529 return -TARGET_EFAULT;
1531 ret = do_getpeername(sockfd, target_addr, target_addrlen);
1534 case SOCKOP_socketpair:
1536 int domain, type, protocol;
1539 if (get_user_s32(domain, vptr)
1540 || get_user_s32(type, vptr + n)
1541 || get_user_s32(protocol, vptr + 2 * n)
1542 || get_user_ual(tab, vptr + 3 * n))
1543 return -TARGET_EFAULT;
1545 ret = do_socketpair(domain, type, protocol, tab);
/* send: no destination, so pass addr/addrlen as 0 to do_sendto(). */
1555 if (get_user_s32(sockfd, vptr)
1556 || get_user_ual(msg, vptr + n)
1557 || get_user_ual(len, vptr + 2 * n)
1558 || get_user_s32(flags, vptr + 3 * n))
1559 return -TARGET_EFAULT;
1561 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
/* recv: likewise delegates to do_recvfrom() with no source address. */
1571 if (get_user_s32(sockfd, vptr)
1572 || get_user_ual(msg, vptr + n)
1573 || get_user_ual(len, vptr + 2 * n)
1574 || get_user_s32(flags, vptr + 3 * n))
1575 return -TARGET_EFAULT;
1577 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
1589 if (get_user_s32(sockfd, vptr)
1590 || get_user_ual(msg, vptr + n)
1591 || get_user_ual(len, vptr + 2 * n)
1592 || get_user_s32(flags, vptr + 3 * n)
1593 || get_user_ual(addr, vptr + 4 * n)
1594 || get_user_u32(addrlen, vptr + 5 * n))
1595 return -TARGET_EFAULT;
1597 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
1600 case SOCKOP_recvfrom:
1609 if (get_user_s32(sockfd, vptr)
1610 || get_user_ual(msg, vptr + n)
1611 || get_user_ual(len, vptr + 2 * n)
1612 || get_user_s32(flags, vptr + 3 * n)
1613 || get_user_ual(addr, vptr + 4 * n)
1614 || get_user_u32(addrlen, vptr + 5 * n))
1615 return -TARGET_EFAULT;
1617 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
1620 case SOCKOP_shutdown:
1624 if (get_user_s32(sockfd, vptr)
1625 || get_user_s32(how, vptr + n))
1626 return -TARGET_EFAULT;
1628 ret = get_errno(shutdown(sockfd, how));
1631 case SOCKOP_sendmsg:
1632 case SOCKOP_recvmsg:
1635 abi_ulong target_msg;
1638 if (get_user_s32(fd, vptr)
1639 || get_user_ual(target_msg, vptr + n)
1640 || get_user_s32(flags, vptr + 2 * n))
1641 return -TARGET_EFAULT;
/* Fourth argument selects the direction for the shared helper. */
1643 ret = do_sendrecvmsg(fd, target_msg, flags,
1644 (num == SOCKOP_sendmsg));
1647 case SOCKOP_setsockopt:
1655 if (get_user_s32(sockfd, vptr)
1656 || get_user_s32(level, vptr + n)
1657 || get_user_s32(optname, vptr + 2 * n)
1658 || get_user_ual(optval, vptr + 3 * n)
1659 || get_user_u32(optlen, vptr + 4 * n))
1660 return -TARGET_EFAULT;
1662 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
1665 case SOCKOP_getsockopt:
1673 if (get_user_s32(sockfd, vptr)
1674 || get_user_s32(level, vptr + n)
1675 || get_user_s32(optname, vptr + 2 * n)
1676 || get_user_ual(optval, vptr + 3 * n)
1677 || get_user_u32(optlen, vptr + 4 * n))
1678 return -TARGET_EFAULT;
1680 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
/* Unknown sub-call: log once and report ENOSYS to the guest. */
1684 gemu_log("Unsupported socketcall: %d\n", num);
1685 ret = -TARGET_ENOSYS;
/* Fixed-size table tracking guest shmat() mappings so do_shmdt() can
   restore page protections; a slot with start == 0 is free. */
1692 #define N_SHM_REGIONS 32
1694 static struct shm_region {
1697 } shm_regions[N_SHM_REGIONS];
/* Target-ABI layout of struct ipc_perm (SysV IPC permission header).
   Field widths/padding mirror the guest kernel ABI, not the host's. */
1699 struct target_ipc_perm
1706 unsigned short int mode;
1707 unsigned short int __pad1;
1708 unsigned short int __seq;
1709 unsigned short int __pad2;
1710 abi_ulong __unused1;
1711 abi_ulong __unused2;
/* Target-ABI layout of struct semid_ds (semaphore set descriptor). */
1714 struct target_semid_ds
1716 struct target_ipc_perm sem_perm;
1717 abi_ulong sem_otime;
1718 abi_ulong __unused1;
1719 abi_ulong sem_ctime;
1720 abi_ulong __unused2;
1721 abi_ulong sem_nsems;
1722 abi_ulong __unused3;
1723 abi_ulong __unused4;
/* Copy the ipc_perm header embedded in a guest semid_ds at target_addr
   into the host structure, byte-swapping each field. Returns 0 or
   -TARGET_EFAULT. */
1726 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
1727 abi_ulong target_addr)
1729 struct target_ipc_perm *target_ip;
1730 struct target_semid_ds *target_sd;
/* target_addr points at the enclosing semid_ds; sem_perm is its first member. */
1732 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
1733 return -TARGET_EFAULT;
1734 target_ip=&(target_sd->sem_perm);
1735 host_ip->__key = tswapl(target_ip->__key);
1736 host_ip->uid = tswapl(target_ip->uid);
1737 host_ip->gid = tswapl(target_ip->gid);
1738 host_ip->cuid = tswapl(target_ip->cuid);
1739 host_ip->cgid = tswapl(target_ip->cgid);
1740 host_ip->mode = tswapl(target_ip->mode);
1741 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_ipc_perm(): write the host ipc_perm fields
   into the guest semid_ds at target_addr. Returns 0 or -TARGET_EFAULT. */
1745 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
1746 struct ipc_perm *host_ip)
1748 struct target_ipc_perm *target_ip;
1749 struct target_semid_ds *target_sd;
1751 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
1752 return -TARGET_EFAULT;
1753 target_ip = &(target_sd->sem_perm);
1754 target_ip->__key = tswapl(host_ip->__key);
1755 target_ip->uid = tswapl(host_ip->uid);
1756 target_ip->gid = tswapl(host_ip->gid);
1757 target_ip->cuid = tswapl(host_ip->cuid);
1758 target_ip->cgid = tswapl(host_ip->cgid);
1759 target_ip->mode = tswapl(host_ip->mode);
1760 unlock_user_struct(target_sd, target_addr, 1);
/* Convert a guest semid_ds at target_addr into the host structure:
   permission header via target_to_host_ipc_perm(), then the swapped
   count/time fields. Returns 0 or -TARGET_EFAULT. */
1764 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
1765 abi_ulong target_addr)
1767 struct target_semid_ds *target_sd;
1769 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
1770 return -TARGET_EFAULT;
/* NOTE(review): early return here leaves target_sd locked — confirm
   whether unlock is handled on this path in the elided code. */
1771 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
1772 return -TARGET_EFAULT;
1773 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
1774 host_sd->sem_otime = tswapl(target_sd->sem_otime);
1775 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
1776 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_semid_ds(): write the host semid_ds back
   into guest memory at target_addr. Returns 0 or -TARGET_EFAULT. */
1780 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
1781 struct semid_ds *host_sd)
1783 struct target_semid_ds *target_sd;
1785 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
1786 return -TARGET_EFAULT;
/* Fixed stray double semicolon after the return (was ";;" — a
   harmless empty statement, but a typo nonetheless). */
1787 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
1788 return -TARGET_EFAULT;
1789 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
1790 target_sd->sem_otime = tswapl(host_sd->sem_otime);
1791 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
1792 unlock_user_struct(target_sd, target_addr, 1);
/* Target-ABI layout of struct seminfo (semctl IPC_INFO/SEM_INFO result). */
1796 struct target_seminfo {
/* Copy a host seminfo into guest memory at target_addr, one field at a
   time via __put_user (which handles byte order). Returns 0 or
   -TARGET_EFAULT. */
1809 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
1810 struct seminfo *host_seminfo)
1812 struct target_seminfo *target_seminfo;
1813 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
1814 return -TARGET_EFAULT;
1815 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
1816 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
1817 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
1818 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
1819 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
1820 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
1821 __put_user(host_seminfo->semume, &target_seminfo->semume);
1822 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
1823 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
1824 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
1825 unlock_user_struct(target_seminfo, target_addr, 1);
/* Host-side union semun members (semctl's fourth argument; glibc does
   not define the union itself). */
1831 struct semid_ds *buf;
1832 unsigned short *array;
1833 struct seminfo *__buf;
/* Guest-ABI counterpart: same alternatives expressed as abi_ulong
   guest pointers/values. */
1836 union target_semun {
/* For semctl SETALL: query the set's nsems via IPC_STAT, allocate a
   host array in *host_array, and fill it from the guest array at
   target_addr. Caller owns (and must free) *host_array on success. */
1843 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
1844 abi_ulong target_addr)
1847 unsigned short *array;
1849 struct semid_ds semid_ds;
1852 semun.buf = &semid_ds;
/* IPC_STAT only to learn how many semaphores the set holds. */
1854 ret = semctl(semid, 0, IPC_STAT, semun);
1856 return get_errno(ret);
1858 nsems = semid_ds.sem_nsems;
/* NOTE(review): malloc result is not NULL-checked, and *host_array is
   not freed on the -TARGET_EFAULT path below — leak candidate; confirm
   against the elided lines. */
1860 *host_array = malloc(nsems*sizeof(unsigned short));
1861 array = lock_user(VERIFY_READ, target_addr,
1862 nsems*sizeof(unsigned short), 1);
1864 return -TARGET_EFAULT;
1866 for(i=0; i<nsems; i++) {
1867 __get_user((*host_array)[i], &array[i]);
1869 unlock_user(array, target_addr, 0);
/* For semctl GETALL: copy the host semaphore-value array back to the
   guest array at target_addr; nsems is re-queried via IPC_STAT. */
1874 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
1875 unsigned short **host_array)
1878 unsigned short *array;
1880 struct semid_ds semid_ds;
1883 semun.buf = &semid_ds;
1885 ret = semctl(semid, 0, IPC_STAT, semun);
1887 return get_errno(ret);
1889 nsems = semid_ds.sem_nsems;
1891 array = lock_user(VERIFY_WRITE, target_addr,
1892 nsems*sizeof(unsigned short), 0);
1894 return -TARGET_EFAULT;
1896 for(i=0; i<nsems; i++) {
1897 __put_user((*host_array)[i], &array[i]);
1900 unlock_user(array, target_addr, 1);
/* Emulate semctl(2): convert the target_semun argument according to
   the command class (struct, value, array, info), call the host
   semctl, and convert results back. Returns target errnos. */
1905 static inline abi_long do_semctl(int semid, int semnum, int cmd,
1906 union target_semun target_su)
1909 struct semid_ds dsarg;
1910 unsigned short *array;
1911 struct seminfo seminfo;
1912 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET class: semid_ds round-trip through dsarg. */
1921 err = target_to_host_semid_ds(&dsarg, target_su.buf);
1925 ret = get_errno(semctl(semid, semnum, cmd, arg));
1926 err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* GETVAL/SETVAL class: plain integer value, byte-swapped both ways. */
1932 arg.val = tswapl(target_su.val);
1933 ret = get_errno(semctl(semid, semnum, cmd, arg));
1934 target_su.val = tswapl(arg.val);
/* GETALL/SETALL class: whole semaphore-value array round-trip. */
1938 err = target_to_host_semarray(semid, &array, target_su.array);
1942 ret = get_errno(semctl(semid, semnum, cmd, arg));
1943 err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_INFO/SEM_INFO class: seminfo copied back to the guest. */
1949 arg.__buf = &seminfo;
1950 ret = get_errno(semctl(semid, semnum, cmd, arg));
1951 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Remaining commands (e.g. IPC_RMID, GETPID) take no argument. */
1959 ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Target-ABI layout of struct sembuf (one semop operation). */
1966 struct target_sembuf {
1967 unsigned short sem_num;
/* Copy an array of nsops guest sembufs at target_addr into the host
   array, swapping each field. Returns 0 or -TARGET_EFAULT. */
1972 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
1973 abi_ulong target_addr,
1976 struct target_sembuf *target_sembuf;
1979 target_sembuf = lock_user(VERIFY_READ, target_addr,
1980 nsops*sizeof(struct target_sembuf), 1);
1982 return -TARGET_EFAULT;
1984 for(i=0; i<nsops; i++) {
/* NOTE(review): __put_user for a target-to-host copy reads oddly —
   presumably works because source/dest are symmetric here; __get_user
   would match the direction. Verify against the macro definitions. */
1985 __put_user(target_sembuf[i].sem_num, &host_sembuf[i].sem_num);
1986 __put_user(target_sembuf[i].sem_op, &host_sembuf[i].sem_op);
1987 __put_user(target_sembuf[i].sem_flg, &host_sembuf[i].sem_flg);
1990 unlock_user(target_sembuf, target_addr, 0);
/* Emulate semop(2): convert the guest sembuf array, then perform the
   host semop. Returns target errnos like every other do_* wrapper.
   Fix: the original returned the raw host semop() result (-1 with host
   errno on failure) instead of routing it through get_errno(), so
   guests saw -1 rather than a proper -TARGET_Exxx code. */
1995 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
1997 struct sembuf sops[nsops];
1999 if (target_to_host_sembuf(sops, ptr, nsops))
2000 return -TARGET_EFAULT;
2002 return get_errno(semop(semid, sops, nsops));
/* Target-ABI layout of struct msqid_ds (message queue descriptor).
   On 32-bit targets each time_t field is followed by a pad word so the
   layout matches the 64-bit-time-capable kernel ABI. */
2005 struct target_msqid_ds
2007 struct target_ipc_perm msg_perm;
2008 abi_ulong msg_stime;
2009 #if TARGET_ABI_BITS == 32
2010 abi_ulong __unused1;
2012 abi_ulong msg_rtime;
2013 #if TARGET_ABI_BITS == 32
2014 abi_ulong __unused2;
2016 abi_ulong msg_ctime;
2017 #if TARGET_ABI_BITS == 32
2018 abi_ulong __unused3;
2020 abi_ulong __msg_cbytes;
2022 abi_ulong msg_qbytes;
2023 abi_ulong msg_lspid;
2024 abi_ulong msg_lrpid;
2025 abi_ulong __unused4;
2026 abi_ulong __unused5;
/* Convert a guest msqid_ds at target_addr into the host structure
   (permission header plus swapped counters/times). Returns 0 or
   -TARGET_EFAULT. */
2029 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2030 abi_ulong target_addr)
2032 struct target_msqid_ds *target_md;
2034 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2035 return -TARGET_EFAULT;
2036 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2037 return -TARGET_EFAULT;
2038 host_md->msg_stime = tswapl(target_md->msg_stime);
2039 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2040 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2041 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2042 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2043 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2044 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2045 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2046 unlock_user_struct(target_md, target_addr, 0);
/* Inverse of target_to_host_msqid_ds(): write the host msqid_ds back
   into guest memory at target_addr. Returns 0 or -TARGET_EFAULT. */
2050 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2051 struct msqid_ds *host_md)
2053 struct target_msqid_ds *target_md;
2055 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2056 return -TARGET_EFAULT;
2057 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2058 return -TARGET_EFAULT;
2059 target_md->msg_stime = tswapl(host_md->msg_stime);
2060 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2061 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2062 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2063 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2064 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2065 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2066 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2067 unlock_user_struct(target_md, target_addr, 1);
/* Target-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO result). */
2071 struct target_msginfo {
2079 unsigned short int msgseg;
/* Copy a host msginfo into guest memory at target_addr via __put_user.
   Returns 0 or -TARGET_EFAULT. */
2082 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2083 struct msginfo *host_msginfo)
2085 struct target_msginfo *target_msginfo;
2086 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2087 return -TARGET_EFAULT;
2088 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2089 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2090 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2091 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2092 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2093 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2094 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2095 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2096 unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulate msgctl(2): round-trip the msqid_ds for STAT/SET commands,
   pass NULL for argument-less commands, and copy msginfo back for the
   INFO commands. Returns target errnos. */
2100 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2102 struct msqid_ds dsarg;
2103 struct msginfo msginfo;
2104 abi_long ret = -TARGET_EINVAL;
2112 if (target_to_host_msqid_ds(&dsarg,ptr))
2113 return -TARGET_EFAULT;
2114 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2115 if (host_to_target_msqid_ds(ptr,&dsarg))
2116 return -TARGET_EFAULT;
2119 ret = get_errno(msgctl(msgid, cmd, NULL));
/* INFO commands: kernel writes a msginfo through the msqid_ds pointer. */
2123 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2124 if (host_to_target_msginfo(ptr, &msginfo))
2125 return -TARGET_EFAULT;
/* Target-ABI layout of struct msgbuf (mtype followed by mtext[]). */
2132 struct target_msgbuf {
/* Emulate msgsnd(2): stage the guest message in a heap buffer with the
   mtype byte-swapped, then send on the host queue. */
2137 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2138 unsigned int msgsz, int msgflg)
2140 struct target_msgbuf *target_mb;
2141 struct msgbuf *host_mb;
2144 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2145 return -TARGET_EFAULT;
/* NOTE(review): malloc result unchecked; host_mb presumably freed on
   an elided line before return — confirm. */
2146 host_mb = malloc(msgsz+sizeof(long));
2147 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2148 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2149 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2151 unlock_user_struct(target_mb, msgp, 0);
/* Emulate msgrcv(2): receive into a heap buffer, then copy mtext bytes
   (ret = number received) and the swapped mtype back into the guest
   msgbuf at msgp. */
2156 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2157 unsigned int msgsz, abi_long msgtyp,
2160 struct target_msgbuf *target_mb;
2162 struct msgbuf *host_mb;
2165 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2166 return -TARGET_EFAULT;
/* NOTE(review): malloc result unchecked — confirm handling on elided lines. */
2168 host_mb = malloc(msgsz+sizeof(long));
2169 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
/* mtext lives right after the abi_ulong mtype in the guest struct. */
2172 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2173 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2174 if (!target_mtext) {
2175 ret = -TARGET_EFAULT;
2178 memcpy(target_mb->mtext, host_mb->mtext, ret);
2179 unlock_user(target_mtext, target_mtext_addr, ret);
2182 target_mb->mtype = tswapl(host_mb->mtype);
2187 unlock_user_struct(target_mb, msgp, 1);
/* Target-ABI layout of struct shmid_ds (shared-memory descriptor).
   As with target_msqid_ds, 32-bit targets carry pad words after each
   time field. */
2191 struct target_shmid_ds
2193 struct target_ipc_perm shm_perm;
2194 abi_ulong shm_segsz;
2195 abi_ulong shm_atime;
2196 #if TARGET_ABI_BITS == 32
2197 abi_ulong __unused1;
2199 abi_ulong shm_dtime;
2200 #if TARGET_ABI_BITS == 32
2201 abi_ulong __unused2;
2203 abi_ulong shm_ctime;
2204 #if TARGET_ABI_BITS == 32
2205 abi_ulong __unused3;
2209 abi_ulong shm_nattch;
2210 unsigned long int __unused4;
2211 unsigned long int __unused5;
/* Convert a guest shmid_ds at target_addr into the host structure.
   Returns 0 or -TARGET_EFAULT. */
2214 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2215 abi_ulong target_addr)
2217 struct target_shmid_ds *target_sd;
2219 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2220 return -TARGET_EFAULT;
2221 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2222 return -TARGET_EFAULT;
/* NOTE(review): __put_user used for a target-to-host copy, as in
   target_to_host_sembuf() — presumably symmetric; verify. */
2223 __put_user(target_sd->shm_segsz, &host_sd->shm_segsz);
2224 __put_user(target_sd->shm_atime, &host_sd->shm_atime);
2225 __put_user(target_sd->shm_dtime, &host_sd->shm_dtime);
2226 __put_user(target_sd->shm_ctime, &host_sd->shm_ctime);
2227 __put_user(target_sd->shm_cpid, &host_sd->shm_cpid);
2228 __put_user(target_sd->shm_lpid, &host_sd->shm_lpid);
2229 __put_user(target_sd->shm_nattch, &host_sd->shm_nattch);
2230 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_shmid_ds(): write the host shmid_ds back
   into guest memory at target_addr. Returns 0 or -TARGET_EFAULT. */
2234 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2235 struct shmid_ds *host_sd)
2237 struct target_shmid_ds *target_sd;
2239 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2240 return -TARGET_EFAULT;
2241 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2242 return -TARGET_EFAULT;
2243 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2244 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2245 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2246 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2247 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2248 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2249 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2250 unlock_user_struct(target_sd, target_addr, 1);
/* Target-ABI layout of struct shminfo (shmctl IPC_INFO result). */
2254 struct target_shminfo {
/* Copy a host shminfo into guest memory at target_addr via __put_user.
   Returns 0 or -TARGET_EFAULT. */
2262 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2263 struct shminfo *host_shminfo)
2265 struct target_shminfo *target_shminfo;
2266 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2267 return -TARGET_EFAULT;
2268 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2269 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2270 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2271 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2272 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2273 unlock_user_struct(target_shminfo, target_addr, 1);
/* Target-ABI layout of struct shm_info (shmctl SHM_INFO result). */
2277 struct target_shm_info {
2282 abi_ulong swap_attempts;
2283 abi_ulong swap_successes;
/* Copy a host shm_info into guest memory at target_addr via __put_user.
   Returns 0 or -TARGET_EFAULT. */
2286 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2287 struct shm_info *host_shm_info)
2289 struct target_shm_info *target_shm_info;
2290 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2291 return -TARGET_EFAULT;
2292 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2293 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2294 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2295 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2296 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2297 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2298 unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(2): round-trip shmid_ds for STAT/SET, copy shminfo or
   shm_info back for the INFO commands, pass NULL otherwise. Returns
   target errnos. */
2302 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2304 struct shmid_ds dsarg;
2305 struct shminfo shminfo;
2306 struct shm_info shm_info;
2307 abi_long ret = -TARGET_EINVAL;
2315 if (target_to_host_shmid_ds(&dsarg, buf))
2316 return -TARGET_EFAULT;
2317 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2318 if (host_to_target_shmid_ds(buf, &dsarg))
2319 return -TARGET_EFAULT;
/* IPC_INFO: kernel writes a shminfo through the shmid_ds pointer. */
2322 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2323 if (host_to_target_shminfo(buf, &shminfo))
2324 return -TARGET_EFAULT;
/* SHM_INFO: likewise a shm_info. */
2327 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2328 if (host_to_target_shm_info(buf, &shm_info))
2329 return -TARGET_EFAULT;
2334 ret = get_errno(shmctl(shmid, cmd, NULL));
/* Emulate shmat(2): attach the host segment either at the guest's
   requested address or at a hole found by mmap_find_vma(), update the
   guest page flags, and record the mapping in shm_regions[] so
   do_shmdt() can undo it. *raddr receives the host attach address. */
2341 static inline abi_long do_shmat(int shmid, abi_ulong shmaddr, int shmflg,
2342 unsigned long *raddr)
2344 abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size);
2346 struct shmid_ds shm_info;
2349 /* find out the length of the shared memory segment */
2350 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2351 if (is_error(ret)) {
2352 /* can't get length, bail out */
/* NOTE(review): ret already went through get_errno(); wrapping it
   again is redundant (harmless, since a negative value passes through). */
2353 return get_errno(ret);
/* Guest specified an address: attach exactly there (g2h translation). */
2359 *raddr = (unsigned long) shmat(shmid, g2h(shmaddr), shmflg);
2361 abi_ulong mmap_start;
/* No address given: carve a hole in the guest address space first. */
2363 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2365 if (mmap_start == -1) {
2369 *raddr = (unsigned long) shmat(shmid, g2h(mmap_start),
2370 shmflg | SHM_REMAP);
2375 return get_errno(*raddr);
2378 page_set_flags(h2g(*raddr), h2g(*raddr) + shm_info.shm_segsz,
2379 PAGE_VALID | PAGE_READ |
2380 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Remember the mapping in the first free slot (start == 0 means free). */
2382 for (i = 0; i < N_SHM_REGIONS; i++) {
2383 if (shm_regions[i].start == 0) {
2384 shm_regions[i].start = h2g(*raddr);
2385 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): clear the recorded region's page flags and detach
   the host mapping. */
2394 static inline abi_long do_shmdt(abi_ulong shmaddr)
2398 for (i = 0; i < N_SHM_REGIONS; ++i) {
2399 if (shm_regions[i].start == shmaddr) {
2400 shm_regions[i].start = 0;
/* NOTE(review): page_set_flags() is called with (start, end) in
   do_shmat() but here gets the region *size* as its second argument —
   looks wrong (should likely be shmaddr + shm_regions[i].size);
   surrounding lines are elided, so flagging rather than fixing blind. */
2401 page_set_flags(shmaddr, shm_regions[i].size, 0);
2406 return get_errno(shmdt(g2h(shmaddr)));
2409 #ifdef TARGET_NR_ipc
2410 /* ??? This only works with linear mappings. */
2411 /* do_ipc() must return target values and target errnos. */
/* Demultiplexer for the ipc(2) super-syscall: `call` encodes the
   operation in the low 16 bits and a version in the high 16; the
   remaining arguments are forwarded to the matching do_* helper. */
2412 static abi_long do_ipc(unsigned int call, int first,
2413 int second, int third,
2414 abi_long ptr, abi_long fifth)
2419 version = call >> 16;
2424 ret = do_semop(first, ptr, second);
2428 ret = get_errno(semget(first, second, third));
2432 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2436 ret = get_errno(msgget(first, second));
2440 ret = do_msgsnd(first, ptr, second, third);
2444 ret = do_msgctl(first, second, ptr);
/* Old-style msgrcv (version 0): arguments packed in an ipc_kludge
   struct pointed to by ptr. */
2451 struct target_ipc_kludge {
2456 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2457 ret = -TARGET_EFAULT;
2461 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2463 unlock_user_struct(tmp, ptr, 0);
/* New-style msgrcv: msgtyp passed directly in `fifth`. */
2467 ret = do_msgrcv(first, ptr, second, fifth, third);
2475 unsigned long raddr;
2477 ret = do_shmat(first, ptr, second, &raddr);
/* shmat returns the attach address via the `third` guest pointer. */
2481 ret = put_user_ual(raddr, third);
2485 ret = -TARGET_EINVAL;
2491 ret = do_shmdt(ptr);
2495 ret = get_errno(shmget(first, second, third));
2499 ret = do_shmctl(first, second, third);
2503 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2504 ret = -TARGET_ENOSYS;
2511 /* kernel structure types definitions */
/* First pass over syscall_types.h: generate an enum tag (STRUCT_name)
   for every described kernel structure. */
2514 #define STRUCT(name, list...) STRUCT_ ## name,
2515 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2517 #include "syscall_types.h"
2520 #undef STRUCT_SPECIAL
/* Second pass: emit a thunk type-description array per structure;
   SPECIAL structs are converted by hand elsewhere. */
2522 #define STRUCT(name, list...) static const argtype struct_ ## name ## _def[] = { list, TYPE_NULL };
2523 #define STRUCT_SPECIAL(name)
2524 #include "syscall_types.h"
2526 #undef STRUCT_SPECIAL
/* One table row per emulated ioctl: target/host command numbers plus a
   thunk description of the argument for conversion. */
2528 typedef struct IOCTLEntry {
2529 unsigned int target_cmd;
2530 unsigned int host_cmd;
2533 const argtype arg_type[5];
/* Access direction of the ioctl argument relative to the guest. */
2536 #define IOC_R 0x0001
2537 #define IOC_W 0x0002
2538 #define IOC_RW (IOC_R | IOC_W)
2540 #define MAX_STRUCT_SIZE 4096
2542 static IOCTLEntry ioctl_entries[] = {
2543 #define IOCTL(cmd, access, types...) \
2544 { TARGET_ ## cmd, cmd, #cmd, access, { types } },
2549 /* ??? Implement proper locking for ioctls. */
2550 /* do_ioctl() Must return target values and target errnos. */
/* Emulate ioctl(2): look the command up in ioctl_entries[], then use
   the entry's thunk type description to convert the argument between
   guest and host representations according to its access direction. */
2551 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
2553 const IOCTLEntry *ie;
2554 const argtype *arg_type;
2556 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* Table is terminated by a zero target_cmd sentinel. */
2562 if (ie->target_cmd == 0) {
2563 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
2564 return -TARGET_ENOSYS;
2566 if (ie->target_cmd == cmd)
2570 arg_type = ie->arg_type;
2572 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
2574 switch(arg_type[0]) {
/* No argument: plain passthrough. */
2577 ret = get_errno(ioctl(fd, ie->host_cmd));
/* Integer argument: passed by value, no conversion needed. */
2582 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* Pointer-to-struct argument: thunk-convert per access direction. */
2586 target_size = thunk_type_size(arg_type, 0);
2587 switch(ie->access) {
/* IOC_R: host writes the struct; convert host -> target afterwards. */
2589 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2590 if (!is_error(ret)) {
2591 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2593 return -TARGET_EFAULT;
2594 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2595 unlock_user(argptr, arg, target_size);
/* IOC_W: guest supplies the struct; convert target -> host first. */
2599 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2601 return -TARGET_EFAULT;
2602 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2603 unlock_user(argptr, arg, 0);
2604 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* IOC_RW: convert in, call, convert back out. */
2608 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2610 return -TARGET_EFAULT;
2611 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2612 unlock_user(argptr, arg, 0);
2613 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2614 if (!is_error(ret)) {
2615 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2617 return -TARGET_EFAULT;
2618 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2619 unlock_user(argptr, arg, target_size);
2625 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
2626 (long)cmd, arg_type[0]);
2627 ret = -TARGET_ENOSYS;
2633 static const bitmask_transtbl iflag_tbl[] = {
2634 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
2635 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
2636 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
2637 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
2638 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
2639 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
2640 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
2641 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
2642 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
2643 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
2644 { TARGET_IXON, TARGET_IXON, IXON, IXON },
2645 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
2646 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
2647 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
2651 static const bitmask_transtbl oflag_tbl[] = {
2652 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
2653 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
2654 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
2655 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
2656 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
2657 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
2658 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
2659 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
2660 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
2661 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
2662 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
2663 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
2664 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
2665 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
2666 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
2667 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
2668 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
2669 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
2670 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
2671 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
2672 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
2673 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
2674 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
2675 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* Control-mode (c_cflag) translation table between target and host
 * termios encodings.  Each row is { target_mask, target_bits,
 * host_mask, host_bits }: within the masked field, target_bits on the
 * guest side correspond to host_bits on the host side (and vice
 * versa).  CBAUD rows map baud-rate codes, CSIZE rows map character
 * sizes; the remaining rows are single-bit flags with identical
 * meaning on both sides. */
2679 static const bitmask_transtbl cflag_tbl[] = {
2680 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
2681 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
2682 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
2683 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
2684 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
2685 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
2686 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
2687 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
2688 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
2689 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
2690 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
2691 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
2692 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
2693 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
2694 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
2695 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
2696 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
2697 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
2698 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
2699 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
2700 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
2701 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
2702 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
2703 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
2704 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
2705 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
2706 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
2707 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
2708 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
2709 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
2710 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* Local-mode (c_lflag) translation table between target and host
 * termios encodings.  All entries are single-bit flags that keep the
 * same meaning on both sides; only their bit positions may differ. */
2714 static const bitmask_transtbl lflag_tbl[] = {
2715 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
2716 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
2717 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
2718 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
2719 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
2720 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
2721 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
2722 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
2723 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
2724 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
2725 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
2726 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
2727 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
2728 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
2729 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/* Convert a guest (target) struct termios at 'src' into the host
 * layout at 'dst': the four mode words are byte-swapped and run
 * through the i/o/c/lflag translation tables, then the line
 * discipline and every control character are copied one by one.
 * NOTE(review): the assignments receiving each *_bitmask() result
 * fall on continuation lines not visible in this excerpt — confirm
 * against the full file. */
2733 static void target_to_host_termios (void *dst, const void *src)
2735 struct host_termios *host = dst;
2736 const struct target_termios *target = src;
2739 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
2741 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
2743 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
2745 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
2746 host->c_line = target->c_line;
/* Copy the control-character array element by element, since the
 * index of each entry may differ between target and host. */
2748 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
2749 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
2750 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
2751 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
2752 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
2753 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
2754 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
2755 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
2756 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
2757 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
2758 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
2759 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
2760 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
2761 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
2762 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
2763 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
2764 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Inverse of target_to_host_termios(): convert a host struct termios
 * at 'src' into the guest layout at 'dst'.  Mode words are translated
 * through the flag tables and byte-swapped for the guest; line
 * discipline and control characters are copied individually.
 * NOTE(review): as above, the assignments consuming each tswap32()
 * result are on continuation lines omitted from this excerpt. */
2767 static void host_to_target_termios (void *dst, const void *src)
2769 struct target_termios *target = dst;
2770 const struct host_termios *host = src;
2773 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
2775 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
2777 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
2779 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
2780 target->c_line = host->c_line;
/* Per-entry copy: control-character indices can differ between
 * host and target. */
2782 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
2783 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
2784 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
2785 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
2786 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
2787 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
2788 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
2789 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
2790 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
2791 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
2792 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
2793 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
2794 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
2795 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
2796 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
2797 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
2798 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor for struct termios: registers the two conversion
 * routines plus size/alignment for both representations, so generic
 * ioctl argument marshalling can convert termios structures
 * automatically.  Index 0 is host->target, index 1 is target->host. */
2801 static const StructEntry struct_termios_def = {
2802 .convert = { host_to_target_termios, target_to_host_termios },
2803 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
2804 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* mmap() flag translation table: maps TARGET_MAP_* bits in a guest
 * mmap request onto the host MAP_* bits (same flag names, possibly
 * different bit positions per architecture). */
2807 static bitmask_transtbl mmap_flags_tbl[] = {
2808 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
2809 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
2810 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
2811 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
2812 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
2813 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
2814 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
2815 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
/* open()/fcntl() file-status flag translation table between guest
 * TARGET_O_* and host O_* encodings.  The first two rows translate
 * the access mode within O_ACCMODE; the rest are individual flags.
 * O_DIRECT is only included when the host headers define it. */
2819 static bitmask_transtbl fcntl_flags_tbl[] = {
2820 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
2821 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
2822 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
2823 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
2824 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
2825 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
2826 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
2827 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
2828 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
2829 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
2830 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
2831 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
2832 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
2833 #if defined(O_DIRECT)
2834 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
2839 #if defined(TARGET_I386)
2841 /* NOTE: there is really one LDT for all the threads */
2842 static uint8_t *ldt_table;
/* Copy the emulated LDT (ldt_table) out to guest memory at 'ptr',
 * implementing the read side of modify_ldt().  The copy size is
 * clamped to the full LDT size vs. 'bytecount'.  Returns
 * -TARGET_EFAULT if the guest buffer cannot be locked for writing. */
2844 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
2851 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
2852 if (size > bytecount)
2854 p = lock_user(VERIFY_WRITE, ptr, size, 0);
2856 return -TARGET_EFAULT;
2857 /* ??? Should this by byteswapped? */
2858 memcpy(p, ldt_table, size);
2859 unlock_user(p, ptr, size);
2863 /* XXX: add locking support */
/* Write one LDT descriptor, implementing the write side of
 * modify_ldt().  Reads a target_modify_ldt_ldt_s from guest memory,
 * validates the entry number and flag combination, lazily allocates
 * the backing LDT the first time one is needed, builds the two
 * 32-bit descriptor words the same way the Linux kernel does, and
 * installs them into ldt_table.  'oldmode' selects the legacy
 * modify_ldt(1) descriptor semantics.
 * Errors: -TARGET_EINVAL for a bad size, entry number or flags,
 * -TARGET_EFAULT if guest memory can't be read, -TARGET_ENOMEM if
 * the LDT mapping can't be created. */
2864 static abi_long write_ldt(CPUX86State *env,
2865 abi_ulong ptr, unsigned long bytecount, int oldmode)
2867 struct target_modify_ldt_ldt_s ldt_info;
2868 struct target_modify_ldt_ldt_s *target_ldt_info;
2869 int seg_32bit, contents, read_exec_only, limit_in_pages;
2870 int seg_not_present, useable, lm;
2871 uint32_t *lp, entry_1, entry_2;
2873 if (bytecount != sizeof(ldt_info))
2874 return -TARGET_EINVAL;
2875 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
2876 return -TARGET_EFAULT;
/* Byte-swap the descriptor request from guest byte order. */
2877 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
2878 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
2879 ldt_info.limit = tswap32(target_ldt_info->limit);
2880 ldt_info.flags = tswap32(target_ldt_info->flags);
2881 unlock_user_struct(target_ldt_info, ptr, 0);
2883 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
2884 return -TARGET_EINVAL;
/* Unpack the packed flags word (same layout as the kernel's
 * struct user_desc flags). */
2885 seg_32bit = ldt_info.flags & 1;
2886 contents = (ldt_info.flags >> 1) & 3;
2887 read_exec_only = (ldt_info.flags >> 3) & 1;
2888 limit_in_pages = (ldt_info.flags >> 4) & 1;
2889 seg_not_present = (ldt_info.flags >> 5) & 1;
2890 useable = (ldt_info.flags >> 6) & 1;
2894 lm = (ldt_info.flags >> 7) & 1;
2896 if (contents == 3) {
2898 return -TARGET_EINVAL;
2899 if (seg_not_present == 0)
2900 return -TARGET_EINVAL;
2902 /* allocate the LDT */
2904 env->ldt.base = target_mmap(0,
2905 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
2906 PROT_READ|PROT_WRITE,
2907 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
2908 if (env->ldt.base == -1)
2909 return -TARGET_ENOMEM;
2910 memset(g2h(env->ldt.base), 0,
2911 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
2912 env->ldt.limit = 0xffff;
2913 ldt_table = g2h(env->ldt.base);
2916 /* NOTE: same code as Linux kernel */
2917 /* Allow LDTs to be cleared by the user. */
2918 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
2921 read_exec_only == 1 &&
2923 limit_in_pages == 0 &&
2924 seg_not_present == 1 &&
/* Assemble the two descriptor words: entry_1 holds base[15:0] and
 * limit[15:0]; entry_2 holds base[31:16], limit[19:16] and the
 * attribute bits. */
2932 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
2933 (ldt_info.limit & 0x0ffff);
2934 entry_2 = (ldt_info.base_addr & 0xff000000) |
2935 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
2936 (ldt_info.limit & 0xf0000) |
2937 ((read_exec_only ^ 1) << 9) |
2939 ((seg_not_present ^ 1) << 15) |
2941 (limit_in_pages << 23) |
/* The AVL ("useable") bit is only honoured outside oldmode. */
2945 entry_2 |= (useable << 20);
2947 /* Install the new entry ... */
2949 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
2950 lp[0] = tswap32(entry_1);
2951 lp[1] = tswap32(entry_2);
2955 /* specific and weird i386 syscalls */
/* Dispatch the i386 modify_ldt() syscall: func 0 reads the LDT,
 * func 1 writes an entry with legacy (oldmode) semantics, func 0x11
 * writes with the modern semantics; anything else is ENOSYS. */
2956 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
2957 unsigned long bytecount)
2963 ret = read_ldt(ptr, bytecount);
2966 ret = write_ldt(env, ptr, bytecount, 1);
2969 ret = write_ldt(env, ptr, bytecount, 0);
2972 ret = -TARGET_ENOSYS;
2978 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2) for 32-bit x86 guests: install a TLS
 * descriptor into the emulated GDT.  Reads a user_desc-style struct
 * from guest memory; an entry_number of -1 asks the kernel to pick a
 * free slot in the TLS range, and the chosen index is written back to
 * the guest.  Descriptor assembly mirrors write_ldt()/the kernel. */
2979 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
2981 uint64_t *gdt_table = g2h(env->gdt.base);
2982 struct target_modify_ldt_ldt_s ldt_info;
2983 struct target_modify_ldt_ldt_s *target_ldt_info;
2984 int seg_32bit, contents, read_exec_only, limit_in_pages;
2985 int seg_not_present, useable, lm;
2986 uint32_t *lp, entry_1, entry_2;
2989 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
2990 if (!target_ldt_info)
2991 return -TARGET_EFAULT;
2992 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
2993 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
2994 ldt_info.limit = tswap32(target_ldt_info->limit);
2995 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 means "allocate a free TLS slot for me": scan the TLS range
 * of the GDT for an empty (all-zero) descriptor and report the
 * chosen index back to the guest. */
2996 if (ldt_info.entry_number == -1) {
2997 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
2998 if (gdt_table[i] == 0) {
2999 ldt_info.entry_number = i;
3000 target_ldt_info->entry_number = tswap32(i);
3005 unlock_user_struct(target_ldt_info, ptr, 1);
3007 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3008 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3009 return -TARGET_EINVAL;
/* Unpack the flags word (kernel user_desc layout). */
3010 seg_32bit = ldt_info.flags & 1;
3011 contents = (ldt_info.flags >> 1) & 3;
3012 read_exec_only = (ldt_info.flags >> 3) & 1;
3013 limit_in_pages = (ldt_info.flags >> 4) & 1;
3014 seg_not_present = (ldt_info.flags >> 5) & 1;
3015 useable = (ldt_info.flags >> 6) & 1;
3019 lm = (ldt_info.flags >> 7) & 1;
3022 if (contents == 3) {
3023 if (seg_not_present == 0)
3024 return -TARGET_EINVAL;
3027 /* NOTE: same code as Linux kernel */
3028 /* Allow LDTs to be cleared by the user. */
3029 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3030 if ((contents == 0 &&
3031 read_exec_only == 1 &&
3033 limit_in_pages == 0 &&
3034 seg_not_present == 1 &&
/* Build the two descriptor words, as in write_ldt(). */
3042 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3043 (ldt_info.limit & 0x0ffff);
3044 entry_2 = (ldt_info.base_addr & 0xff000000) |
3045 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3046 (ldt_info.limit & 0xf0000) |
3047 ((read_exec_only ^ 1) << 9) |
3049 ((seg_not_present ^ 1) << 15) |
3051 (limit_in_pages << 23) |
3056 /* Install the new entry ... */
3058 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3059 lp[0] = tswap32(entry_1);
3060 lp[1] = tswap32(entry_2);
/* Emulate get_thread_area(2): decode the GDT descriptor at the index
 * the guest supplies (must lie in the TLS slot range) back into the
 * user_desc-style base/limit/flags fields and write them to guest
 * memory.  This is the exact inverse of do_set_thread_area()'s
 * descriptor assembly. */
3064 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3066 struct target_modify_ldt_ldt_s *target_ldt_info;
3067 uint64_t *gdt_table = g2h(env->gdt.base);
3068 uint32_t base_addr, limit, flags;
3069 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3070 int seg_not_present, useable, lm;
3071 uint32_t *lp, entry_1, entry_2;
3073 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3074 if (!target_ldt_info)
3075 return -TARGET_EFAULT;
3076 idx = tswap32(target_ldt_info->entry_number);
3077 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3078 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3079 unlock_user_struct(target_ldt_info, ptr, 1);
3080 return -TARGET_EINVAL;
3082 lp = (uint32_t *)(gdt_table + idx);
3083 entry_1 = tswap32(lp[0]);
3084 entry_2 = tswap32(lp[1]);
/* Extract the attribute bits from the second descriptor word;
 * read_exec_only and seg_not_present are stored inverted. */
3086 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3087 contents = (entry_2 >> 10) & 3;
3088 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3089 seg_32bit = (entry_2 >> 22) & 1;
3090 limit_in_pages = (entry_2 >> 23) & 1;
3091 useable = (entry_2 >> 20) & 1;
3095 lm = (entry_2 >> 21) & 1;
/* Repack into the user_desc flags word and reconstruct the
 * 20-bit limit and 32-bit base from the descriptor fields. */
3097 flags = (seg_32bit << 0) | (contents << 1) |
3098 (read_exec_only << 3) | (limit_in_pages << 4) |
3099 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3100 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3101 base_addr = (entry_1 >> 16) |
3102 (entry_2 & 0xff000000) |
3103 ((entry_2 & 0xff) << 16);
3104 target_ldt_info->base_addr = tswapl(base_addr);
3105 target_ldt_info->limit = tswap32(limit);
3106 target_ldt_info->flags = tswap32(flags);
3107 unlock_user_struct(target_ldt_info, ptr, 1);
3110 #endif /* TARGET_I386 && TARGET_ABI32 */
3112 #ifndef TARGET_ABI32
/* Emulate x86-64 arch_prctl(2): ARCH_SET_GS/ARCH_SET_FS load a null
 * selector into the chosen segment and set its base to 'addr';
 * ARCH_GET_GS/ARCH_GET_FS read the segment base back into guest
 * memory (put_user may fail with -TARGET_EFAULT).  Unknown codes
 * return -TARGET_EINVAL. */
3113 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3120 case TARGET_ARCH_SET_GS:
3121 case TARGET_ARCH_SET_FS:
3122 if (code == TARGET_ARCH_SET_GS)
3126 cpu_x86_load_seg(env, idx, 0);
3127 env->segs[idx].base = addr;
3129 case TARGET_ARCH_GET_GS:
3130 case TARGET_ARCH_GET_FS:
3131 if (code == TARGET_ARCH_GET_GS)
3135 val = env->segs[idx].base;
3136 if (put_user(val, addr, abi_ulong))
3137 return -TARGET_EFAULT;
3140 ret = -TARGET_EINVAL;
3147 #endif /* defined(TARGET_I386) */
3149 #if defined(USE_NPTL)
3151 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
3153 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3156 pthread_mutex_t mutex;
3157 pthread_cond_t cond;
3160 abi_ulong child_tidptr;
3161 abi_ulong parent_tidptr;
/* pthread entry point for an emulated clone() child (NPTL path).
 * Publishes the child's tid to the locations clone() requested,
 * re-enables signals (the parent blocked them before pthread_create),
 * signals readiness to the parent via the shared condvar, and then
 * blocks on clone_lock until the parent has finished setting up the
 * child's TLS state before running the guest CPU. */
3165 static void *clone_func(void *arg)
3167 new_thread_info *info = arg;
3172 info->tid = gettid();
/* CLONE_CHILD_SETTID / CLONE_PARENT_SETTID support: store the tid
 * wherever the caller asked for it. */
3173 if (info->child_tidptr)
3174 put_user_u32(info->tid, info->child_tidptr);
3175 if (info->parent_tidptr)
3176 put_user_u32(info->tid, info->parent_tidptr);
3177 /* Enable signals. */
3178 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3179 /* Signal to the parent that we're ready. */
3180 pthread_mutex_lock(&info->mutex);
3181 pthread_cond_broadcast(&info->cond);
3182 pthread_mutex_unlock(&info->mutex);
3183 /* Wait until the parent has finshed initializing the tls state. */
3184 pthread_mutex_lock(&clone_lock);
3185 pthread_mutex_unlock(&clone_lock);
3191 /* this stack is the equivalent of the kernel stack associated with a
3193 #define NEW_STACK_SIZE 8192
3195 static int clone_func(void *arg)
3197 CPUState *env = arg;
3204 /* do_fork() Must return host values and target errnos (unlike most
3205 do_*() functions). */
/* Emulate clone()/fork()/vfork().  CLONE_VM requests become a new
 * host thread (NPTL path: pthread_create with a handshake so thread
 * setup appears atomic) running a copied CPU state; without CLONE_VM
 * the request is treated as a fork().  vfork() is emulated with
 * fork() by stripping CLONE_VFORK|CLONE_VM.  Returns host values /
 * target errnos, unlike most do_*() helpers. */
3206 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3207 abi_ulong parent_tidptr, target_ulong newtls,
3208 abi_ulong child_tidptr)
3214 #if defined(USE_NPTL)
3215 unsigned int nptl_flags;
3219 /* Emulate vfork() with fork() */
3220 if (flags & CLONE_VFORK)
3221 flags &= ~(CLONE_VFORK | CLONE_VM);
3223 if (flags & CLONE_VM) {
3224 #if defined(USE_NPTL)
3225 new_thread_info info;
3226 pthread_attr_t attr;
/* Allocate the TaskState together with the emulated kernel stack. */
3228 ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
3229 init_task_state(ts);
3230 new_stack = ts->stack;
3231 /* we create a new CPU instance. */
3232 new_env = cpu_copy(env);
3233 /* Init regs that differ from the parent. */
3234 cpu_clone_regs(new_env, newsp);
3235 new_env->opaque = ts;
3236 #if defined(USE_NPTL)
/* The NPTL-specific flags are handled here, not by the host
 * clone/pthread machinery. */
3238 flags &= ~CLONE_NPTL_FLAGS2;
3240 /* TODO: Implement CLONE_CHILD_CLEARTID. */
3241 if (nptl_flags & CLONE_SETTLS)
3242 cpu_set_tls (new_env, newtls);
3244 /* Grab a mutex so that thread setup appears atomic. */
3245 pthread_mutex_lock(&clone_lock);
3247 memset(&info, 0, sizeof(info));
3248 pthread_mutex_init(&info.mutex, NULL);
3249 pthread_mutex_lock(&info.mutex);
3250 pthread_cond_init(&info.cond, NULL);
3252 if (nptl_flags & CLONE_CHILD_SETTID)
3253 info.child_tidptr = child_tidptr;
3254 if (nptl_flags & CLONE_PARENT_SETTID)
3255 info.parent_tidptr = parent_tidptr;
3257 ret = pthread_attr_init(&attr);
3258 ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
3259 /* It is not safe to deliver signals until the child has finished
3260 initializing, so temporarily block all signals. */
3261 sigfillset(&sigmask);
3262 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3264 ret = pthread_create(&info.thread, &attr, clone_func, &info);
/* Restore this thread's signal mask; the child re-enables its
 * own signals inside clone_func(). */
3266 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3267 pthread_attr_destroy(&attr);
3269 /* Wait for the child to initialize. */
3270 pthread_cond_wait(&info.cond, &info.mutex);
3272 if (flags & CLONE_PARENT_SETTID)
3273 put_user_u32(ret, parent_tidptr);
3277 pthread_mutex_unlock(&info.mutex);
3278 pthread_cond_destroy(&info.cond);
3279 pthread_mutex_destroy(&info.mutex);
3280 pthread_mutex_unlock(&clone_lock);
/* Non-NPTL fallback: raw host clone() with an 8K stack. */
3282 if (flags & CLONE_NPTL_FLAGS2)
3284 /* This is probably going to die very quickly, but do it anyway. */
3286 ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3288 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3292 /* if no CLONE_VM, we consider it is a fork */
3293 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3298 /* Child Process. */
3299 cpu_clone_regs(env, newsp);
3301 #if defined(USE_NPTL)
3302 /* There is a race condition here. The parent process could
3303 theoretically read the TID in the child process before the child
3304 tid is set. This would require using either ptrace
3305 (not implemented) or having *_tidptr to point at a shared memory
3306 mapping. We can't repeat the spinlock hack used above because
3307 the child process gets its own copy of the lock. */
3308 if (flags & CLONE_CHILD_SETTID)
3309 put_user_u32(gettid(), child_tidptr)
3310 if (flags & CLONE_PARENT_SETTID)
3311 put_user_u32(gettid(), parent_tidptr);
3312 ts = (TaskState *)env->opaque;
3313 if (flags & CLONE_SETTLS)
3314 cpu_set_tls (env, newtls);
3315 /* TODO: Implement CLONE_CHILD_CLEARTID. */
/* Emulate fcntl(2).  Lock commands (F_GETLK/F_SETLK/F_SETLKW and the
 * 64-bit variants) marshal a struct flock between guest and host
 * byte order; F_GETFL/F_SETFL translate the status-flag bits through
 * fcntl_flags_tbl; everything else is passed straight through.
 * Returns the host fcntl() result or -TARGET_EFAULT on a bad guest
 * pointer. */
3324 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
3327 struct target_flock *target_fl;
3328 struct flock64 fl64;
3329 struct target_flock64 *target_fl64;
3333 case TARGET_F_GETLK:
3334 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3335 return -TARGET_EFAULT;
/* Copy the guest flock in, byte-swapping each field. */
3336 fl.l_type = tswap16(target_fl->l_type);
3337 fl.l_whence = tswap16(target_fl->l_whence);
3338 fl.l_start = tswapl(target_fl->l_start);
3339 fl.l_len = tswapl(target_fl->l_len);
3340 fl.l_pid = tswapl(target_fl->l_pid);
3341 unlock_user_struct(target_fl, arg, 0);
3342 ret = get_errno(fcntl(fd, cmd, &fl));
/* F_GETLK writes the (possibly updated) lock back to the guest. */
3344 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
3345 return -TARGET_EFAULT;
3346 target_fl->l_type = tswap16(fl.l_type);
3347 target_fl->l_whence = tswap16(fl.l_whence);
3348 target_fl->l_start = tswapl(fl.l_start);
3349 target_fl->l_len = tswapl(fl.l_len);
3350 target_fl->l_pid = tswapl(fl.l_pid);
3351 unlock_user_struct(target_fl, arg, 1);
3355 case TARGET_F_SETLK:
3356 case TARGET_F_SETLKW:
3357 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3358 return -TARGET_EFAULT;
3359 fl.l_type = tswap16(target_fl->l_type);
3360 fl.l_whence = tswap16(target_fl->l_whence);
3361 fl.l_start = tswapl(target_fl->l_start);
3362 fl.l_len = tswapl(target_fl->l_len);
3363 fl.l_pid = tswapl(target_fl->l_pid);
3364 unlock_user_struct(target_fl, arg, 0);
3365 ret = get_errno(fcntl(fd, cmd, &fl));
3368 case TARGET_F_GETLK64:
3369 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3370 return -TARGET_EFAULT;
/* NOTE(review): the ">> 1" on l_type and "cmd >> 1" mirror the
 * target/host command encoding difference for the *64 lock
 * commands — confirm against the full file before changing. */
3371 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3372 fl64.l_whence = tswap16(target_fl64->l_whence);
3373 fl64.l_start = tswapl(target_fl64->l_start);
3374 fl64.l_len = tswapl(target_fl64->l_len);
3375 fl64.l_pid = tswap16(target_fl64->l_pid);
3376 unlock_user_struct(target_fl64, arg, 0);
3377 ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
3379 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
3380 return -TARGET_EFAULT;
3381 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
3382 target_fl64->l_whence = tswap16(fl64.l_whence);
3383 target_fl64->l_start = tswapl(fl64.l_start);
3384 target_fl64->l_len = tswapl(fl64.l_len);
3385 target_fl64->l_pid = tswapl(fl64.l_pid);
3386 unlock_user_struct(target_fl64, arg, 1);
3389 case TARGET_F_SETLK64:
3390 case TARGET_F_SETLKW64:
3391 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3392 return -TARGET_EFAULT;
3393 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3394 fl64.l_whence = tswap16(target_fl64->l_whence);
3395 fl64.l_start = tswapl(target_fl64->l_start);
3396 fl64.l_len = tswapl(target_fl64->l_len);
3397 fl64.l_pid = tswap16(target_fl64->l_pid);
3398 unlock_user_struct(target_fl64, arg, 0);
3399 ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
/* F_GETFL: translate host status flags back to target encoding. */
3403 ret = get_errno(fcntl(fd, cmd, arg));
3405 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
/* F_SETFL: translate target status flags to host encoding. */
3410 ret = get_errno(fcntl(fd, cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
/* Default: pass the command and argument straight through. */
3414 ret = get_errno(fcntl(fd, cmd, arg));
/* 16-bit uid/gid syscall helpers (USE_UID16 targets): high2low*
 * squash a 32-bit host id into the 16-bit range the guest ABI uses;
 * low2high* widen a 16-bit guest id, preserving the special -1
 * ("no change") value via the explicit (int16_t) == -1 checks. */
3422 static inline int high2lowuid(int uid)
3430 static inline int high2lowgid(int gid)
3438 static inline int low2highuid(int uid)
3440 if ((int16_t)uid == -1)
3446 static inline int low2highgid(int gid)
3448 if ((int16_t)gid == -1)
3454 #endif /* USE_UID16 */
/* One-time syscall-layer initialization: register every structure
 * conversion from syscall_types.h with the thunk layer, patch ioctl
 * numbers whose size field is the all-ones placeholder with the real
 * thunk-computed argument size, and build the reverse errno
 * translation table.  On same-arch builds it also sanity-checks that
 * target and host ioctl numbers agree. */
3456 void syscall_init(void)
3459 const argtype *arg_type;
3463 #define STRUCT(name, list...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3464 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3465 #include "syscall_types.h"
3467 #undef STRUCT_SPECIAL
3469 /* we patch the ioctl size if necessary. We rely on the fact that
3470 no ioctl has all the bits at '1' in the size field */
3472 while (ie->target_cmd != 0) {
3473 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
3474 TARGET_IOC_SIZEMASK) {
3475 arg_type = ie->arg_type;
/* Size patching only makes sense for pointer arguments. */
3476 if (arg_type[0] != TYPE_PTR) {
3477 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
3482 size = thunk_type_size(arg_type, 0);
3483 ie->target_cmd = (ie->target_cmd &
3484 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
3485 (size << TARGET_IOC_SIZESHIFT);
3488 /* Build target_to_host_errno_table[] table from
3489 * host_to_target_errno_table[]. */
3490 for (i=0; i < ERRNO_TABLE_SIZE; i++)
3491 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
3493 /* automatic consistency check if same arch */
3494 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3495 (defined(__x86_64__) && defined(TARGET_X86_64))
3496 if (unlikely(ie->target_cmd != ie->host_cmd)) {
3497 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3498 ie->name, ie->target_cmd, ie->host_cmd);
3505 #if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset that a 32-bit guest ABI passed as
 * two 32-bit syscall arguments; which word holds the high half
 * depends on the target's endianness. */
3506 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
3508 #ifdef TARGET_WORDS_BIGENDIAN
3509 return ((uint64_t)word0 << 32) | word1;
3511 return ((uint64_t)word1 << 32) | word0;
3514 #else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in one argument. */
3515 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
3519 #endif /* TARGET_ABI_BITS != 32 */
3521 #ifdef TARGET_NR_truncate64
/* truncate64() wrapper: rebuild the 64-bit length from the two
 * 32-bit halves in arg2/arg3 (the ARM EABI check handles that ABI's
 * different argument alignment for 64-bit values). */
3522 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
3528 if (((CPUARMState *)cpu_env)->eabi)
3534 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
3538 #ifdef TARGET_NR_ftruncate64
/* ftruncate64() wrapper: same 64-bit length reassembly and ARM EABI
 * argument-alignment handling as target_truncate64(), but for an fd. */
3539 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
3545 if (((CPUARMState *)cpu_env)->eabi)
3551 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Read a guest struct timespec at 'target_addr' into *host_ts,
 * byte-swapping tv_sec/tv_nsec.  Returns -TARGET_EFAULT if the guest
 * memory cannot be locked for reading. */
3555 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
3556 abi_ulong target_addr)
3558 struct target_timespec *target_ts;
3560 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
3561 return -TARGET_EFAULT;
3562 host_ts->tv_sec = tswapl(target_ts->tv_sec);
3563 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
3564 unlock_user_struct(target_ts, target_addr, 0);
/* Write *host_ts out to the guest struct timespec at 'target_addr',
 * byte-swapping tv_sec/tv_nsec.  Returns -TARGET_EFAULT if the guest
 * memory cannot be locked for writing. */
3568 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
3569 struct timespec *host_ts)
3571 struct target_timespec *target_ts;
3573 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
3574 return -TARGET_EFAULT;
3575 target_ts->tv_sec = tswapl(host_ts->tv_sec);
3576 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
3577 unlock_user_struct(target_ts, target_addr, 1);
3581 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Convert a host struct stat into the guest's stat64 layout at
 * 'target_addr' for stat64/newfstatat.  ARM EABI guests use the
 * dedicated target_eabi_stat64 layout; otherwise the generic
 * target_stat / target_stat64 layout is used depending on
 * TARGET_LONG_BITS.  Field-by-field __put_user handles byte order.
 * Returns -TARGET_EFAULT on an unwritable guest buffer. */
3582 static inline abi_long host_to_target_stat64(void *cpu_env,
3583 abi_ulong target_addr,
3584 struct stat *host_st)
3587 if (((CPUARMState *)cpu_env)->eabi) {
3588 struct target_eabi_stat64 *target_st;
3590 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3591 return -TARGET_EFAULT;
3592 memset(target_st, 0, sizeof(struct target_eabi_stat64));
3593 __put_user(host_st->st_dev, &target_st->st_dev);
3594 __put_user(host_st->st_ino, &target_st->st_ino);
3595 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* Some targets carry the inode in a secondary __st_ino field. */
3596 __put_user(host_st->st_ino, &target_st->__st_ino);
3598 __put_user(host_st->st_mode, &target_st->st_mode);
3599 __put_user(host_st->st_nlink, &target_st->st_nlink);
3600 __put_user(host_st->st_uid, &target_st->st_uid);
3601 __put_user(host_st->st_gid, &target_st->st_gid);
3602 __put_user(host_st->st_rdev, &target_st->st_rdev);
3603 __put_user(host_st->st_size, &target_st->st_size);
3604 __put_user(host_st->st_blksize, &target_st->st_blksize);
3605 __put_user(host_st->st_blocks, &target_st->st_blocks);
3606 __put_user(host_st->st_atime, &target_st->target_st_atime);
3607 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3608 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3609 unlock_user_struct(target_st, target_addr, 1);
3613 #if TARGET_LONG_BITS == 64
3614 struct target_stat *target_st;
3616 struct target_stat64 *target_st;
3619 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3620 return -TARGET_EFAULT;
3621 memset(target_st, 0, sizeof(*target_st));
3622 __put_user(host_st->st_dev, &target_st->st_dev);
3623 __put_user(host_st->st_ino, &target_st->st_ino);
3624 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3625 __put_user(host_st->st_ino, &target_st->__st_ino);
3627 __put_user(host_st->st_mode, &target_st->st_mode);
3628 __put_user(host_st->st_nlink, &target_st->st_nlink);
3629 __put_user(host_st->st_uid, &target_st->st_uid);
3630 __put_user(host_st->st_gid, &target_st->st_gid);
3631 __put_user(host_st->st_rdev, &target_st->st_rdev);
3632 /* XXX: better use of kernel struct */
3633 __put_user(host_st->st_size, &target_st->st_size);
3634 __put_user(host_st->st_blksize, &target_st->st_blksize);
3635 __put_user(host_st->st_blocks, &target_st->st_blocks);
3636 __put_user(host_st->st_atime, &target_st->target_st_atime);
3637 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3638 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3639 unlock_user_struct(target_st, target_addr, 1);
3646 #if defined(USE_NPTL)
3647 /* ??? Using host futex calls even when target atomic operations
3648 are not really atomic probably breaks things. However implementing
3649 futexes locally would make futexes shared between multiple processes
3650 tricky. However they're probably useless because guest atomic
3651 operations won't work either. */
/* Emulate futex(2) by forwarding to the host futex syscall on the
 * guest address translated with g2h().  FUTEX_WAIT converts the
 * optional guest timespec and byte-swaps the compare value; WAKE/FD
 * pass 'val' through; REQUEUE/CMP_REQUEUE also pass the second guest
 * address.  Unsupported ops return -TARGET_ENOSYS. */
3652 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
3653 target_ulong uaddr2, int val3)
3655 struct timespec ts, *pts;
3657 /* ??? We assume FUTEX_* constants are the same on both host
3663 target_to_host_timespec(pts, timeout);
/* val is tswap32'ed for WAIT because it is compared against the
 * guest-byte-order word at *uaddr. */
3667 return get_errno(sys_futex(g2h(uaddr), FUTEX_WAIT, tswap32(val),
3670 return get_errno(sys_futex(g2h(uaddr), FUTEX_WAKE, val, NULL, NULL, 0));
3672 return get_errno(sys_futex(g2h(uaddr), FUTEX_FD, val, NULL, NULL, 0));
3674 return get_errno(sys_futex(g2h(uaddr), FUTEX_REQUEUE, val,
3675 NULL, g2h(uaddr2), 0));
3676 case FUTEX_CMP_REQUEUE:
3677 return get_errno(sys_futex(g2h(uaddr), FUTEX_CMP_REQUEUE, val,
3678 NULL, g2h(uaddr2), tswap32(val3)));
3680 return -TARGET_ENOSYS;
/* Return the kernel version to present to the guest, encoded as one
 * byte per component (major<<16 | minor<<8 | patch).  Uses the
 * user-supplied -r release string (qemu_uname_release) when set,
 * otherwise the real uname() release; the result is cached in the
 * function-local static. */
3685 int get_osversion(void)
3687 static int osversion;
3688 struct new_utsname buf;
3693 if (qemu_uname_release && *qemu_uname_release) {
3694 s = qemu_uname_release;
3696 if (sys_uname(&buf))
/* Parse up to three dot-separated numeric components. */
3701 for (i = 0; i < 3; i++) {
3703 while (*s >= '0' && *s <= '9') {
3708 tmp = (tmp << 8) + n;
3716 /* do_syscall() should always have a single exit point at the end so
3717 that actions, such as logging of syscall results, can be performed.
3718 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
3719 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
3720 abi_long arg2, abi_long arg3, abi_long arg4,
3721 abi_long arg5, abi_long arg6)
3729 gemu_log("syscall %d", num);
3732 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
3735 case TARGET_NR_exit:
3739 gdb_exit(cpu_env, arg1);
3740 /* XXX: should free thread stack and CPU env */
3742 ret = 0; /* avoid warning */
3744 case TARGET_NR_read:
3748 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
3750 ret = get_errno(read(arg1, p, arg3));
3751 unlock_user(p, arg2, ret);
3754 case TARGET_NR_write:
3755 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
3757 ret = get_errno(write(arg1, p, arg3));
3758 unlock_user(p, arg2, 0);
3760 case TARGET_NR_open:
3761 if (!(p = lock_user_string(arg1)))
3763 ret = get_errno(open(path(p),
3764 target_to_host_bitmask(arg2, fcntl_flags_tbl),
3766 unlock_user(p, arg1, 0);
3768 #if defined(TARGET_NR_openat) && defined(__NR_openat)
3769 case TARGET_NR_openat:
3770 if (!(p = lock_user_string(arg2)))
3772 ret = get_errno(sys_openat(arg1,
3774 target_to_host_bitmask(arg3, fcntl_flags_tbl),
3776 unlock_user(p, arg2, 0);
3779 case TARGET_NR_close:
3780 ret = get_errno(close(arg1));
3785 case TARGET_NR_fork:
3786 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
3788 #ifdef TARGET_NR_waitpid
3789 case TARGET_NR_waitpid:
3792 ret = get_errno(waitpid(arg1, &status, arg3));
3793 if (!is_error(ret) && arg2
3794 && put_user_s32(status, arg2))
3799 #ifdef TARGET_NR_waitid
3800 case TARGET_NR_waitid:
3804 ret = get_errno(waitid(arg1, arg2, &info, arg4));
3805 if (!is_error(ret) && arg3 && info.si_pid != 0) {
3806 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
3808 host_to_target_siginfo(p, &info);
3809 unlock_user(p, arg3, sizeof(target_siginfo_t));
3814 #ifdef TARGET_NR_creat /* not on alpha */
3815 case TARGET_NR_creat:
3816 if (!(p = lock_user_string(arg1)))
3818 ret = get_errno(creat(p, arg2));
3819 unlock_user(p, arg1, 0);
3822 case TARGET_NR_link:
3825 p = lock_user_string(arg1);
3826 p2 = lock_user_string(arg2);
3828 ret = -TARGET_EFAULT;
3830 ret = get_errno(link(p, p2));
3831 unlock_user(p2, arg2, 0);
3832 unlock_user(p, arg1, 0);
3835 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
3836 case TARGET_NR_linkat:
3841 p = lock_user_string(arg2);
3842 p2 = lock_user_string(arg4);
3844 ret = -TARGET_EFAULT;
3846 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
3847 unlock_user(p, arg2, 0);
3848 unlock_user(p2, arg4, 0);
3852 case TARGET_NR_unlink:
3853 if (!(p = lock_user_string(arg1)))
3855 ret = get_errno(unlink(p));
3856 unlock_user(p, arg1, 0);
3858 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
3859 case TARGET_NR_unlinkat:
3860 if (!(p = lock_user_string(arg2)))
3862 ret = get_errno(sys_unlinkat(arg1, p, arg3));
3863 unlock_user(p, arg2, 0);
3866 case TARGET_NR_execve:
3868 char **argp, **envp;
3871 abi_ulong guest_argp;
3872 abi_ulong guest_envp;
3878 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
3879 if (get_user_ual(addr, gp))
3887 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
3888 if (get_user_ual(addr, gp))
3895 argp = alloca((argc + 1) * sizeof(void *));
3896 envp = alloca((envc + 1) * sizeof(void *));
3898 for (gp = guest_argp, q = argp; gp;
3899 gp += sizeof(abi_ulong), q++) {
3900 if (get_user_ual(addr, gp))
3904 if (!(*q = lock_user_string(addr)))
3909 for (gp = guest_envp, q = envp; gp;
3910 gp += sizeof(abi_ulong), q++) {
3911 if (get_user_ual(addr, gp))
3915 if (!(*q = lock_user_string(addr)))
3920 if (!(p = lock_user_string(arg1)))
3922 ret = get_errno(execve(p, argp, envp));
3923 unlock_user(p, arg1, 0);
3928 ret = -TARGET_EFAULT;
3931 for (gp = guest_argp, q = argp; *q;
3932 gp += sizeof(abi_ulong), q++) {
3933 if (get_user_ual(addr, gp)
3936 unlock_user(*q, addr, 0);
3938 for (gp = guest_envp, q = envp; *q;
3939 gp += sizeof(abi_ulong), q++) {
3940 if (get_user_ual(addr, gp)
3943 unlock_user(*q, addr, 0);
3947 case TARGET_NR_chdir:
3948 if (!(p = lock_user_string(arg1)))
3950 ret = get_errno(chdir(p));
3951 unlock_user(p, arg1, 0);
3953 #ifdef TARGET_NR_time
3954 case TARGET_NR_time:
3957 ret = get_errno(time(&host_time));
3960 && put_user_sal(host_time, arg1))
3965 case TARGET_NR_mknod:
3966 if (!(p = lock_user_string(arg1)))
3968 ret = get_errno(mknod(p, arg2, arg3));
3969 unlock_user(p, arg1, 0);
3971 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
3972 case TARGET_NR_mknodat:
3973 if (!(p = lock_user_string(arg2)))
3975 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
3976 unlock_user(p, arg2, 0);
3979 case TARGET_NR_chmod:
3980 if (!(p = lock_user_string(arg1)))
3982 ret = get_errno(chmod(p, arg2));
3983 unlock_user(p, arg1, 0);
3985 #ifdef TARGET_NR_break
3986 case TARGET_NR_break:
3989 #ifdef TARGET_NR_oldstat
3990 case TARGET_NR_oldstat:
3993 case TARGET_NR_lseek:
3994 ret = get_errno(lseek(arg1, arg2, arg3));
3996 #ifdef TARGET_NR_getxpid
3997 case TARGET_NR_getxpid:
3999 case TARGET_NR_getpid:
4001 ret = get_errno(getpid());
4003 case TARGET_NR_mount:
4005 /* need to look at the data field */
4007 p = lock_user_string(arg1);
4008 p2 = lock_user_string(arg2);
4009 p3 = lock_user_string(arg3);
4010 if (!p || !p2 || !p3)
4011 ret = -TARGET_EFAULT;
4013 /* FIXME - arg5 should be locked, but it isn't clear how to
4014 * do that since it's not guaranteed to be a NULL-terminated
4017 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4018 unlock_user(p, arg1, 0);
4019 unlock_user(p2, arg2, 0);
4020 unlock_user(p3, arg3, 0);
4023 #ifdef TARGET_NR_umount
4024 case TARGET_NR_umount:
4025 if (!(p = lock_user_string(arg1)))
4027 ret = get_errno(umount(p));
4028 unlock_user(p, arg1, 0);
4031 #ifdef TARGET_NR_stime /* not on alpha */
4032 case TARGET_NR_stime:
4035 if (get_user_sal(host_time, arg1))
4037 ret = get_errno(stime(&host_time));
4041 case TARGET_NR_ptrace:
4043 #ifdef TARGET_NR_alarm /* not on alpha */
4044 case TARGET_NR_alarm:
4048 #ifdef TARGET_NR_oldfstat
4049 case TARGET_NR_oldfstat:
4052 #ifdef TARGET_NR_pause /* not on alpha */
4053 case TARGET_NR_pause:
4054 ret = get_errno(pause());
4057 #ifdef TARGET_NR_utime
4058 case TARGET_NR_utime:
4060 struct utimbuf tbuf, *host_tbuf;
4061 struct target_utimbuf *target_tbuf;
4063 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4065 tbuf.actime = tswapl(target_tbuf->actime);
4066 tbuf.modtime = tswapl(target_tbuf->modtime);
4067 unlock_user_struct(target_tbuf, arg2, 0);
4072 if (!(p = lock_user_string(arg1)))
4074 ret = get_errno(utime(p, host_tbuf));
4075 unlock_user(p, arg1, 0);
4079 case TARGET_NR_utimes:
4081 struct timeval *tvp, tv[2];
4083 if (copy_from_user_timeval(&tv[0], arg2)
4084 || copy_from_user_timeval(&tv[1],
4085 arg2 + sizeof(struct target_timeval)))
4091 if (!(p = lock_user_string(arg1)))
4093 ret = get_errno(utimes(p, tvp));
4094 unlock_user(p, arg1, 0);
4097 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4098 case TARGET_NR_futimesat:
4100 struct timeval *tvp, tv[2];
4102 if (copy_from_user_timeval(&tv[0], arg3)
4103 || copy_from_user_timeval(&tv[1],
4104 arg3 + sizeof(struct target_timeval)))
4110 if (!(p = lock_user_string(arg2)))
4112 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4113 unlock_user(p, arg2, 0);
4117 #ifdef TARGET_NR_stty
4118 case TARGET_NR_stty:
4121 #ifdef TARGET_NR_gtty
4122 case TARGET_NR_gtty:
4125 case TARGET_NR_access:
4126 if (!(p = lock_user_string(arg1)))
4128 ret = get_errno(access(p, arg2));
4129 unlock_user(p, arg1, 0);
4131 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4132 case TARGET_NR_faccessat:
4133 if (!(p = lock_user_string(arg2)))
4135 ret = get_errno(sys_faccessat(arg1, p, arg3, arg4));
4136 unlock_user(p, arg2, 0);
4139 #ifdef TARGET_NR_nice /* not on alpha */
4140 case TARGET_NR_nice:
4141 ret = get_errno(nice(arg1));
4144 #ifdef TARGET_NR_ftime
4145 case TARGET_NR_ftime:
4148 case TARGET_NR_sync:
4152 case TARGET_NR_kill:
4153 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
4155 case TARGET_NR_rename:
4158 p = lock_user_string(arg1);
4159 p2 = lock_user_string(arg2);
4161 ret = -TARGET_EFAULT;
4163 ret = get_errno(rename(p, p2));
4164 unlock_user(p2, arg2, 0);
4165 unlock_user(p, arg1, 0);
4168 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4169 case TARGET_NR_renameat:
4172 p = lock_user_string(arg2);
4173 p2 = lock_user_string(arg4);
4175 ret = -TARGET_EFAULT;
4177 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
4178 unlock_user(p2, arg4, 0);
4179 unlock_user(p, arg2, 0);
4183 case TARGET_NR_mkdir:
4184 if (!(p = lock_user_string(arg1)))
4186 ret = get_errno(mkdir(p, arg2));
4187 unlock_user(p, arg1, 0);
4189 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4190 case TARGET_NR_mkdirat:
4191 if (!(p = lock_user_string(arg2)))
4193 ret = get_errno(sys_mkdirat(arg1, p, arg3));
4194 unlock_user(p, arg2, 0);
4197 case TARGET_NR_rmdir:
4198 if (!(p = lock_user_string(arg1)))
4200 ret = get_errno(rmdir(p));
4201 unlock_user(p, arg1, 0);
4204 ret = get_errno(dup(arg1));
4206 case TARGET_NR_pipe:
4209 ret = get_errno(pipe(host_pipe));
4210 if (!is_error(ret)) {
4211 #if defined(TARGET_MIPS)
4212 CPUMIPSState *env = (CPUMIPSState*)cpu_env;
4213 env->active_tc.gpr[3] = host_pipe[1];
4215 #elif defined(TARGET_SH4)
4216 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
4219 if (put_user_s32(host_pipe[0], arg1)
4220 || put_user_s32(host_pipe[1], arg1 + sizeof(host_pipe[0])))
4226 case TARGET_NR_times:
4228 struct target_tms *tmsp;
4230 ret = get_errno(times(&tms));
4232 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
4235 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
4236 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
4237 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
4238 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
4241 ret = host_to_target_clock_t(ret);
4244 #ifdef TARGET_NR_prof
4245 case TARGET_NR_prof:
4248 #ifdef TARGET_NR_signal
4249 case TARGET_NR_signal:
4252 case TARGET_NR_acct:
4254 ret = get_errno(acct(NULL));
4256 if (!(p = lock_user_string(arg1)))
4258 ret = get_errno(acct(path(p)));
4259 unlock_user(p, arg1, 0);
4262 #ifdef TARGET_NR_umount2 /* not on alpha */
4263 case TARGET_NR_umount2:
4264 if (!(p = lock_user_string(arg1)))
4266 ret = get_errno(umount2(p, arg2));
4267 unlock_user(p, arg1, 0);
4270 #ifdef TARGET_NR_lock
4271 case TARGET_NR_lock:
4274 case TARGET_NR_ioctl:
4275 ret = do_ioctl(arg1, arg2, arg3);
4277 case TARGET_NR_fcntl:
4278 ret = do_fcntl(arg1, arg2, arg3);
4280 #ifdef TARGET_NR_mpx
4284 case TARGET_NR_setpgid:
4285 ret = get_errno(setpgid(arg1, arg2));
4287 #ifdef TARGET_NR_ulimit
4288 case TARGET_NR_ulimit:
4291 #ifdef TARGET_NR_oldolduname
4292 case TARGET_NR_oldolduname:
4295 case TARGET_NR_umask:
4296 ret = get_errno(umask(arg1));
4298 case TARGET_NR_chroot:
4299 if (!(p = lock_user_string(arg1)))
4301 ret = get_errno(chroot(p));
4302 unlock_user(p, arg1, 0);
4304 case TARGET_NR_ustat:
4306 case TARGET_NR_dup2:
4307 ret = get_errno(dup2(arg1, arg2));
4309 #ifdef TARGET_NR_getppid /* not on alpha */
4310 case TARGET_NR_getppid:
4311 ret = get_errno(getppid());
4314 case TARGET_NR_getpgrp:
4315 ret = get_errno(getpgrp());
4317 case TARGET_NR_setsid:
4318 ret = get_errno(setsid());
4320 #ifdef TARGET_NR_sigaction
4321 case TARGET_NR_sigaction:
4323 #if !defined(TARGET_MIPS)
4324 struct target_old_sigaction *old_act;
4325 struct target_sigaction act, oact, *pact;
4327 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4329 act._sa_handler = old_act->_sa_handler;
4330 target_siginitset(&act.sa_mask, old_act->sa_mask);
4331 act.sa_flags = old_act->sa_flags;
4332 act.sa_restorer = old_act->sa_restorer;
4333 unlock_user_struct(old_act, arg2, 0);
4338 ret = get_errno(do_sigaction(arg1, pact, &oact));
4339 if (!is_error(ret) && arg3) {
4340 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4342 old_act->_sa_handler = oact._sa_handler;
4343 old_act->sa_mask = oact.sa_mask.sig[0];
4344 old_act->sa_flags = oact.sa_flags;
4345 old_act->sa_restorer = oact.sa_restorer;
4346 unlock_user_struct(old_act, arg3, 1);
4349 struct target_sigaction act, oact, *pact, *old_act;
4352 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4354 act._sa_handler = old_act->_sa_handler;
4355 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
4356 act.sa_flags = old_act->sa_flags;
4357 unlock_user_struct(old_act, arg2, 0);
4363 ret = get_errno(do_sigaction(arg1, pact, &oact));
4365 if (!is_error(ret) && arg3) {
4366 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4368 old_act->_sa_handler = oact._sa_handler;
4369 old_act->sa_flags = oact.sa_flags;
4370 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
4371 old_act->sa_mask.sig[1] = 0;
4372 old_act->sa_mask.sig[2] = 0;
4373 old_act->sa_mask.sig[3] = 0;
4374 unlock_user_struct(old_act, arg3, 1);
4380 case TARGET_NR_rt_sigaction:
4382 struct target_sigaction *act;
4383 struct target_sigaction *oact;
4386 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
4391 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
4392 ret = -TARGET_EFAULT;
4393 goto rt_sigaction_fail;
4397 ret = get_errno(do_sigaction(arg1, act, oact));
4400 unlock_user_struct(act, arg2, 0);
4402 unlock_user_struct(oact, arg3, 1);
4405 #ifdef TARGET_NR_sgetmask /* not on alpha */
4406 case TARGET_NR_sgetmask:
4409 abi_ulong target_set;
4410 sigprocmask(0, NULL, &cur_set);
4411 host_to_target_old_sigset(&target_set, &cur_set);
4416 #ifdef TARGET_NR_ssetmask /* not on alpha */
4417 case TARGET_NR_ssetmask:
4419 sigset_t set, oset, cur_set;
4420 abi_ulong target_set = arg1;
4421 sigprocmask(0, NULL, &cur_set);
4422 target_to_host_old_sigset(&set, &target_set);
4423 sigorset(&set, &set, &cur_set);
4424 sigprocmask(SIG_SETMASK, &set, &oset);
4425 host_to_target_old_sigset(&target_set, &oset);
4430 #ifdef TARGET_NR_sigprocmask
4431 case TARGET_NR_sigprocmask:
4434 sigset_t set, oldset, *set_ptr;
4438 case TARGET_SIG_BLOCK:
4441 case TARGET_SIG_UNBLOCK:
4444 case TARGET_SIG_SETMASK:
4448 ret = -TARGET_EINVAL;
4451 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4453 target_to_host_old_sigset(&set, p);
4454 unlock_user(p, arg2, 0);
4460 ret = get_errno(sigprocmask(arg1, set_ptr, &oldset));
4461 if (!is_error(ret) && arg3) {
4462 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4464 host_to_target_old_sigset(p, &oldset);
4465 unlock_user(p, arg3, sizeof(target_sigset_t));
4470 case TARGET_NR_rt_sigprocmask:
4473 sigset_t set, oldset, *set_ptr;
4477 case TARGET_SIG_BLOCK:
4480 case TARGET_SIG_UNBLOCK:
4483 case TARGET_SIG_SETMASK:
4487 ret = -TARGET_EINVAL;
4490 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4492 target_to_host_sigset(&set, p);
4493 unlock_user(p, arg2, 0);
4499 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
4500 if (!is_error(ret) && arg3) {
4501 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4503 host_to_target_sigset(p, &oldset);
4504 unlock_user(p, arg3, sizeof(target_sigset_t));
4508 #ifdef TARGET_NR_sigpending
4509 case TARGET_NR_sigpending:
4512 ret = get_errno(sigpending(&set));
4513 if (!is_error(ret)) {
4514 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4516 host_to_target_old_sigset(p, &set);
4517 unlock_user(p, arg1, sizeof(target_sigset_t));
4522 case TARGET_NR_rt_sigpending:
4525 ret = get_errno(sigpending(&set));
4526 if (!is_error(ret)) {
4527 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4529 host_to_target_sigset(p, &set);
4530 unlock_user(p, arg1, sizeof(target_sigset_t));
4534 #ifdef TARGET_NR_sigsuspend
4535 case TARGET_NR_sigsuspend:
4538 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4540 target_to_host_old_sigset(&set, p);
4541 unlock_user(p, arg1, 0);
4542 ret = get_errno(sigsuspend(&set));
4546 case TARGET_NR_rt_sigsuspend:
4549 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4551 target_to_host_sigset(&set, p);
4552 unlock_user(p, arg1, 0);
4553 ret = get_errno(sigsuspend(&set));
4556 case TARGET_NR_rt_sigtimedwait:
4559 struct timespec uts, *puts;
4562 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4564 target_to_host_sigset(&set, p);
4565 unlock_user(p, arg1, 0);
4568 target_to_host_timespec(puts, arg3);
4572 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
4573 if (!is_error(ret) && arg2) {
4574 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
4576 host_to_target_siginfo(p, &uinfo);
4577 unlock_user(p, arg2, sizeof(target_siginfo_t));
4581 case TARGET_NR_rt_sigqueueinfo:
4584 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
4586 target_to_host_siginfo(&uinfo, p);
4587 unlock_user(p, arg1, 0);
4588 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
4591 #ifdef TARGET_NR_sigreturn
4592 case TARGET_NR_sigreturn:
4593 /* NOTE: ret is eax, so no transcoding needs to be done */
4594 ret = do_sigreturn(cpu_env);
4597 case TARGET_NR_rt_sigreturn:
4598 /* NOTE: ret is eax, so no transcoding needs to be done */
4599 ret = do_rt_sigreturn(cpu_env);
4601 case TARGET_NR_sethostname:
4602 if (!(p = lock_user_string(arg1)))
4604 ret = get_errno(sethostname(p, arg2));
4605 unlock_user(p, arg1, 0);
4607 case TARGET_NR_setrlimit:
4609 /* XXX: convert resource ? */
4610 int resource = arg1;
4611 struct target_rlimit *target_rlim;
4613 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
4615 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4616 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4617 unlock_user_struct(target_rlim, arg2, 0);
4618 ret = get_errno(setrlimit(resource, &rlim));
4621 case TARGET_NR_getrlimit:
4623 /* XXX: convert resource ? */
4624 int resource = arg1;
4625 struct target_rlimit *target_rlim;
4628 ret = get_errno(getrlimit(resource, &rlim));
4629 if (!is_error(ret)) {
4630 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
4632 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4633 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4634 unlock_user_struct(target_rlim, arg2, 1);
4638 case TARGET_NR_getrusage:
4640 struct rusage rusage;
4641 ret = get_errno(getrusage(arg1, &rusage));
4642 if (!is_error(ret)) {
4643 host_to_target_rusage(arg2, &rusage);
4647 case TARGET_NR_gettimeofday:
4650 ret = get_errno(gettimeofday(&tv, NULL));
4651 if (!is_error(ret)) {
4652 if (copy_to_user_timeval(arg1, &tv))
4657 case TARGET_NR_settimeofday:
4660 if (copy_from_user_timeval(&tv, arg1))
4662 ret = get_errno(settimeofday(&tv, NULL));
4665 #ifdef TARGET_NR_select
4666 case TARGET_NR_select:
4668 struct target_sel_arg_struct *sel;
4669 abi_ulong inp, outp, exp, tvp;
4672 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
4674 nsel = tswapl(sel->n);
4675 inp = tswapl(sel->inp);
4676 outp = tswapl(sel->outp);
4677 exp = tswapl(sel->exp);
4678 tvp = tswapl(sel->tvp);
4679 unlock_user_struct(sel, arg1, 0);
4680 ret = do_select(nsel, inp, outp, exp, tvp);
4684 case TARGET_NR_symlink:
4687 p = lock_user_string(arg1);
4688 p2 = lock_user_string(arg2);
4690 ret = -TARGET_EFAULT;
4692 ret = get_errno(symlink(p, p2));
4693 unlock_user(p2, arg2, 0);
4694 unlock_user(p, arg1, 0);
4697 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
4698 case TARGET_NR_symlinkat:
4701 p = lock_user_string(arg1);
4702 p2 = lock_user_string(arg3);
4704 ret = -TARGET_EFAULT;
4706 ret = get_errno(sys_symlinkat(p, arg2, p2));
4707 unlock_user(p2, arg3, 0);
4708 unlock_user(p, arg1, 0);
4712 #ifdef TARGET_NR_oldlstat
4713 case TARGET_NR_oldlstat:
4716 case TARGET_NR_readlink:
4719 p = lock_user_string(arg1);
4720 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
4722 ret = -TARGET_EFAULT;
4724 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0)
4725 ret = get_errno(snprintf((char *)p2, arg3, "%s", exec_path));
4727 ret = get_errno(readlink(path(p), p2, arg3));
4730 unlock_user(p2, arg2, ret);
4731 unlock_user(p, arg1, 0);
4734 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
4735 case TARGET_NR_readlinkat:
4738 p = lock_user_string(arg2);
4739 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
4741 ret = -TARGET_EFAULT;
4743 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
4744 unlock_user(p2, arg3, ret);
4745 unlock_user(p, arg2, 0);
4749 #ifdef TARGET_NR_uselib
4750 case TARGET_NR_uselib:
4753 #ifdef TARGET_NR_swapon
4754 case TARGET_NR_swapon:
4755 if (!(p = lock_user_string(arg1)))
4757 ret = get_errno(swapon(p, arg2));
4758 unlock_user(p, arg1, 0);
4761 case TARGET_NR_reboot:
4763 #ifdef TARGET_NR_readdir
4764 case TARGET_NR_readdir:
4767 #ifdef TARGET_NR_mmap
4768 case TARGET_NR_mmap:
4769 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS)
4772 abi_ulong v1, v2, v3, v4, v5, v6;
4773 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
4781 unlock_user(v, arg1, 0);
4782 ret = get_errno(target_mmap(v1, v2, v3,
4783 target_to_host_bitmask(v4, mmap_flags_tbl),
4787 ret = get_errno(target_mmap(arg1, arg2, arg3,
4788 target_to_host_bitmask(arg4, mmap_flags_tbl),
4794 #ifdef TARGET_NR_mmap2
4795 case TARGET_NR_mmap2:
4797 #define MMAP_SHIFT 12
4799 ret = get_errno(target_mmap(arg1, arg2, arg3,
4800 target_to_host_bitmask(arg4, mmap_flags_tbl),
4802 arg6 << MMAP_SHIFT));
4805 case TARGET_NR_munmap:
4806 ret = get_errno(target_munmap(arg1, arg2));
4808 case TARGET_NR_mprotect:
4809 ret = get_errno(target_mprotect(arg1, arg2, arg3));
4811 #ifdef TARGET_NR_mremap
4812 case TARGET_NR_mremap:
4813 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
4816 /* ??? msync/mlock/munlock are broken for softmmu. */
4817 #ifdef TARGET_NR_msync
4818 case TARGET_NR_msync:
4819 ret = get_errno(msync(g2h(arg1), arg2, arg3));
4822 #ifdef TARGET_NR_mlock
4823 case TARGET_NR_mlock:
4824 ret = get_errno(mlock(g2h(arg1), arg2));
4827 #ifdef TARGET_NR_munlock
4828 case TARGET_NR_munlock:
4829 ret = get_errno(munlock(g2h(arg1), arg2));
4832 #ifdef TARGET_NR_mlockall
4833 case TARGET_NR_mlockall:
4834 ret = get_errno(mlockall(arg1));
4837 #ifdef TARGET_NR_munlockall
4838 case TARGET_NR_munlockall:
4839 ret = get_errno(munlockall());
4842 case TARGET_NR_truncate:
4843 if (!(p = lock_user_string(arg1)))
4845 ret = get_errno(truncate(p, arg2));
4846 unlock_user(p, arg1, 0);
4848 case TARGET_NR_ftruncate:
4849 ret = get_errno(ftruncate(arg1, arg2));
4851 case TARGET_NR_fchmod:
4852 ret = get_errno(fchmod(arg1, arg2));
4854 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
4855 case TARGET_NR_fchmodat:
4856 if (!(p = lock_user_string(arg2)))
4858 ret = get_errno(sys_fchmodat(arg1, p, arg3, arg4));
4859 unlock_user(p, arg2, 0);
4862 case TARGET_NR_getpriority:
4863 /* libc does special remapping of the return value of
4864 * sys_getpriority() so it's just easiest to call
4865 * sys_getpriority() directly rather than through libc. */
4866 ret = sys_getpriority(arg1, arg2);
4868 case TARGET_NR_setpriority:
4869 ret = get_errno(setpriority(arg1, arg2, arg3));
4871 #ifdef TARGET_NR_profil
4872 case TARGET_NR_profil:
4875 case TARGET_NR_statfs:
4876 if (!(p = lock_user_string(arg1)))
4878 ret = get_errno(statfs(path(p), &stfs));
4879 unlock_user(p, arg1, 0);
4881 if (!is_error(ret)) {
4882 struct target_statfs *target_stfs;
4884 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
4886 __put_user(stfs.f_type, &target_stfs->f_type);
4887 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
4888 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
4889 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
4890 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
4891 __put_user(stfs.f_files, &target_stfs->f_files);
4892 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
4893 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
4894 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
4895 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
4896 unlock_user_struct(target_stfs, arg2, 1);
4899 case TARGET_NR_fstatfs:
4900 ret = get_errno(fstatfs(arg1, &stfs));
4901 goto convert_statfs;
4902 #ifdef TARGET_NR_statfs64
4903 case TARGET_NR_statfs64:
4904 if (!(p = lock_user_string(arg1)))
4906 ret = get_errno(statfs(path(p), &stfs));
4907 unlock_user(p, arg1, 0);
4909 if (!is_error(ret)) {
4910 struct target_statfs64 *target_stfs;
4912 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
4914 __put_user(stfs.f_type, &target_stfs->f_type);
4915 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
4916 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
4917 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
4918 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
4919 __put_user(stfs.f_files, &target_stfs->f_files);
4920 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
4921 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
4922 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
4923 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
4924 unlock_user_struct(target_stfs, arg3, 1);
4927 case TARGET_NR_fstatfs64:
4928 ret = get_errno(fstatfs(arg1, &stfs));
4929 goto convert_statfs64;
4931 #ifdef TARGET_NR_ioperm
4932 case TARGET_NR_ioperm:
4935 #ifdef TARGET_NR_socketcall
4936 case TARGET_NR_socketcall:
4937 ret = do_socketcall(arg1, arg2);
4940 #ifdef TARGET_NR_accept
4941 case TARGET_NR_accept:
4942 ret = do_accept(arg1, arg2, arg3);
4945 #ifdef TARGET_NR_bind
4946 case TARGET_NR_bind:
4947 ret = do_bind(arg1, arg2, arg3);
4950 #ifdef TARGET_NR_connect
4951 case TARGET_NR_connect:
4952 ret = do_connect(arg1, arg2, arg3);
4955 #ifdef TARGET_NR_getpeername
4956 case TARGET_NR_getpeername:
4957 ret = do_getpeername(arg1, arg2, arg3);
4960 #ifdef TARGET_NR_getsockname
4961 case TARGET_NR_getsockname:
4962 ret = do_getsockname(arg1, arg2, arg3);
4965 #ifdef TARGET_NR_getsockopt
4966 case TARGET_NR_getsockopt:
4967 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
4970 #ifdef TARGET_NR_listen
4971 case TARGET_NR_listen:
4972 ret = get_errno(listen(arg1, arg2));
4975 #ifdef TARGET_NR_recv
4976 case TARGET_NR_recv:
4977 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
4980 #ifdef TARGET_NR_recvfrom
4981 case TARGET_NR_recvfrom:
4982 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
4985 #ifdef TARGET_NR_recvmsg
4986 case TARGET_NR_recvmsg:
4987 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
4990 #ifdef TARGET_NR_send
4991 case TARGET_NR_send:
4992 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
4995 #ifdef TARGET_NR_sendmsg
4996 case TARGET_NR_sendmsg:
4997 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
5000 #ifdef TARGET_NR_sendto
5001 case TARGET_NR_sendto:
5002 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
5005 #ifdef TARGET_NR_shutdown
5006 case TARGET_NR_shutdown:
5007 ret = get_errno(shutdown(arg1, arg2));
5010 #ifdef TARGET_NR_socket
5011 case TARGET_NR_socket:
5012 ret = do_socket(arg1, arg2, arg3);
5015 #ifdef TARGET_NR_socketpair
5016 case TARGET_NR_socketpair:
5017 ret = do_socketpair(arg1, arg2, arg3, arg4);
5020 #ifdef TARGET_NR_setsockopt
5021 case TARGET_NR_setsockopt:
5022 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
5026 case TARGET_NR_syslog:
5027 if (!(p = lock_user_string(arg2)))
5029 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
5030 unlock_user(p, arg2, 0);
5033 case TARGET_NR_setitimer:
5035 struct itimerval value, ovalue, *pvalue;
5039 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
5040 || copy_from_user_timeval(&pvalue->it_value,
5041 arg2 + sizeof(struct target_timeval)))
5046 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
5047 if (!is_error(ret) && arg3) {
5048 if (copy_to_user_timeval(arg3,
5049 &ovalue.it_interval)
5050 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
5056 case TARGET_NR_getitimer:
5058 struct itimerval value;
5060 ret = get_errno(getitimer(arg1, &value));
5061 if (!is_error(ret) && arg2) {
5062 if (copy_to_user_timeval(arg2,
5064 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
5070 case TARGET_NR_stat:
5071 if (!(p = lock_user_string(arg1)))
5073 ret = get_errno(stat(path(p), &st));
5074 unlock_user(p, arg1, 0);
5076 case TARGET_NR_lstat:
5077 if (!(p = lock_user_string(arg1)))
5079 ret = get_errno(lstat(path(p), &st));
5080 unlock_user(p, arg1, 0);
5082 case TARGET_NR_fstat:
5084 ret = get_errno(fstat(arg1, &st));
5086 if (!is_error(ret)) {
5087 struct target_stat *target_st;
5089 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
5091 __put_user(st.st_dev, &target_st->st_dev);
5092 __put_user(st.st_ino, &target_st->st_ino);
5093 __put_user(st.st_mode, &target_st->st_mode);
5094 __put_user(st.st_uid, &target_st->st_uid);
5095 __put_user(st.st_gid, &target_st->st_gid);
5096 __put_user(st.st_nlink, &target_st->st_nlink);
5097 __put_user(st.st_rdev, &target_st->st_rdev);
5098 __put_user(st.st_size, &target_st->st_size);
5099 __put_user(st.st_blksize, &target_st->st_blksize);
5100 __put_user(st.st_blocks, &target_st->st_blocks);
5101 __put_user(st.st_atime, &target_st->target_st_atime);
5102 __put_user(st.st_mtime, &target_st->target_st_mtime);
5103 __put_user(st.st_ctime, &target_st->target_st_ctime);
5104 unlock_user_struct(target_st, arg2, 1);
5108 #ifdef TARGET_NR_olduname
5109 case TARGET_NR_olduname:
5112 #ifdef TARGET_NR_iopl
5113 case TARGET_NR_iopl:
5116 case TARGET_NR_vhangup:
5117 ret = get_errno(vhangup());
5119 #ifdef TARGET_NR_idle
5120 case TARGET_NR_idle:
5123 #ifdef TARGET_NR_syscall
5124 case TARGET_NR_syscall:
5125 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
5128 case TARGET_NR_wait4:
5131 abi_long status_ptr = arg2;
5132 struct rusage rusage, *rusage_ptr;
5133 abi_ulong target_rusage = arg4;
5135 rusage_ptr = &rusage;
5138 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
5139 if (!is_error(ret)) {
5141 if (put_user_s32(status, status_ptr))
5145 host_to_target_rusage(target_rusage, &rusage);
5149 #ifdef TARGET_NR_swapoff
5150 case TARGET_NR_swapoff:
5151 if (!(p = lock_user_string(arg1)))
5153 ret = get_errno(swapoff(p));
5154 unlock_user(p, arg1, 0);
5157 case TARGET_NR_sysinfo:
5159 struct target_sysinfo *target_value;
5160 struct sysinfo value;
5161 ret = get_errno(sysinfo(&value));
5162 if (!is_error(ret) && arg1)
5164 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
5166 __put_user(value.uptime, &target_value->uptime);
5167 __put_user(value.loads[0], &target_value->loads[0]);
5168 __put_user(value.loads[1], &target_value->loads[1]);
5169 __put_user(value.loads[2], &target_value->loads[2]);
5170 __put_user(value.totalram, &target_value->totalram);
5171 __put_user(value.freeram, &target_value->freeram);
5172 __put_user(value.sharedram, &target_value->sharedram);
5173 __put_user(value.bufferram, &target_value->bufferram);
5174 __put_user(value.totalswap, &target_value->totalswap);
5175 __put_user(value.freeswap, &target_value->freeswap);
5176 __put_user(value.procs, &target_value->procs);
5177 __put_user(value.totalhigh, &target_value->totalhigh);
5178 __put_user(value.freehigh, &target_value->freehigh);
5179 __put_user(value.mem_unit, &target_value->mem_unit);
5180 unlock_user_struct(target_value, arg1, 1);
5184 #ifdef TARGET_NR_ipc
5186 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
5189 #ifdef TARGET_NR_semget
5190 case TARGET_NR_semget:
5191 ret = get_errno(semget(arg1, arg2, arg3));
5194 #ifdef TARGET_NR_semop
5195 case TARGET_NR_semop:
5196 ret = get_errno(do_semop(arg1, arg2, arg3));
5199 #ifdef TARGET_NR_semctl
5200 case TARGET_NR_semctl:
5201 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
5204 #ifdef TARGET_NR_msgctl
5205 case TARGET_NR_msgctl:
5206 ret = do_msgctl(arg1, arg2, arg3);
5209 #ifdef TARGET_NR_msgget
5210 case TARGET_NR_msgget:
5211 ret = get_errno(msgget(arg1, arg2));
5214 #ifdef TARGET_NR_msgrcv
5215 case TARGET_NR_msgrcv:
5216 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
5219 #ifdef TARGET_NR_msgsnd
5220 case TARGET_NR_msgsnd:
5221 ret = do_msgsnd(arg1, arg2, arg3, arg4);
5224 #ifdef TARGET_NR_shmget
5225 case TARGET_NR_shmget:
5226 ret = get_errno(shmget(arg1, arg2, arg3));
5229 #ifdef TARGET_NR_shmctl
5230 case TARGET_NR_shmctl:
5231 ret = do_shmctl(arg1, arg2, arg3);
5234 #ifdef TARGET_NR_shmat
5235 case TARGET_NR_shmat:
5240 err = do_shmat(arg1, arg2, arg3, &_ret);
5241 ret = err ? err : _ret;
5245 #ifdef TARGET_NR_shmdt
5246 case TARGET_NR_shmdt:
5247 ret = do_shmdt(arg1);
5250 case TARGET_NR_fsync:
5251 ret = get_errno(fsync(arg1));
5253 case TARGET_NR_clone:
5254 #if defined(TARGET_SH4)
5255 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
5256 #elif defined(TARGET_CRIS)
5257 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
5259 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
5262 #ifdef __NR_exit_group
5263 /* new thread calls */
5264 case TARGET_NR_exit_group:
5268 gdb_exit(cpu_env, arg1);
5269 ret = get_errno(exit_group(arg1));
5272 case TARGET_NR_setdomainname:
5273 if (!(p = lock_user_string(arg1)))
5275 ret = get_errno(setdomainname(p, arg2));
5276 unlock_user(p, arg1, 0);
5278 case TARGET_NR_uname:
5279 /* no need to transcode because we use the linux syscall */
5281 struct new_utsname * buf;
5283 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
5285 ret = get_errno(sys_uname(buf));
5286 if (!is_error(ret)) {
5287 /* Overwrite the native machine name with whatever is being
5289 strcpy (buf->machine, UNAME_MACHINE);
5290 /* Allow the user to override the reported release. */
5291 if (qemu_uname_release && *qemu_uname_release)
5292 strcpy (buf->release, qemu_uname_release);
5294 unlock_user_struct(buf, arg1, 1);
5298 case TARGET_NR_modify_ldt:
5299 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
5301 #if !defined(TARGET_X86_64)
5302 case TARGET_NR_vm86old:
5304 case TARGET_NR_vm86:
5305 ret = do_vm86(cpu_env, arg1, arg2);
5309 case TARGET_NR_adjtimex:
5311 #ifdef TARGET_NR_create_module
5312 case TARGET_NR_create_module:
5314 case TARGET_NR_init_module:
5315 case TARGET_NR_delete_module:
5316 #ifdef TARGET_NR_get_kernel_syms
5317 case TARGET_NR_get_kernel_syms:
5320 case TARGET_NR_quotactl:
5322 case TARGET_NR_getpgid:
5323 ret = get_errno(getpgid(arg1));
5325 case TARGET_NR_fchdir:
5326 ret = get_errno(fchdir(arg1));
5328 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5329 case TARGET_NR_bdflush:
5332 #ifdef TARGET_NR_sysfs
5333 case TARGET_NR_sysfs:
5336 case TARGET_NR_personality:
5337 ret = get_errno(personality(arg1));
5339 #ifdef TARGET_NR_afs_syscall
5340 case TARGET_NR_afs_syscall:
5343 #ifdef TARGET_NR__llseek /* Not on alpha */
5344 case TARGET_NR__llseek:
5346 #if defined (__x86_64__)
5347 ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5));
5348 if (put_user_s64(ret, arg4))
5352 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
5353 if (put_user_s64(res, arg4))
5359 case TARGET_NR_getdents:
5360 #if TARGET_ABI_BITS != 32
5362 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5364 struct target_dirent *target_dirp;
5365 struct linux_dirent *dirp;
5366 abi_long count = arg3;
5368 dirp = malloc(count);
5370 ret = -TARGET_ENOMEM;
5374 ret = get_errno(sys_getdents(arg1, dirp, count));
5375 if (!is_error(ret)) {
5376 struct linux_dirent *de;
5377 struct target_dirent *tde;
5379 int reclen, treclen;
5380 int count1, tnamelen;
5384 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5388 reclen = de->d_reclen;
5389 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
5390 tde->d_reclen = tswap16(treclen);
5391 tde->d_ino = tswapl(de->d_ino);
5392 tde->d_off = tswapl(de->d_off);
5393 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
5396 /* XXX: may not be correct */
5397 pstrcpy(tde->d_name, tnamelen, de->d_name);
5398 de = (struct linux_dirent *)((char *)de + reclen);
5400 tde = (struct target_dirent *)((char *)tde + treclen);
5404 unlock_user(target_dirp, arg2, ret);
5410 struct linux_dirent *dirp;
5411 abi_long count = arg3;
5413 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5415 ret = get_errno(sys_getdents(arg1, dirp, count));
5416 if (!is_error(ret)) {
5417 struct linux_dirent *de;
5422 reclen = de->d_reclen;
5425 de->d_reclen = tswap16(reclen);
5426 tswapls(&de->d_ino);
5427 tswapls(&de->d_off);
5428 de = (struct linux_dirent *)((char *)de + reclen);
5432 unlock_user(dirp, arg2, ret);
5436 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5437 case TARGET_NR_getdents64:
5439 struct linux_dirent64 *dirp;
5440 abi_long count = arg3;
5441 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5443 ret = get_errno(sys_getdents64(arg1, dirp, count));
5444 if (!is_error(ret)) {
5445 struct linux_dirent64 *de;
5450 reclen = de->d_reclen;
5453 de->d_reclen = tswap16(reclen);
5454 tswap64s((uint64_t *)&de->d_ino);
5455 tswap64s((uint64_t *)&de->d_off);
5456 de = (struct linux_dirent64 *)((char *)de + reclen);
5460 unlock_user(dirp, arg2, ret);
5463 #endif /* TARGET_NR_getdents64 */
5464 #ifdef TARGET_NR__newselect
5465 case TARGET_NR__newselect:
5466 ret = do_select(arg1, arg2, arg3, arg4, arg5);
5469 #ifdef TARGET_NR_poll
5470 case TARGET_NR_poll:
5472 struct target_pollfd *target_pfd;
5473 unsigned int nfds = arg2;
5478 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
5481 pfd = alloca(sizeof(struct pollfd) * nfds);
5482 for(i = 0; i < nfds; i++) {
5483 pfd[i].fd = tswap32(target_pfd[i].fd);
5484 pfd[i].events = tswap16(target_pfd[i].events);
5486 ret = get_errno(poll(pfd, nfds, timeout));
5487 if (!is_error(ret)) {
5488 for(i = 0; i < nfds; i++) {
5489 target_pfd[i].revents = tswap16(pfd[i].revents);
5491 ret += nfds * (sizeof(struct target_pollfd)
5492 - sizeof(struct pollfd));
5494 unlock_user(target_pfd, arg1, ret);
5498 case TARGET_NR_flock:
5499 /* NOTE: the flock constant seems to be the same for every
   Linux platform, so arg2 is passed through to the host
   unchanged. */
5501 ret = get_errno(flock(arg1, arg2));
5503 case TARGET_NR_readv:
5508 vec = alloca(count * sizeof(struct iovec));
5509 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
5511 ret = get_errno(readv(arg1, vec, count));
5512 unlock_iovec(vec, arg2, count, 1);
5515 case TARGET_NR_writev:
5520 vec = alloca(count * sizeof(struct iovec));
5521 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
5523 ret = get_errno(writev(arg1, vec, count));
5524 unlock_iovec(vec, arg2, count, 0);
5527 case TARGET_NR_getsid:
5528 ret = get_errno(getsid(arg1));
5530 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5531 case TARGET_NR_fdatasync:
5532 ret = get_errno(fdatasync(arg1));
5535 case TARGET_NR__sysctl:
5536 /* We don't implement this, but ENOTDIR is always a safe
   return value, so report that instead of ENOSYS. */
5538 ret = -TARGET_ENOTDIR;
5540 case TARGET_NR_sched_setparam:
5542 struct sched_param *target_schp;
5543 struct sched_param schp;
5545 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
5547 schp.sched_priority = tswap32(target_schp->sched_priority);
5548 unlock_user_struct(target_schp, arg2, 0);
5549 ret = get_errno(sched_setparam(arg1, &schp));
5552 case TARGET_NR_sched_getparam:
5554 struct sched_param *target_schp;
5555 struct sched_param schp;
5556 ret = get_errno(sched_getparam(arg1, &schp));
5557 if (!is_error(ret)) {
5558 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
5560 target_schp->sched_priority = tswap32(schp.sched_priority);
5561 unlock_user_struct(target_schp, arg2, 1);
5565 case TARGET_NR_sched_setscheduler:
5567 struct sched_param *target_schp;
5568 struct sched_param schp;
5569 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
5571 schp.sched_priority = tswap32(target_schp->sched_priority);
5572 unlock_user_struct(target_schp, arg3, 0);
5573 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
5576 case TARGET_NR_sched_getscheduler:
5577 ret = get_errno(sched_getscheduler(arg1));
5579 case TARGET_NR_sched_yield:
5580 ret = get_errno(sched_yield());
5582 case TARGET_NR_sched_get_priority_max:
5583 ret = get_errno(sched_get_priority_max(arg1));
5585 case TARGET_NR_sched_get_priority_min:
5586 ret = get_errno(sched_get_priority_min(arg1));
5588 case TARGET_NR_sched_rr_get_interval:
5591 ret = get_errno(sched_rr_get_interval(arg1, &ts));
5592 if (!is_error(ret)) {
5593 host_to_target_timespec(arg2, &ts);
5597 case TARGET_NR_nanosleep:
5599 struct timespec req, rem;
5600 target_to_host_timespec(&req, arg1);
5601 ret = get_errno(nanosleep(&req, &rem));
5602 if (is_error(ret) && arg2) {
5603 host_to_target_timespec(arg2, &rem);
5607 #ifdef TARGET_NR_query_module
5608 case TARGET_NR_query_module:
5611 #ifdef TARGET_NR_nfsservctl
5612 case TARGET_NR_nfsservctl:
5615 case TARGET_NR_prctl:
5618 case PR_GET_PDEATHSIG:
5621 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
5622 if (!is_error(ret) && arg2
5623 && put_user_ual(deathsig, arg2))
5628 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
5632 #ifdef TARGET_NR_arch_prctl
5633 case TARGET_NR_arch_prctl:
5634 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
5635 ret = do_arch_prctl(cpu_env, arg1, arg2);
5641 #ifdef TARGET_NR_pread
5642 case TARGET_NR_pread:
5644 if (((CPUARMState *)cpu_env)->eabi)
5647 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5649 ret = get_errno(pread(arg1, p, arg3, arg4));
5650 unlock_user(p, arg2, ret);
5652 case TARGET_NR_pwrite:
5654 if (((CPUARMState *)cpu_env)->eabi)
5657 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5659 ret = get_errno(pwrite(arg1, p, arg3, arg4));
5660 unlock_user(p, arg2, 0);
5663 #ifdef TARGET_NR_pread64
5664 case TARGET_NR_pread64:
5665 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5667 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
5668 unlock_user(p, arg2, ret);
5670 case TARGET_NR_pwrite64:
5671 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5673 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
5674 unlock_user(p, arg2, 0);
5677 case TARGET_NR_getcwd:
5678 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
5680 ret = get_errno(sys_getcwd1(p, arg2));
5681 unlock_user(p, arg1, ret);
5683 case TARGET_NR_capget:
5685 case TARGET_NR_capset:
5687 case TARGET_NR_sigaltstack:
5688 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
5689 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
5690 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
5695 case TARGET_NR_sendfile:
5697 #ifdef TARGET_NR_getpmsg
5698 case TARGET_NR_getpmsg:
5701 #ifdef TARGET_NR_putpmsg
5702 case TARGET_NR_putpmsg:
5705 #ifdef TARGET_NR_vfork
5706 case TARGET_NR_vfork:
5707 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
5711 #ifdef TARGET_NR_ugetrlimit
5712 case TARGET_NR_ugetrlimit:
5715 ret = get_errno(getrlimit(arg1, &rlim));
5716 if (!is_error(ret)) {
5717 struct target_rlimit *target_rlim;
5718 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5720 target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
5721 target_rlim->rlim_max = tswapl(rlim.rlim_max);
5722 unlock_user_struct(target_rlim, arg2, 1);
5727 #ifdef TARGET_NR_truncate64
5728 case TARGET_NR_truncate64:
5729 if (!(p = lock_user_string(arg1)))
5731 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
5732 unlock_user(p, arg1, 0);
5735 #ifdef TARGET_NR_ftruncate64
5736 case TARGET_NR_ftruncate64:
5737 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
5740 #ifdef TARGET_NR_stat64
5741 case TARGET_NR_stat64:
5742 if (!(p = lock_user_string(arg1)))
5744 ret = get_errno(stat(path(p), &st));
5745 unlock_user(p, arg1, 0);
5747 ret = host_to_target_stat64(cpu_env, arg2, &st);
5750 #ifdef TARGET_NR_lstat64
5751 case TARGET_NR_lstat64:
5752 if (!(p = lock_user_string(arg1)))
5754 ret = get_errno(lstat(path(p), &st));
5755 unlock_user(p, arg1, 0);
5757 ret = host_to_target_stat64(cpu_env, arg2, &st);
5760 #ifdef TARGET_NR_fstat64
5761 case TARGET_NR_fstat64:
5762 ret = get_errno(fstat(arg1, &st));
5764 ret = host_to_target_stat64(cpu_env, arg2, &st);
5767 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
5768 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
5769 #ifdef TARGET_NR_fstatat64
5770 case TARGET_NR_fstatat64:
5772 #ifdef TARGET_NR_newfstatat
5773 case TARGET_NR_newfstatat:
5775 if (!(p = lock_user_string(arg2)))
5777 #ifdef __NR_fstatat64
5778 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
5780 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
5783 ret = host_to_target_stat64(cpu_env, arg3, &st);
5787 case TARGET_NR_lchown:
5788 if (!(p = lock_user_string(arg1)))
5790 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
5791 unlock_user(p, arg1, 0);
5793 case TARGET_NR_getuid:
5794 ret = get_errno(high2lowuid(getuid()));
5796 case TARGET_NR_getgid:
5797 ret = get_errno(high2lowgid(getgid()));
5799 case TARGET_NR_geteuid:
5800 ret = get_errno(high2lowuid(geteuid()));
5802 case TARGET_NR_getegid:
5803 ret = get_errno(high2lowgid(getegid()));
5805 case TARGET_NR_setreuid:
5806 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
5808 case TARGET_NR_setregid:
5809 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
5811 case TARGET_NR_getgroups:
5813 int gidsetsize = arg1;
5814 uint16_t *target_grouplist;
5818 grouplist = alloca(gidsetsize * sizeof(gid_t));
5819 ret = get_errno(getgroups(gidsetsize, grouplist));
5820 if (gidsetsize == 0)
5822 if (!is_error(ret)) {
5823 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
5824 if (!target_grouplist)
5826 for(i = 0;i < ret; i++)
5827 target_grouplist[i] = tswap16(grouplist[i]);
5828 unlock_user(target_grouplist, arg2, gidsetsize * 2);
5832 case TARGET_NR_setgroups:
5834 int gidsetsize = arg1;
5835 uint16_t *target_grouplist;
5839 grouplist = alloca(gidsetsize * sizeof(gid_t));
5840 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
5841 if (!target_grouplist) {
5842 ret = -TARGET_EFAULT;
5845 for(i = 0;i < gidsetsize; i++)
5846 grouplist[i] = tswap16(target_grouplist[i]);
5847 unlock_user(target_grouplist, arg2, 0);
5848 ret = get_errno(setgroups(gidsetsize, grouplist));
5851 case TARGET_NR_fchown:
5852 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
5854 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
5855 case TARGET_NR_fchownat:
5856 if (!(p = lock_user_string(arg2)))
5858 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
5859 unlock_user(p, arg2, 0);
5862 #ifdef TARGET_NR_setresuid
5863 case TARGET_NR_setresuid:
5864 ret = get_errno(setresuid(low2highuid(arg1),
5866 low2highuid(arg3)));
5869 #ifdef TARGET_NR_getresuid
5870 case TARGET_NR_getresuid:
5872 uid_t ruid, euid, suid;
5873 ret = get_errno(getresuid(&ruid, &euid, &suid));
5874 if (!is_error(ret)) {
5875 if (put_user_u16(high2lowuid(ruid), arg1)
5876 || put_user_u16(high2lowuid(euid), arg2)
5877 || put_user_u16(high2lowuid(suid), arg3))
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /* setresgid(2) sets the real, effective and saved gids in one
           call; each 16-bit target gid must be widened to the host
           gid_t.  Fixed here: the guard macro previously tested
           TARGET_NR_getresgid (the wrong syscall), and the egid (arg2)
           argument was missing from the host call. */
        ret = get_errno(setresgid(low2highgid(arg1),
                                  low2highgid(arg2),
                                  low2highgid(arg3)));
        break;
#endif
5890 #ifdef TARGET_NR_getresgid
5891 case TARGET_NR_getresgid:
5893 gid_t rgid, egid, sgid;
5894 ret = get_errno(getresgid(&rgid, &egid, &sgid));
5895 if (!is_error(ret)) {
5896 if (put_user_u16(high2lowgid(rgid), arg1)
5897 || put_user_u16(high2lowgid(egid), arg2)
5898 || put_user_u16(high2lowgid(sgid), arg3))
5904 case TARGET_NR_chown:
5905 if (!(p = lock_user_string(arg1)))
5907 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
5908 unlock_user(p, arg1, 0);
5910 case TARGET_NR_setuid:
5911 ret = get_errno(setuid(low2highuid(arg1)));
5913 case TARGET_NR_setgid:
5914 ret = get_errno(setgid(low2highgid(arg1)));
5916 case TARGET_NR_setfsuid:
5917 ret = get_errno(setfsuid(arg1));
5919 case TARGET_NR_setfsgid:
5920 ret = get_errno(setfsgid(arg1));
5922 #endif /* USE_UID16 */
5924 #ifdef TARGET_NR_lchown32
5925 case TARGET_NR_lchown32:
5926 if (!(p = lock_user_string(arg1)))
5928 ret = get_errno(lchown(p, arg2, arg3));
5929 unlock_user(p, arg1, 0);
5932 #ifdef TARGET_NR_getuid32
5933 case TARGET_NR_getuid32:
5934 ret = get_errno(getuid());
5938 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
5939 /* Alpha specific */
5940 case TARGET_NR_getxuid:
5944 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
5946 ret = get_errno(getuid());
5949 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
5950 /* Alpha specific */
5951 case TARGET_NR_getxgid:
5955 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
5957 ret = get_errno(getgid());
5961 #ifdef TARGET_NR_getgid32
5962 case TARGET_NR_getgid32:
5963 ret = get_errno(getgid());
5966 #ifdef TARGET_NR_geteuid32
5967 case TARGET_NR_geteuid32:
5968 ret = get_errno(geteuid());
5971 #ifdef TARGET_NR_getegid32
5972 case TARGET_NR_getegid32:
5973 ret = get_errno(getegid());
5976 #ifdef TARGET_NR_setreuid32
5977 case TARGET_NR_setreuid32:
5978 ret = get_errno(setreuid(arg1, arg2));
5981 #ifdef TARGET_NR_setregid32
5982 case TARGET_NR_setregid32:
5983 ret = get_errno(setregid(arg1, arg2));
5986 #ifdef TARGET_NR_getgroups32
5987 case TARGET_NR_getgroups32:
5989 int gidsetsize = arg1;
5990 uint32_t *target_grouplist;
5994 grouplist = alloca(gidsetsize * sizeof(gid_t));
5995 ret = get_errno(getgroups(gidsetsize, grouplist));
5996 if (gidsetsize == 0)
5998 if (!is_error(ret)) {
5999 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
6000 if (!target_grouplist) {
6001 ret = -TARGET_EFAULT;
6004 for(i = 0;i < ret; i++)
6005 target_grouplist[i] = tswap32(grouplist[i]);
6006 unlock_user(target_grouplist, arg2, gidsetsize * 4);
6011 #ifdef TARGET_NR_setgroups32
6012 case TARGET_NR_setgroups32:
6014 int gidsetsize = arg1;
6015 uint32_t *target_grouplist;
6019 grouplist = alloca(gidsetsize * sizeof(gid_t));
6020 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
6021 if (!target_grouplist) {
6022 ret = -TARGET_EFAULT;
6025 for(i = 0;i < gidsetsize; i++)
6026 grouplist[i] = tswap32(target_grouplist[i]);
6027 unlock_user(target_grouplist, arg2, 0);
6028 ret = get_errno(setgroups(gidsetsize, grouplist));
6032 #ifdef TARGET_NR_fchown32
6033 case TARGET_NR_fchown32:
6034 ret = get_errno(fchown(arg1, arg2, arg3));
6037 #ifdef TARGET_NR_setresuid32
6038 case TARGET_NR_setresuid32:
6039 ret = get_errno(setresuid(arg1, arg2, arg3));
6042 #ifdef TARGET_NR_getresuid32
6043 case TARGET_NR_getresuid32:
6045 uid_t ruid, euid, suid;
6046 ret = get_errno(getresuid(&ruid, &euid, &suid));
6047 if (!is_error(ret)) {
6048 if (put_user_u32(ruid, arg1)
6049 || put_user_u32(euid, arg2)
6050 || put_user_u32(suid, arg3))
6056 #ifdef TARGET_NR_setresgid32
6057 case TARGET_NR_setresgid32:
6058 ret = get_errno(setresgid(arg1, arg2, arg3));
6061 #ifdef TARGET_NR_getresgid32
6062 case TARGET_NR_getresgid32:
6064 gid_t rgid, egid, sgid;
6065 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6066 if (!is_error(ret)) {
6067 if (put_user_u32(rgid, arg1)
6068 || put_user_u32(egid, arg2)
6069 || put_user_u32(sgid, arg3))
6075 #ifdef TARGET_NR_chown32
6076 case TARGET_NR_chown32:
6077 if (!(p = lock_user_string(arg1)))
6079 ret = get_errno(chown(p, arg2, arg3));
6080 unlock_user(p, arg1, 0);
6083 #ifdef TARGET_NR_setuid32
6084 case TARGET_NR_setuid32:
6085 ret = get_errno(setuid(arg1));
6088 #ifdef TARGET_NR_setgid32
6089 case TARGET_NR_setgid32:
6090 ret = get_errno(setgid(arg1));
6093 #ifdef TARGET_NR_setfsuid32
6094 case TARGET_NR_setfsuid32:
6095 ret = get_errno(setfsuid(arg1));
6098 #ifdef TARGET_NR_setfsgid32
6099 case TARGET_NR_setfsgid32:
6100 ret = get_errno(setfsgid(arg1));
6104 case TARGET_NR_pivot_root:
6106 #ifdef TARGET_NR_mincore
6107 case TARGET_NR_mincore:
6110 ret = -TARGET_EFAULT;
6111 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
6113 if (!(p = lock_user_string(arg3)))
6115 ret = get_errno(mincore(a, arg2, p));
6116 unlock_user(p, arg3, ret);
6118 unlock_user(a, arg1, 0);
6122 #ifdef TARGET_NR_arm_fadvise64_64
6123 case TARGET_NR_arm_fadvise64_64:
/* NOTE(review): comment delimiters restored around the two lines
   below, which lost their opening slash-star in extraction:
6126 * arm_fadvise64_64 looks like fadvise64_64 but
6127 * with different argument order
*/
6135 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
6136 #ifdef TARGET_NR_fadvise64_64
6137 case TARGET_NR_fadvise64_64:
6139 /* This is a hint, so ignoring and returning success is ok. */
6143 #ifdef TARGET_NR_madvise
6144 case TARGET_NR_madvise:
6145 /* A straight passthrough may not be safe because qemu sometimes
6146 turns private file-backed mappings into anonymous mappings.
6147 This will break MADV_DONTNEED.
6148 This is a hint, so ignoring and returning success is ok. */
6152 #if TARGET_ABI_BITS == 32
6153 case TARGET_NR_fcntl64:
6157 struct target_flock64 *target_fl;
6159 struct target_eabi_flock64 *target_efl;
6163 case TARGET_F_GETLK64:
6166 case TARGET_F_SETLK64:
6169 case TARGET_F_SETLKW64:
6178 case TARGET_F_GETLK64:
6180 if (((CPUARMState *)cpu_env)->eabi) {
6181 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6183 fl.l_type = tswap16(target_efl->l_type);
6184 fl.l_whence = tswap16(target_efl->l_whence);
6185 fl.l_start = tswap64(target_efl->l_start);
6186 fl.l_len = tswap64(target_efl->l_len);
6187 fl.l_pid = tswapl(target_efl->l_pid);
6188 unlock_user_struct(target_efl, arg3, 0);
6192 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6194 fl.l_type = tswap16(target_fl->l_type);
6195 fl.l_whence = tswap16(target_fl->l_whence);
6196 fl.l_start = tswap64(target_fl->l_start);
6197 fl.l_len = tswap64(target_fl->l_len);
6198 fl.l_pid = tswapl(target_fl->l_pid);
6199 unlock_user_struct(target_fl, arg3, 0);
6201 ret = get_errno(fcntl(arg1, cmd, &fl));
6204 if (((CPUARMState *)cpu_env)->eabi) {
6205 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
6207 target_efl->l_type = tswap16(fl.l_type);
6208 target_efl->l_whence = tswap16(fl.l_whence);
6209 target_efl->l_start = tswap64(fl.l_start);
6210 target_efl->l_len = tswap64(fl.l_len);
6211 target_efl->l_pid = tswapl(fl.l_pid);
6212 unlock_user_struct(target_efl, arg3, 1);
6216 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
6218 target_fl->l_type = tswap16(fl.l_type);
6219 target_fl->l_whence = tswap16(fl.l_whence);
6220 target_fl->l_start = tswap64(fl.l_start);
6221 target_fl->l_len = tswap64(fl.l_len);
6222 target_fl->l_pid = tswapl(fl.l_pid);
6223 unlock_user_struct(target_fl, arg3, 1);
6228 case TARGET_F_SETLK64:
6229 case TARGET_F_SETLKW64:
6231 if (((CPUARMState *)cpu_env)->eabi) {
6232 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6234 fl.l_type = tswap16(target_efl->l_type);
6235 fl.l_whence = tswap16(target_efl->l_whence);
6236 fl.l_start = tswap64(target_efl->l_start);
6237 fl.l_len = tswap64(target_efl->l_len);
6238 fl.l_pid = tswapl(target_efl->l_pid);
6239 unlock_user_struct(target_efl, arg3, 0);
6243 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6245 fl.l_type = tswap16(target_fl->l_type);
6246 fl.l_whence = tswap16(target_fl->l_whence);
6247 fl.l_start = tswap64(target_fl->l_start);
6248 fl.l_len = tswap64(target_fl->l_len);
6249 fl.l_pid = tswapl(target_fl->l_pid);
6250 unlock_user_struct(target_fl, arg3, 0);
6252 ret = get_errno(fcntl(arg1, cmd, &fl));
6255 ret = do_fcntl(arg1, cmd, arg3);
6261 #ifdef TARGET_NR_cacheflush
6262 case TARGET_NR_cacheflush:
6263 /* self-modifying code is handled automatically, so nothing needed */
6267 #ifdef TARGET_NR_security
6268 case TARGET_NR_security:
6271 #ifdef TARGET_NR_getpagesize
6272 case TARGET_NR_getpagesize:
6273 ret = TARGET_PAGE_SIZE;
6276 case TARGET_NR_gettid:
6277 ret = get_errno(gettid());
6279 #ifdef TARGET_NR_readahead
6280 case TARGET_NR_readahead:
6281 #if TARGET_ABI_BITS == 32
6283 if (((CPUARMState *)cpu_env)->eabi)
6290 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
6292 ret = get_errno(readahead(arg1, arg2, arg3));
6296 #ifdef TARGET_NR_setxattr
6297 case TARGET_NR_setxattr:
6298 case TARGET_NR_lsetxattr:
6299 case TARGET_NR_fsetxattr:
6300 case TARGET_NR_getxattr:
6301 case TARGET_NR_lgetxattr:
6302 case TARGET_NR_fgetxattr:
6303 case TARGET_NR_listxattr:
6304 case TARGET_NR_llistxattr:
6305 case TARGET_NR_flistxattr:
6306 case TARGET_NR_removexattr:
6307 case TARGET_NR_lremovexattr:
6308 case TARGET_NR_fremovexattr:
6309 goto unimplemented_nowarn;
6311 #ifdef TARGET_NR_set_thread_area
6312 case TARGET_NR_set_thread_area:
6313 #if defined(TARGET_MIPS)
6314 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
6317 #elif defined(TARGET_CRIS)
6319 ret = -TARGET_EINVAL;
6321 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
6325 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6326 ret = do_set_thread_area(cpu_env, arg1);
6329 goto unimplemented_nowarn;
6332 #ifdef TARGET_NR_get_thread_area
6333 case TARGET_NR_get_thread_area:
6334 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6335 ret = do_get_thread_area(cpu_env, arg1);
6337 goto unimplemented_nowarn;
6340 #ifdef TARGET_NR_getdomainname
6341 case TARGET_NR_getdomainname:
6342 goto unimplemented_nowarn;
6345 #ifdef TARGET_NR_clock_gettime
6346 case TARGET_NR_clock_gettime:
6349 ret = get_errno(clock_gettime(arg1, &ts));
6350 if (!is_error(ret)) {
6351 host_to_target_timespec(arg2, &ts);
6356 #ifdef TARGET_NR_clock_getres
6357 case TARGET_NR_clock_getres:
6360 ret = get_errno(clock_getres(arg1, &ts));
6361 if (!is_error(ret)) {
6362 host_to_target_timespec(arg2, &ts);
6367 #ifdef TARGET_NR_clock_nanosleep
6368 case TARGET_NR_clock_nanosleep:
6371 target_to_host_timespec(&ts, arg3);
6372 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
6374 host_to_target_timespec(arg4, &ts);
6379 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
6380 case TARGET_NR_set_tid_address:
6381 ret = get_errno(set_tid_address((int *)g2h(arg1)));
6385 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
6386 case TARGET_NR_tkill:
6387 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
6391 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
6392 case TARGET_NR_tgkill:
6393 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
6394 target_to_host_signal(arg3)));
6398 #ifdef TARGET_NR_set_robust_list
6399 case TARGET_NR_set_robust_list:
6400 goto unimplemented_nowarn;
6403 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
6404 case TARGET_NR_utimensat:
6406 struct timespec ts[2];
6407 target_to_host_timespec(ts, arg3);
6408 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
6410 ret = get_errno(sys_utimensat(arg1, NULL, ts, arg4));
6412 if (!(p = lock_user_string(arg2))) {
6413 ret = -TARGET_EFAULT;
6416 ret = get_errno(sys_utimensat(arg1, path(p), ts, arg4));
6417 unlock_user(p, arg2, 0);
6422 #if defined(USE_NPTL)
6423 case TARGET_NR_futex:
6424 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
6427 #ifdef TARGET_NR_inotify_init
6428 case TARGET_NR_inotify_init:
6429 ret = get_errno(sys_inotify_init());
6432 #ifdef TARGET_NR_inotify_add_watch
6433 case TARGET_NR_inotify_add_watch:
6434 p = lock_user_string(arg2);
6435 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
6436 unlock_user(p, arg2, 0);
6439 #ifdef TARGET_NR_inotify_rm_watch
6440 case TARGET_NR_inotify_rm_watch:
6441 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
6445 #ifdef TARGET_NR_mq_open
6446 case TARGET_NR_mq_open:
6448 struct mq_attr posix_mq_attr;
6450 p = lock_user_string(arg1 - 1);
6452 copy_from_user_mq_attr (&posix_mq_attr, arg4);
6453 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
6454 unlock_user (p, arg1, 0);
6458 case TARGET_NR_mq_unlink:
6459 p = lock_user_string(arg1 - 1);
6460 ret = get_errno(mq_unlink(p));
6461 unlock_user (p, arg1, 0);
6464 case TARGET_NR_mq_timedsend:
6468 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6470 target_to_host_timespec(&ts, arg5);
6471 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
6472 host_to_target_timespec(arg5, &ts);
6475 ret = get_errno(mq_send(arg1, p, arg3, arg4));
6476 unlock_user (p, arg2, arg3);
6480 case TARGET_NR_mq_timedreceive:
6485 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6487 target_to_host_timespec(&ts, arg5);
6488 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
6489 host_to_target_timespec(arg5, &ts);
6492 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
6493 unlock_user (p, arg2, arg3);
6495 put_user_u32(prio, arg4);
6499 /* Not implemented for now... */
6500 /* case TARGET_NR_mq_notify: */
6503 case TARGET_NR_mq_getsetattr:
6505 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
6508 ret = mq_getattr(arg1, &posix_mq_attr_out);
6509 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
6512 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
6513 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
6522 gemu_log("qemu: Unsupported syscall: %d\n", num);
6523 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
6524 unimplemented_nowarn:
6526 ret = -TARGET_ENOSYS;
6531 gemu_log(" = %ld\n", ret);
6534 print_syscall_ret(num, ret);
6537 ret = -TARGET_EFAULT;