4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
32 #include <sys/types.h>
38 #include <sys/mount.h>
39 #include <sys/prctl.h>
40 #include <sys/resource.h>
45 #include <sys/socket.h>
48 #include <sys/times.h>
51 #include <sys/statfs.h>
53 #include <sys/sysinfo.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <qemu-common.h>
62 #define termios host_termios
63 #define winsize host_winsize
64 #define termio host_termio
65 #define sgttyb host_sgttyb /* same as target */
66 #define tchars host_tchars /* same as target */
67 #define ltchars host_ltchars /* same as target */
69 #include <linux/termios.h>
70 #include <linux/unistd.h>
71 #include <linux/utsname.h>
72 #include <linux/cdrom.h>
73 #include <linux/hdreg.h>
74 #include <linux/soundcard.h>
76 #include <linux/mtio.h>
77 #include "linux_loop.h"
80 #include "qemu-common.h"
83 #include <linux/futex.h>
84 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
85 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
87 /* XXX: Hardcode the above values. */
88 #define CLONE_NPTL_FLAGS2 0
93 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
94 || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
95 /* 16 bit uid wrappers emulation */
99 //#include <linux/msdos_fs.h>
100 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
101 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
112 #define _syscall0(type,name) \
113 static type name (void) \
115 return syscall(__NR_##name); \
118 #define _syscall1(type,name,type1,arg1) \
119 static type name (type1 arg1) \
121 return syscall(__NR_##name, arg1); \
124 #define _syscall2(type,name,type1,arg1,type2,arg2) \
125 static type name (type1 arg1,type2 arg2) \
127 return syscall(__NR_##name, arg1, arg2); \
130 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
131 static type name (type1 arg1,type2 arg2,type3 arg3) \
133 return syscall(__NR_##name, arg1, arg2, arg3); \
136 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
137 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
139 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
142 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
144 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
146 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
150 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
151 type5,arg5,type6,arg6) \
152 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
155 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
159 #define __NR_sys_exit __NR_exit
160 #define __NR_sys_uname __NR_uname
161 #define __NR_sys_faccessat __NR_faccessat
162 #define __NR_sys_fchmodat __NR_fchmodat
163 #define __NR_sys_fchownat __NR_fchownat
164 #define __NR_sys_fstatat64 __NR_fstatat64
165 #define __NR_sys_futimesat __NR_futimesat
166 #define __NR_sys_getcwd1 __NR_getcwd
167 #define __NR_sys_getdents __NR_getdents
168 #define __NR_sys_getdents64 __NR_getdents64
169 #define __NR_sys_getpriority __NR_getpriority
170 #define __NR_sys_linkat __NR_linkat
171 #define __NR_sys_mkdirat __NR_mkdirat
172 #define __NR_sys_mknodat __NR_mknodat
173 #define __NR_sys_openat __NR_openat
174 #define __NR_sys_readlinkat __NR_readlinkat
175 #define __NR_sys_renameat __NR_renameat
176 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
177 #define __NR_sys_symlinkat __NR_symlinkat
178 #define __NR_sys_syslog __NR_syslog
179 #define __NR_sys_tgkill __NR_tgkill
180 #define __NR_sys_tkill __NR_tkill
181 #define __NR_sys_unlinkat __NR_unlinkat
182 #define __NR_sys_utimensat __NR_utimensat
183 #define __NR_sys_futex __NR_futex
184 #define __NR_sys_inotify_init __NR_inotify_init
185 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
186 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
188 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
189 #define __NR__llseek __NR_lseek
193 _syscall0(int, gettid)
195 /* This is a replacement for the host gettid() and must return a host
197 static int gettid(void) {
201 _syscall1(int,sys_exit,int,status)
202 _syscall1(int,sys_uname,struct new_utsname *,buf)
203 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
204 _syscall4(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode,int,flags)
206 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
207 _syscall4(int,sys_fchmodat,int,dirfd,const char *,pathname,
208 mode_t,mode,int,flags)
210 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
211 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
212 uid_t,owner,gid_t,group,int,flags)
214 #if defined(TARGET_NR_fstatat64) && defined(__NR_fstatat64)
215 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
216 struct stat *,buf,int,flags)
218 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
219 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
220 const struct timeval *,times)
222 _syscall2(int,sys_getcwd1,char *,buf,size_t,size)
223 #if TARGET_ABI_BITS == 32
224 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
226 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
227 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
229 _syscall2(int, sys_getpriority, int, which, int, who);
230 #if !defined (__x86_64__)
231 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
232 loff_t *, res, uint, wh);
234 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
235 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
236 int,newdirfd,const char *,newpath,int,flags)
238 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
239 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
241 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
242 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
243 mode_t,mode,dev_t,dev)
245 #if defined(TARGET_NR_openat) && defined(__NR_openat)
246 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
248 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
249 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
250 char *,buf,size_t,bufsize)
252 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
253 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
254 int,newdirfd,const char *,newpath)
256 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
257 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
258 _syscall3(int,sys_symlinkat,const char *,oldpath,
259 int,newdirfd,const char *,newpath)
261 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
262 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
263 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
265 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
266 _syscall2(int,sys_tkill,int,tid,int,sig)
268 #ifdef __NR_exit_group
269 _syscall1(int,exit_group,int,error_code)
271 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
272 _syscall1(int,set_tid_address,int *,tidptr)
274 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
275 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
277 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
278 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
279 const struct timespec *,tsp,int,flags)
281 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
282 _syscall0(int,sys_inotify_init)
284 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
285 _syscall3(int,sys_inotify_add_watch,int,fd,const char *,pathname,uint32_t,mask)
287 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
288 _syscall2(int,sys_inotify_rm_watch,int,fd,uint32_t,wd)
290 #if defined(USE_NPTL)
291 #if defined(TARGET_NR_futex) && defined(__NR_futex)
292 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
293 const struct timespec *,timeout,int *,uaddr2,int,val3)
297 extern int personality(int);
298 extern int flock(int, int);
299 extern int setfsuid(int);
300 extern int setfsgid(int);
301 extern int setgroups(int, gid_t *);
303 #define ERRNO_TABLE_SIZE 1200
305 /* target_to_host_errno_table[] is initialized from
306 * host_to_target_errno_table[] in syscall_init(). */
307 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
311 * This list is the union of errno values overridden in asm-<arch>/errno.h
312 * minus the errnos that are not actually generic to all archs.
314 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
315 [EIDRM] = TARGET_EIDRM,
316 [ECHRNG] = TARGET_ECHRNG,
317 [EL2NSYNC] = TARGET_EL2NSYNC,
318 [EL3HLT] = TARGET_EL3HLT,
319 [EL3RST] = TARGET_EL3RST,
320 [ELNRNG] = TARGET_ELNRNG,
321 [EUNATCH] = TARGET_EUNATCH,
322 [ENOCSI] = TARGET_ENOCSI,
323 [EL2HLT] = TARGET_EL2HLT,
324 [EDEADLK] = TARGET_EDEADLK,
325 [ENOLCK] = TARGET_ENOLCK,
326 [EBADE] = TARGET_EBADE,
327 [EBADR] = TARGET_EBADR,
328 [EXFULL] = TARGET_EXFULL,
329 [ENOANO] = TARGET_ENOANO,
330 [EBADRQC] = TARGET_EBADRQC,
331 [EBADSLT] = TARGET_EBADSLT,
332 [EBFONT] = TARGET_EBFONT,
333 [ENOSTR] = TARGET_ENOSTR,
334 [ENODATA] = TARGET_ENODATA,
335 [ETIME] = TARGET_ETIME,
336 [ENOSR] = TARGET_ENOSR,
337 [ENONET] = TARGET_ENONET,
338 [ENOPKG] = TARGET_ENOPKG,
339 [EREMOTE] = TARGET_EREMOTE,
340 [ENOLINK] = TARGET_ENOLINK,
341 [EADV] = TARGET_EADV,
342 [ESRMNT] = TARGET_ESRMNT,
343 [ECOMM] = TARGET_ECOMM,
344 [EPROTO] = TARGET_EPROTO,
345 [EDOTDOT] = TARGET_EDOTDOT,
346 [EMULTIHOP] = TARGET_EMULTIHOP,
347 [EBADMSG] = TARGET_EBADMSG,
348 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
349 [EOVERFLOW] = TARGET_EOVERFLOW,
350 [ENOTUNIQ] = TARGET_ENOTUNIQ,
351 [EBADFD] = TARGET_EBADFD,
352 [EREMCHG] = TARGET_EREMCHG,
353 [ELIBACC] = TARGET_ELIBACC,
354 [ELIBBAD] = TARGET_ELIBBAD,
355 [ELIBSCN] = TARGET_ELIBSCN,
356 [ELIBMAX] = TARGET_ELIBMAX,
357 [ELIBEXEC] = TARGET_ELIBEXEC,
358 [EILSEQ] = TARGET_EILSEQ,
359 [ENOSYS] = TARGET_ENOSYS,
360 [ELOOP] = TARGET_ELOOP,
361 [ERESTART] = TARGET_ERESTART,
362 [ESTRPIPE] = TARGET_ESTRPIPE,
363 [ENOTEMPTY] = TARGET_ENOTEMPTY,
364 [EUSERS] = TARGET_EUSERS,
365 [ENOTSOCK] = TARGET_ENOTSOCK,
366 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
367 [EMSGSIZE] = TARGET_EMSGSIZE,
368 [EPROTOTYPE] = TARGET_EPROTOTYPE,
369 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
370 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
371 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
372 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
373 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
374 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
375 [EADDRINUSE] = TARGET_EADDRINUSE,
376 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
377 [ENETDOWN] = TARGET_ENETDOWN,
378 [ENETUNREACH] = TARGET_ENETUNREACH,
379 [ENETRESET] = TARGET_ENETRESET,
380 [ECONNABORTED] = TARGET_ECONNABORTED,
381 [ECONNRESET] = TARGET_ECONNRESET,
382 [ENOBUFS] = TARGET_ENOBUFS,
383 [EISCONN] = TARGET_EISCONN,
384 [ENOTCONN] = TARGET_ENOTCONN,
385 [EUCLEAN] = TARGET_EUCLEAN,
386 [ENOTNAM] = TARGET_ENOTNAM,
387 [ENAVAIL] = TARGET_ENAVAIL,
388 [EISNAM] = TARGET_EISNAM,
389 [EREMOTEIO] = TARGET_EREMOTEIO,
390 [ESHUTDOWN] = TARGET_ESHUTDOWN,
391 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
392 [ETIMEDOUT] = TARGET_ETIMEDOUT,
393 [ECONNREFUSED] = TARGET_ECONNREFUSED,
394 [EHOSTDOWN] = TARGET_EHOSTDOWN,
395 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
396 [EALREADY] = TARGET_EALREADY,
397 [EINPROGRESS] = TARGET_EINPROGRESS,
398 [ESTALE] = TARGET_ESTALE,
399 [ECANCELED] = TARGET_ECANCELED,
400 [ENOMEDIUM] = TARGET_ENOMEDIUM,
401 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
403 [ENOKEY] = TARGET_ENOKEY,
406 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
409 [EKEYREVOKED] = TARGET_EKEYREVOKED,
412 [EKEYREJECTED] = TARGET_EKEYREJECTED,
415 [EOWNERDEAD] = TARGET_EOWNERDEAD,
417 #ifdef ENOTRECOVERABLE
418 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
422 static inline int host_to_target_errno(int err)
424 if(host_to_target_errno_table[err])
425 return host_to_target_errno_table[err];
429 static inline int target_to_host_errno(int err)
431 if (target_to_host_errno_table[err])
432 return target_to_host_errno_table[err];
436 static inline abi_long get_errno(abi_long ret)
439 return -host_to_target_errno(errno);
444 static inline int is_error(abi_long ret)
446 return (abi_ulong)ret >= (abi_ulong)(-4096);
/*
 * Return the host strerror() text for a target-numbered errno.
 * The returned pointer is strerror()'s static buffer — not thread-safe,
 * and must not be freed by the caller.
 */
char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
/* Current and initial guest program break, maintained by do_brk() below. */
454 static abi_ulong target_brk;
455 static abi_ulong target_original_brk;
457 void target_set_brk(abi_ulong new_brk)
459 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
/* NOTE(review): this extraction is missing source lines (declarations,
   braces, returns); code below is kept byte-identical to what was given. */
462 /* do_brk() must return target values and target errnos. */
463 abi_long do_brk(abi_ulong new_brk)
466 abi_long mapped_addr;
/* Guest may not shrink the break below its original value. */
471 if (new_brk < target_original_brk)
474 brk_page = HOST_PAGE_ALIGN(target_brk);
476 /* If the new brk is less than this, set it and we're done... */
477 if (new_brk < brk_page) {
478 target_brk = new_brk;
482 /* We need to allocate more memory after the brk... */
483 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
/* Grow the heap with an anonymous fixed mapping at the current brk page. */
484 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
485 PROT_READ|PROT_WRITE,
486 MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
488 if (!is_error(mapped_addr))
489 target_brk = new_brk;
/* Convert a guest fd_set at target_fds_addr (n descriptors, packed into
   abi_ulong words) into the host fd_set *fds.
   NOTE(review): lines are missing from this extraction (the FD_SET call,
   braces, return); code kept byte-identical. */
494 static inline abi_long copy_from_user_fdset(fd_set *fds,
495 abi_ulong target_fds_addr,
499 abi_ulong b, *target_fds;
/* Number of abi_ulong words needed to hold n bits, rounded up. */
501 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
502 if (!(target_fds = lock_user(VERIFY_READ,
504 sizeof(abi_ulong) * nw,
506 return -TARGET_EFAULT;
510 for (i = 0; i < nw; i++) {
511 /* grab the abi_ulong */
512 __get_user(b, &target_fds[i]);
513 for (j = 0; j < TARGET_ABI_BITS; j++) {
514 /* check the bit inside the abi_ulong */
521 unlock_user(target_fds, target_fds_addr, 0);
/* Inverse of copy_from_user_fdset(): pack the host fd_set *fds (n
   descriptors) into abi_ulong words at target_fds_addr.
   NOTE(review): lines are missing from this extraction (v initialisation,
   k increment, braces, return); code kept byte-identical. */
526 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
532 abi_ulong *target_fds;
534 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
535 if (!(target_fds = lock_user(VERIFY_WRITE,
537 sizeof(abi_ulong) * nw,
539 return -TARGET_EFAULT;
542 for (i = 0; i < nw; i++) {
544 for (j = 0; j < TARGET_ABI_BITS; j++) {
/* Set bit j of the current word if descriptor k is in the set. */
545 v |= ((FD_ISSET(k, fds) != 0) << j);
548 __put_user(v, &target_fds[i]);
551 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
/* Rescale a clock-tick count from the host's HZ to the target's HZ.
   NOTE(review): the alpha-specific branch and the #if HOST_HZ == TARGET_HZ
   fast path body are missing from this extraction; kept byte-identical. */
556 #if defined(__alpha__)
562 static inline abi_long host_to_target_clock_t(long ticks)
564 #if HOST_HZ == TARGET_HZ
/* Widen to 64 bits before scaling to avoid intermediate overflow. */
567 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
571 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
572 const struct rusage *rusage)
574 struct target_rusage *target_rusage;
576 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
577 return -TARGET_EFAULT;
578 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
579 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
580 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
581 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
582 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
583 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
584 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
585 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
586 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
587 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
588 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
589 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
590 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
591 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
592 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
593 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
594 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
595 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
596 unlock_user_struct(target_rusage, target_addr, 1);
601 static inline abi_long copy_from_user_timeval(struct timeval *tv,
602 abi_ulong target_tv_addr)
604 struct target_timeval *target_tv;
606 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
607 return -TARGET_EFAULT;
609 __get_user(tv->tv_sec, &target_tv->tv_sec);
610 __get_user(tv->tv_usec, &target_tv->tv_usec);
612 unlock_user_struct(target_tv, target_tv_addr, 0);
617 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
618 const struct timeval *tv)
620 struct target_timeval *target_tv;
622 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
623 return -TARGET_EFAULT;
625 __put_user(tv->tv_sec, &target_tv->tv_sec);
626 __put_user(tv->tv_usec, &target_tv->tv_usec);
628 unlock_user_struct(target_tv, target_tv_addr, 1);
/* Emulate select(2): copy the three guest fd_sets and optional timeout in,
   run the host select, and copy results back out.
   NOTE(review): lines are missing from this extraction (the rfds_ptr/
   wfds_ptr/efds_ptr and tv_ptr NULL-vs-&set assignments, braces, final
   return); code kept byte-identical. */
634 /* do_select() must return target values and target errnos. */
635 static abi_long do_select(int n,
636 abi_ulong rfd_addr, abi_ulong wfd_addr,
637 abi_ulong efd_addr, abi_ulong target_tv_addr)
639 fd_set rfds, wfds, efds;
640 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
641 struct timeval tv, *tv_ptr;
645 if (copy_from_user_fdset(&rfds, rfd_addr, n))
646 return -TARGET_EFAULT;
652 if (copy_from_user_fdset(&wfds, wfd_addr, n))
653 return -TARGET_EFAULT;
659 if (copy_from_user_fdset(&efds, efd_addr, n))
660 return -TARGET_EFAULT;
/* A NULL guest timeout pointer means block indefinitely. */
666 if (target_tv_addr) {
667 if (copy_from_user_timeval(&tv, target_tv_addr))
668 return -TARGET_EFAULT;
674 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
/* On success, write the (possibly modified) sets and timeout back. */
676 if (!is_error(ret)) {
677 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
678 return -TARGET_EFAULT;
679 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
680 return -TARGET_EFAULT;
681 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
682 return -TARGET_EFAULT;
684 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
685 return -TARGET_EFAULT;
691 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
692 abi_ulong target_addr,
695 struct target_sockaddr *target_saddr;
697 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
699 return -TARGET_EFAULT;
700 memcpy(addr, target_saddr, len);
701 addr->sa_family = tswap16(target_saddr->sa_family);
702 unlock_user(target_saddr, target_addr, 0);
707 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
708 struct sockaddr *addr,
711 struct target_sockaddr *target_saddr;
713 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
715 return -TARGET_EFAULT;
716 memcpy(target_saddr, addr, len);
717 target_saddr->sa_family = tswap16(addr->sa_family);
718 unlock_user(target_saddr, target_addr, len);
/* Convert ancillary (control-message) data from the guest msghdr layout
   into the host layout inside msgh, translating SCM_RIGHTS fd arrays and
   copying everything else verbatim.
   NOTE(review): lines are missing from this extraction (space init,
   lock failure check, break, braces, return); code kept byte-identical. */
723 /* ??? Should this also swap msgh->name? */
724 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
725 struct target_msghdr *target_msgh)
727 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
728 abi_long msg_controllen;
729 abi_ulong target_cmsg_addr;
730 struct target_cmsghdr *target_cmsg;
733 msg_controllen = tswapl(target_msgh->msg_controllen);
734 if (msg_controllen < sizeof (struct target_cmsghdr))
736 target_cmsg_addr = tswapl(target_msgh->msg_control);
737 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
739 return -TARGET_EFAULT;
741 while (cmsg && target_cmsg) {
742 void *data = CMSG_DATA(cmsg);
743 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length = recorded cmsg_len minus the (target) header size. */
745 int len = tswapl(target_cmsg->cmsg_len)
746 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
748 space += CMSG_SPACE(len);
749 if (space > msgh->msg_controllen) {
750 space -= CMSG_SPACE(len);
751 gemu_log("Host cmsg overflow\n");
755 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
756 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
757 cmsg->cmsg_len = CMSG_LEN(len);
/* NOTE(review): cmsg->cmsg_level now holds the guest's level value, yet it
   is compared against TARGET_SOL_SOCKET while cmsg_type is compared against
   the host SCM_RIGHTS — verify this mixed target/host comparison is
   intentional. */
759 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
760 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
761 memcpy(data, target_data, len);
763 int *fd = (int *)data;
764 int *target_fd = (int *)target_data;
765 int i, numfds = len / sizeof(int);
/* SCM_RIGHTS payload is an int array of file descriptors: swap each. */
767 for (i = 0; i < numfds; i++)
768 fd[i] = tswap32(target_fd[i]);
771 cmsg = CMSG_NXTHDR(msgh, cmsg);
772 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
774 unlock_user(target_cmsg, target_cmsg_addr, 0);
776 msgh->msg_controllen = space;
/* Inverse of target_to_host_cmsg(): convert host ancillary data in msgh
   back into the guest layout and write the converted total length into
   target_msgh->msg_controllen.
   NOTE(review): lines are missing from this extraction (second parameter
   line, space init, lock failure check, break, braces, return); code kept
   byte-identical. */
780 /* ??? Should this also swap msgh->name? */
781 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
784 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
785 abi_long msg_controllen;
786 abi_ulong target_cmsg_addr;
787 struct target_cmsghdr *target_cmsg;
790 msg_controllen = tswapl(target_msgh->msg_controllen);
791 if (msg_controllen < sizeof (struct target_cmsghdr))
793 target_cmsg_addr = tswapl(target_msgh->msg_control);
794 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
796 return -TARGET_EFAULT;
798 while (cmsg && target_cmsg) {
799 void *data = CMSG_DATA(cmsg);
800 void *target_data = TARGET_CMSG_DATA(target_cmsg);
802 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
804 space += TARGET_CMSG_SPACE(len);
805 if (space > msg_controllen) {
806 space -= TARGET_CMSG_SPACE(len);
807 gemu_log("Target cmsg overflow\n");
811 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
812 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
813 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
/* NOTE(review): here cmsg->cmsg_level is a HOST value but is compared
   against TARGET_SOL_SOCKET — verify this should not be SOL_SOCKET. */
815 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
816 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
817 memcpy(target_data, data, len);
819 int *fd = (int *)data;
820 int *target_fd = (int *)target_data;
821 int i, numfds = len / sizeof(int);
822 
823 for (i = 0; i < numfds; i++)
824 target_fd[i] = tswap32(fd[i]);
827 cmsg = CMSG_NXTHDR(msgh, cmsg);
828 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
830 unlock_user(target_cmsg, target_cmsg_addr, space);
832 target_msgh->msg_controllen = tswapl(space);
/* Emulate setsockopt(2): translate the option level/name from target to
   host numbering, fetch the option value from guest memory, and call the
   host setsockopt.
   NOTE(review): lines are missing from this extraction (the switch(level)
   skeleton, many case/break lines, the default value path, final return);
   code kept byte-identical. */
836 /* do_setsockopt() Must return target values and target errnos. */
837 static abi_long do_setsockopt(int sockfd, int level, int optname,
838 abi_ulong optval_addr, socklen_t optlen)
845 /* TCP options all take an 'int' value. */
846 if (optlen < sizeof(uint32_t))
847 return -TARGET_EINVAL;
849 if (get_user_u32(val, optval_addr))
850 return -TARGET_EFAULT;
851 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
858 case IP_ROUTER_ALERT:
862 case IP_MTU_DISCOVER:
868 case IP_MULTICAST_TTL:
869 case IP_MULTICAST_LOOP:
/* These IP-level options accept either a full int or a single byte. */
871 if (optlen >= sizeof(uint32_t)) {
872 if (get_user_u32(val, optval_addr))
873 return -TARGET_EFAULT;
874 } else if (optlen >= 1) {
875 if (get_user_u8(val, optval_addr))
876 return -TARGET_EFAULT;
878 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
884 case TARGET_SOL_SOCKET:
886 /* Options with 'int' argument. */
887 case TARGET_SO_DEBUG:
890 case TARGET_SO_REUSEADDR:
891 optname = SO_REUSEADDR;
896 case TARGET_SO_ERROR:
899 case TARGET_SO_DONTROUTE:
900 optname = SO_DONTROUTE;
902 case TARGET_SO_BROADCAST:
903 optname = SO_BROADCAST;
905 case TARGET_SO_SNDBUF:
908 case TARGET_SO_RCVBUF:
911 case TARGET_SO_KEEPALIVE:
912 optname = SO_KEEPALIVE;
914 case TARGET_SO_OOBINLINE:
915 optname = SO_OOBINLINE;
917 case TARGET_SO_NO_CHECK:
918 optname = SO_NO_CHECK;
920 case TARGET_SO_PRIORITY:
921 optname = SO_PRIORITY;
924 case TARGET_SO_BSDCOMPAT:
925 optname = SO_BSDCOMPAT;
928 case TARGET_SO_PASSCRED:
929 optname = SO_PASSCRED;
931 case TARGET_SO_TIMESTAMP:
932 optname = SO_TIMESTAMP;
934 case TARGET_SO_RCVLOWAT:
935 optname = SO_RCVLOWAT;
937 case TARGET_SO_RCVTIMEO:
938 optname = SO_RCVTIMEO;
940 case TARGET_SO_SNDTIMEO:
941 optname = SO_SNDTIMEO;
/* Common tail for the int-valued SOL_SOCKET options above. */
947 if (optlen < sizeof(uint32_t))
948 return -TARGET_EINVAL;
950 if (get_user_u32(val, optval_addr))
951 return -TARGET_EFAULT;
952 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
956 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
957 ret = -TARGET_ENOPROTOOPT;
/* Emulate getsockopt(2): fetch the guest's length, call the host
   getsockopt into a local int, then write value and updated length back
   to guest memory, honouring the guest's requested width.
   NOTE(review): lines are missing from this extraction (the switch(level)
   skeleton, lv/val declarations, several length checks, braces, return);
   code kept byte-identical. */
962 /* do_getsockopt() Must return target values and target errnos. */
963 static abi_long do_getsockopt(int sockfd, int level, int optname,
964 abi_ulong optval_addr, abi_ulong optlen)
971 case TARGET_SOL_SOCKET:
974 case TARGET_SO_LINGER:
975 case TARGET_SO_RCVTIMEO:
976 case TARGET_SO_SNDTIMEO:
977 case TARGET_SO_PEERCRED:
978 case TARGET_SO_PEERNAME:
979 /* These don't just return a single integer */
986 /* TCP options all take an 'int' value. */
988 if (get_user_u32(len, optlen))
989 return -TARGET_EFAULT;
991 return -TARGET_EINVAL;
993 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* Guest asked for fewer bytes than an int: write back a single byte. */
1000 if (put_user_u32(val, optval_addr))
1001 return -TARGET_EFAULT;
1003 if (put_user_u8(val, optval_addr))
1004 return -TARGET_EFAULT;
1006 if (put_user_u32(len, optlen))
1007 return -TARGET_EFAULT;
1014 case IP_ROUTER_ALERT:
1018 case IP_MTU_DISCOVER:
1024 case IP_MULTICAST_TTL:
1025 case IP_MULTICAST_LOOP:
1026 if (get_user_u32(len, optlen))
1027 return -TARGET_EFAULT;
1029 return -TARGET_EINVAL;
1031 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* Narrow byte-sized results only when the value fits in one byte. */
1034 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1036 if (put_user_u32(len, optlen)
1037 || put_user_u8(val, optval_addr))
1038 return -TARGET_EFAULT;
1040 if (len > sizeof(int))
1042 if (put_user_u32(len, optlen)
1043 || put_user_u32(val, optval_addr))
1044 return -TARGET_EFAULT;
1048 ret = -TARGET_ENOPROTOOPT;
1054 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1056 ret = -TARGET_EOPNOTSUPP;
/* Lock a guest iovec array into host memory, filling in vec[] with host
   pointers and lengths.
   NOTE(review): lines are missing from this extraction (the opening of
   the comment below, local declarations, the lock failure check, braces,
   return 0); code kept byte-identical. */
1063 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1064 * other lock functions have a return code of 0 for failure.
1066 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1067 int count, int copy)
1069 struct target_iovec *target_vec;
1073 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1075 return -TARGET_EFAULT;
1076 for(i = 0;i < count; i++) {
1077 base = tswapl(target_vec[i].iov_base);
1078 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1079 if (vec[i].iov_len != 0) {
1080 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1081 /* Don't check lock_user return value. We must call writev even
1082 if a element has invalid base address. */
1084 /* zero length pointer is ignored */
1085 vec[i].iov_base = NULL;
1088 unlock_user (target_vec, target_addr, 0);
/* Release an iovec previously mapped by lock_iovec(), writing each
   buffer back to guest memory only when copy is non-zero.
   NOTE(review): lines are missing from this extraction (local
   declarations, lock failure check, braces, return 0); code kept
   byte-identical. */
1092 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1093 int count, int copy)
1095 struct target_iovec *target_vec;
1099 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1101 return -TARGET_EFAULT;
1102 for(i = 0;i < count; i++) {
1103 if (target_vec[i].iov_base) {
1104 base = tswapl(target_vec[i].iov_base);
1105 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1108 unlock_user (target_vec, target_addr, 0);
/* Emulate socket(2). On MIPS the SOCK_* type constants differ from the
   generic Linux values, so translate them first.
   NOTE(review): lines are missing from this extraction (the switch(type)
   opening, most type assignments, break lines, #endif); code kept
   byte-identical. */
1113 /* do_socket() Must return target values and target errnos. */
1114 static abi_long do_socket(int domain, int type, int protocol)
1116 #if defined(TARGET_MIPS)
1118 case TARGET_SOCK_DGRAM:
1121 case TARGET_SOCK_STREAM:
1124 case TARGET_SOCK_RAW:
1127 case TARGET_SOCK_RDM:
1130 case TARGET_SOCK_SEQPACKET:
1131 type = SOCK_SEQPACKET;
1133 case TARGET_SOCK_PACKET:
1138 if (domain == PF_NETLINK)
1139 return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1140 return get_errno(socket(domain, type, protocol));
1143 /* MAX_SOCK_ADDR from linux/net/socket.c */
1144 #define MAX_SOCK_ADDR 128
1146 /* do_bind() Must return target values and target errnos. */
1147 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1152 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1153 return -TARGET_EINVAL;
1155 addr = alloca(addrlen);
1157 target_to_host_sockaddr(addr, target_addr, addrlen);
1158 return get_errno(bind(sockfd, addr, addrlen));
1161 /* do_connect() Must return target values and target errnos. */
1162 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1167 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1168 return -TARGET_EINVAL;
1170 addr = alloca(addrlen);
1172 target_to_host_sockaddr(addr, target_addr, addrlen);
1173 return get_errno(connect(sockfd, addr, addrlen));
/* Emulate sendmsg(2)/recvmsg(2) (send selects direction): convert the
   guest msghdr, iovec array and ancillary data to host form, perform the
   call, then convert results back for receives.
   NOTE(review): lines are missing from this extraction (local
   declarations, the lock_user_struct argument list, else branches,
   braces, final return); code kept byte-identical. */
1176 /* do_sendrecvmsg() Must return target values and target errnos. */
1177 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1178 int flags, int send)
1181 struct target_msghdr *msgp;
1185 abi_ulong target_vec;
1188 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1192 return -TARGET_EFAULT;
1193 if (msgp->msg_name) {
1194 msg.msg_namelen = tswap32(msgp->msg_namelen);
1195 msg.msg_name = alloca(msg.msg_namelen);
1196 target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1199 msg.msg_name = NULL;
1200 msg.msg_namelen = 0;
/* NOTE(review): control buffer is sized at twice the guest length —
   presumably headroom for host cmsg headers being larger; confirm. */
1202 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1203 msg.msg_control = alloca(msg.msg_controllen);
1204 msg.msg_flags = tswap32(msgp->msg_flags);
1206 count = tswapl(msgp->msg_iovlen);
1207 vec = alloca(count * sizeof(struct iovec));
1208 target_vec = tswapl(msgp->msg_iov);
1209 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1210 msg.msg_iovlen = count;
1214 ret = target_to_host_cmsg(&msg, msgp);
1216 ret = get_errno(sendmsg(fd, &msg, flags));
1218 ret = get_errno(recvmsg(fd, &msg, flags));
1219 if (!is_error(ret)) {
1221 ret = host_to_target_cmsg(msgp, &msg);
1226 unlock_iovec(vec, target_vec, count, !send);
1227 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
/* Emulate accept(2): read the guest's address-length, accept on the
   host, then copy the peer address and updated length back out.
   NOTE(review): lines are missing from this extraction (local
   declarations, braces, final return); code kept byte-identical. */
1231 /* do_accept() Must return target values and target errnos. */
1232 static abi_long do_accept(int fd, abi_ulong target_addr,
1233 abi_ulong target_addrlen_addr)
1239 if (get_user_u32(addrlen, target_addrlen_addr))
1240 return -TARGET_EFAULT;
1242 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1243 return -TARGET_EINVAL;
1245 addr = alloca(addrlen);
1247 ret = get_errno(accept(fd, addr, &addrlen));
1248 if (!is_error(ret)) {
1249 host_to_target_sockaddr(target_addr, addr, addrlen);
1250 if (put_user_u32(addrlen, target_addrlen_addr))
1251 ret = -TARGET_EFAULT;
/* Emulate getpeername(2): same in/out address-length protocol as
   do_accept() above.
   NOTE(review): lines are missing from this extraction (local
   declarations, braces, final return); code kept byte-identical. */
1256 /* do_getpeername() Must return target values and target errnos. */
1257 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1258 abi_ulong target_addrlen_addr)
1264 if (get_user_u32(addrlen, target_addrlen_addr))
1265 return -TARGET_EFAULT;
1267 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1268 return -TARGET_EINVAL;
1270 addr = alloca(addrlen);
1272 ret = get_errno(getpeername(fd, addr, &addrlen));
1273 if (!is_error(ret)) {
1274 host_to_target_sockaddr(target_addr, addr, addrlen);
1275 if (put_user_u32(addrlen, target_addrlen_addr))
1276 ret = -TARGET_EFAULT;
1281 /* do_getsockname() Must return target values and target errnos. */
1282 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1283 abi_ulong target_addrlen_addr)
1289 if (target_addr == 0)
1290 return get_errno(accept(fd, NULL, NULL));
1292 if (get_user_u32(addrlen, target_addrlen_addr))
1293 return -TARGET_EFAULT;
1295 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1296 return -TARGET_EINVAL;
1298 addr = alloca(addrlen);
1300 ret = get_errno(getsockname(fd, addr, &addrlen));
1301 if (!is_error(ret)) {
1302 host_to_target_sockaddr(target_addr, addr, addrlen);
1303 if (put_user_u32(addrlen, target_addrlen_addr))
1304 ret = -TARGET_EFAULT;
/*
 * Emulate socketpair(2): create the pair on the host, then store the two
 * fds into the target's int[2] at target_tab_addr.
 * (Several lines of this function are elided in this extract.)
 */
1309 /* do_socketpair() Must return target values and target errnos. */
1310 static abi_long do_socketpair(int domain, int type, int protocol,
1311                               abi_ulong target_tab_addr)
1316     ret = get_errno(socketpair(domain, type, protocol, tab));
1317     if (!is_error(ret)) {
/* Write both fds back; either faulting store turns success into EFAULT. */
1318         if (put_user_s32(tab[0], target_tab_addr)
1319             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1320             ret = -TARGET_EFAULT;
/*
 * Emulate sendto(2)/send(2): lock the guest message buffer for reading;
 * if a destination address was supplied, convert it and use sendto(),
 * otherwise fall back to plain send().
 * (Several lines of this function are elided in this extract.)
 */
1325 /* do_sendto() Must return target values and target errnos. */
1326 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1327                           abi_ulong target_addr, socklen_t addrlen)
1333     if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1334         return -TARGET_EINVAL;
1336     host_msg = lock_user(VERIFY_READ, msg, len, 1);
1338         return -TARGET_EFAULT;
1340         addr = alloca(addrlen);
1341         target_to_host_sockaddr(addr, target_addr, addrlen);
1342         ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1344         ret = get_errno(send(fd, host_msg, len, flags));
/* Buffer was only read, so nothing to copy back (length 0 on unlock). */
1346     unlock_user(host_msg, msg, 0);
/*
 * Emulate recvfrom(2)/recv(2): lock the guest buffer for writing, receive,
 * then (when a source-address slot was supplied) copy the sender sockaddr
 * and updated addrlen back to the target.
 * (Several lines of this function are elided in this extract.)
 */
1350 /* do_recvfrom() Must return target values and target errnos. */
1351 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1352                             abi_ulong target_addr,
1353                             abi_ulong target_addrlen)
1360     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1362         return -TARGET_EFAULT;
1364         if (get_user_u32(addrlen, target_addrlen)) {
1365             ret = -TARGET_EFAULT;
1368         if (addrlen < 0 || addrlen > MAX_SOCK_ADDR) {
1369             ret = -TARGET_EINVAL;
1372         addr = alloca(addrlen);
1373         ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1375         addr = NULL; /* To keep compiler quiet. */
1376         ret = get_errno(recv(fd, host_msg, len, flags));
1378     if (!is_error(ret)) {
1380             host_to_target_sockaddr(target_addr, addr, addrlen);
1381             if (put_user_u32(addrlen, target_addrlen)) {
1382                 ret = -TARGET_EFAULT;
/* Success path: copy the received bytes back to the guest buffer. */
1386         unlock_user(host_msg, msg, len);
/* Error path: release the lock without copying anything back. */
1389         unlock_user(host_msg, msg, 0);
1394 #ifdef TARGET_NR_socketcall
/*
 * Demultiplexer for the socketcall(2) super-syscall used by several
 * targets: 'num' selects the operation, 'vptr' points at a guest array
 * of abi_ulong arguments.  Each case fetches its arguments with
 * get_user_* (n = sizeof one slot) and forwards to the matching do_*
 * helper or host call.
 * (Several lines, including some case labels, are elided in this extract.)
 */
1395 /* do_socketcall() Must return target values and target errnos. */
1396 static abi_long do_socketcall(int num, abi_ulong vptr)
1399     const int n = sizeof(abi_ulong);
1404             int domain, type, protocol;
1406             if (get_user_s32(domain, vptr)
1407                 || get_user_s32(type, vptr + n)
1408                 || get_user_s32(protocol, vptr + 2 * n))
1409                 return -TARGET_EFAULT;
1411             ret = do_socket(domain, type, protocol);
1417             abi_ulong target_addr;
1420             if (get_user_s32(sockfd, vptr)
1421                 || get_user_ual(target_addr, vptr + n)
1422                 || get_user_u32(addrlen, vptr + 2 * n))
1423                 return -TARGET_EFAULT;
1425             ret = do_bind(sockfd, target_addr, addrlen);
1428     case SOCKOP_connect:
1431             abi_ulong target_addr;
1434             if (get_user_s32(sockfd, vptr)
1435                 || get_user_ual(target_addr, vptr + n)
1436                 || get_user_u32(addrlen, vptr + 2 * n))
1437                 return -TARGET_EFAULT;
1439             ret = do_connect(sockfd, target_addr, addrlen);
1444             int sockfd, backlog;
1446             if (get_user_s32(sockfd, vptr)
1447                 || get_user_s32(backlog, vptr + n))
1448                 return -TARGET_EFAULT;
/* listen() takes no pointers, so it can be called on the host directly. */
1450             ret = get_errno(listen(sockfd, backlog));
1456             abi_ulong target_addr, target_addrlen;
1458             if (get_user_s32(sockfd, vptr)
1459                 || get_user_ual(target_addr, vptr + n)
1460                 || get_user_u32(target_addrlen, vptr + 2 * n))
1461                 return -TARGET_EFAULT;
1463             ret = do_accept(sockfd, target_addr, target_addrlen);
1466     case SOCKOP_getsockname:
1469             abi_ulong target_addr, target_addrlen;
1471             if (get_user_s32(sockfd, vptr)
1472                 || get_user_ual(target_addr, vptr + n)
1473                 || get_user_u32(target_addrlen, vptr + 2 * n))
1474                 return -TARGET_EFAULT;
1476             ret = do_getsockname(sockfd, target_addr, target_addrlen);
1479     case SOCKOP_getpeername:
1482             abi_ulong target_addr, target_addrlen;
1484             if (get_user_s32(sockfd, vptr)
1485                 || get_user_ual(target_addr, vptr + n)
1486                 || get_user_u32(target_addrlen, vptr + 2 * n))
1487                 return -TARGET_EFAULT;
1489             ret = do_getpeername(sockfd, target_addr, target_addrlen);
1492     case SOCKOP_socketpair:
1494             int domain, type, protocol;
1497             if (get_user_s32(domain, vptr)
1498                 || get_user_s32(type, vptr + n)
1499                 || get_user_s32(protocol, vptr + 2 * n)
1500                 || get_user_ual(tab, vptr + 3 * n))
1501                 return -TARGET_EFAULT;
1503             ret = do_socketpair(domain, type, protocol, tab);
/* send/recv are forwarded as sendto/recvfrom with a null address. */
1513             if (get_user_s32(sockfd, vptr)
1514                 || get_user_ual(msg, vptr + n)
1515                 || get_user_ual(len, vptr + 2 * n)
1516                 || get_user_s32(flags, vptr + 3 * n))
1517                 return -TARGET_EFAULT;
1519             ret = do_sendto(sockfd, msg, len, flags, 0, 0);
1529             if (get_user_s32(sockfd, vptr)
1530                 || get_user_ual(msg, vptr + n)
1531                 || get_user_ual(len, vptr + 2 * n)
1532                 || get_user_s32(flags, vptr + 3 * n))
1533                 return -TARGET_EFAULT;
1535             ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
1547             if (get_user_s32(sockfd, vptr)
1548                 || get_user_ual(msg, vptr + n)
1549                 || get_user_ual(len, vptr + 2 * n)
1550                 || get_user_s32(flags, vptr + 3 * n)
1551                 || get_user_ual(addr, vptr + 4 * n)
1552                 || get_user_u32(addrlen, vptr + 5 * n))
1553                 return -TARGET_EFAULT;
1555             ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
1558     case SOCKOP_recvfrom:
1567             if (get_user_s32(sockfd, vptr)
1568                 || get_user_ual(msg, vptr + n)
1569                 || get_user_ual(len, vptr + 2 * n)
1570                 || get_user_s32(flags, vptr + 3 * n)
1571                 || get_user_ual(addr, vptr + 4 * n)
1572                 || get_user_u32(addrlen, vptr + 5 * n))
1573                 return -TARGET_EFAULT;
1575             ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
1578     case SOCKOP_shutdown:
1582             if (get_user_s32(sockfd, vptr)
1583                 || get_user_s32(how, vptr + n))
1584                 return -TARGET_EFAULT;
1586             ret = get_errno(shutdown(sockfd, how));
1589     case SOCKOP_sendmsg:
1590     case SOCKOP_recvmsg:
1593             abi_ulong target_msg;
1596             if (get_user_s32(fd, vptr)
1597                 || get_user_ual(target_msg, vptr + n)
1598                 || get_user_s32(flags, vptr + 2 * n))
1599                 return -TARGET_EFAULT;
/* One helper handles both directions; the flag picks send vs. recv. */
1601             ret = do_sendrecvmsg(fd, target_msg, flags,
1602                                  (num == SOCKOP_sendmsg));
1605     case SOCKOP_setsockopt:
1613             if (get_user_s32(sockfd, vptr)
1614                 || get_user_s32(level, vptr + n)
1615                 || get_user_s32(optname, vptr + 2 * n)
1616                 || get_user_ual(optval, vptr + 3 * n)
1617                 || get_user_u32(optlen, vptr + 4 * n))
1618                 return -TARGET_EFAULT;
1620             ret = do_setsockopt(sockfd, level, optname, optval, optlen);
1623     case SOCKOP_getsockopt:
1631             if (get_user_s32(sockfd, vptr)
1632                 || get_user_s32(level, vptr + n)
1633                 || get_user_s32(optname, vptr + 2 * n)
1634                 || get_user_ual(optval, vptr + 3 * n)
1635                 || get_user_u32(optlen, vptr + 4 * n))
1636                 return -TARGET_EFAULT;
1638             ret = do_getsockopt(sockfd, level, optname, optval, optlen);
1642         gemu_log("Unsupported socketcall: %d\n", num);
1643         ret = -TARGET_ENOSYS;
1650 #ifdef TARGET_NR_ipc
1651 #define N_SHM_REGIONS	32
/* Bookkeeping for guest shmat() mappings: remembers start/size of each
   attached segment so shmdt() can clear the page flags again.
   (Field declarations are elided in this extract.) */
1653 static struct shm_region {
1656 } shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of struct ipc_perm (shared by sem/msg/shm structures).
   Fields use target sizes/endianness; converted by the *_ipc_perm helpers. */
1659 struct target_ipc_perm
1666     unsigned short int mode;
1667     unsigned short int __pad1;
1668     unsigned short int __seq;
1669     unsigned short int __pad2;
1670     abi_ulong __unused1;
1671     abi_ulong __unused2;
/* Guest-ABI layout of struct semid_ds as passed to semctl(IPC_STAT/IPC_SET). */
1674 struct target_semid_ds
1676   struct target_ipc_perm sem_perm;
1677   abi_ulong sem_otime;
1678   abi_ulong __unused1;
1679   abi_ulong sem_ctime;
1680   abi_ulong __unused2;
1681   abi_ulong sem_nsems;
1682   abi_ulong __unused3;
1683   abi_ulong __unused4;
/*
 * Copy the ipc_perm embedded in a guest semid_ds (at target_addr) into a
 * host struct ipc_perm, byteswapping each field.
 * Returns 0 on success, -TARGET_EFAULT if the guest struct can't be read.
 */
1686 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
1687                                                abi_ulong target_addr)
1689     struct target_ipc_perm *target_ip;
1690     struct target_semid_ds *target_sd;
1692     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
1693         return -TARGET_EFAULT;
/* The perm block is the first member of the guest semid_ds. */
1694     target_ip=&(target_sd->sem_perm);
1695     host_ip->__key = tswapl(target_ip->__key);
1696     host_ip->uid = tswapl(target_ip->uid);
1697     host_ip->gid = tswapl(target_ip->gid);
1698     host_ip->cuid = tswapl(target_ip->cuid);
1699     host_ip->cgid = tswapl(target_ip->cgid);
1700     host_ip->mode = tswapl(target_ip->mode);
1701     unlock_user_struct(target_sd, target_addr, 0);
/*
 * Inverse of target_to_host_ipc_perm(): write a host ipc_perm into the
 * perm block of the guest semid_ds at target_addr, byteswapping fields.
 * Returns 0 on success, -TARGET_EFAULT if the guest struct can't be mapped.
 */
1705 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
1706                                                struct ipc_perm *host_ip)
1708     struct target_ipc_perm *target_ip;
1709     struct target_semid_ds *target_sd;
1711     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
1712         return -TARGET_EFAULT;
1713     target_ip = &(target_sd->sem_perm);
1714     target_ip->__key = tswapl(host_ip->__key);
1715     target_ip->uid = tswapl(host_ip->uid);
1716     target_ip->gid = tswapl(host_ip->gid);
1717     target_ip->cuid = tswapl(host_ip->cuid);
1718     target_ip->cgid = tswapl(host_ip->cgid);
1719     target_ip->mode = tswapl(host_ip->mode);
/* '1' => copy the modified struct back to guest memory on unlock. */
1720     unlock_user_struct(target_sd, target_addr, 1);
/*
 * Convert a guest semid_ds at target_addr into a host semid_ds:
 * perms via target_to_host_ipc_perm(), then the scalar fields.
 * Returns 0 on success, -TARGET_EFAULT on an unreadable guest struct.
 */
1724 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
1725                                                abi_ulong target_addr)
1727     struct target_semid_ds *target_sd;
1729     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
1730         return -TARGET_EFAULT;
/* NOTE(review): return value of the perm conversion is ignored here. */
1731     target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr);
1732     host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
1733     host_sd->sem_otime = tswapl(target_sd->sem_otime);
1734     host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
1735     unlock_user_struct(target_sd, target_addr, 0);
/*
 * Inverse of target_to_host_semid_ds(): write a host semid_ds back to the
 * guest struct at target_addr (perms first, then the scalar fields).
 * Returns 0 on success, -TARGET_EFAULT on an unmappable guest struct.
 */
1739 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
1740                                                struct semid_ds *host_sd)
1742     struct target_semid_ds *target_sd;
1744     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
1745         return -TARGET_EFAULT;
1746     host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm));
1747     target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
1748     target_sd->sem_otime = tswapl(host_sd->sem_otime);
1749     target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
1750     unlock_user_struct(target_sd, target_addr, 1);
/* Host-side semun members (union declaration partially elided here). */
1756 	struct semid_ds *buf;
1757 	unsigned short *array;
/* Guest-ABI counterpart of union semun; pointers are abi_ulong sized. */
1760 union target_semun {
1763 	unsigned short int *array;
/*
 * Convert the guest semun argument of semctl() into a host union semun,
 * depending on which command is being issued: a semid_ds buffer, a plain
 * value, or (partially) an array of semaphore values.  'ds' provides host
 * storage for the buffer case.  Returns -TARGET_EFAULT on bad guest memory.
 * (Switch/case lines are elided in this extract.)
 */
1766 static inline abi_long target_to_host_semun(int cmd,
1767                                             union semun *host_su,
1768                                             abi_ulong target_addr,
1769                                             struct semid_ds *ds)
1771     union target_semun *target_su;
/* semid_ds-buffer commands (e.g. IPC_STAT/IPC_SET). */
1776            if (!lock_user_struct(VERIFY_READ, target_su, target_addr, 1))
1777                return -TARGET_EFAULT;
1778            target_to_host_semid_ds(ds,target_su->buf);
1780            unlock_user_struct(target_su, target_addr, 0);
/* Plain-value commands (e.g. GETVAL/SETVAL). */
1784            if (!lock_user_struct(VERIFY_READ, target_su, target_addr, 1))
1785                return -TARGET_EFAULT;
1786            host_su->val = tswapl(target_su->val);
1787            unlock_user_struct(target_su, target_addr, 0);
/* Array commands: only the first element is converted here. */
1791            if (!lock_user_struct(VERIFY_READ, target_su, target_addr, 1))
1792                return -TARGET_EFAULT;
1793            *host_su->array = tswap16(*target_su->array);
1794            unlock_user_struct(target_su, target_addr, 0);
1797            gemu_log("semun operation not fully supported: %d\n", (int)cmd);
1802 static inline abi_long host_to_target_semun(int cmd,
1803 abi_ulong target_addr,
1804 union semun *host_su,
1805 struct semid_ds *ds)
1807 union target_semun *target_su;
1812 if (lock_user_struct(VERIFY_WRITE, target_su, target_addr, 0))
1813 return -TARGET_EFAULT;
1814 host_to_target_semid_ds(target_su->buf,ds);
1815 unlock_user_struct(target_su, target_addr, 1);
1819 if (lock_user_struct(VERIFY_WRITE, target_su, target_addr, 0))
1820 return -TARGET_EFAULT;
1821 target_su->val = tswapl(host_su->val);
1822 unlock_user_struct(target_su, target_addr, 1);
1826 if (lock_user_struct(VERIFY_WRITE, target_su, target_addr, 0))
1827 return -TARGET_EFAULT;
1828 *target_su->array = tswap16(*host_su->array);
1829 unlock_user_struct(target_su, target_addr, 1);
1832 gemu_log("semun operation not fully supported: %d\n", (int)cmd);
/*
 * Emulate semctl(): mask the command, convert the guest semun in, run the
 * host semctl(), and convert results back out.  Every converting case
 * follows the same in/call/out triplet; the final case calls semctl()
 * without conversion.  (Case labels are elided in this extract.)
 */
1837 static inline abi_long do_semctl(int first, int second, int third,
1841     struct semid_ds dsarg;
/* Some ABIs OR flags into the high bits; the real command is the low byte. */
1842     int cmd = third&0xff;
1847             target_to_host_semun(cmd,&arg,ptr,&dsarg);
1848             ret = get_errno(semctl(first, second, cmd, arg));
1849             host_to_target_semun(cmd,ptr,&arg,&dsarg);
1852             target_to_host_semun(cmd,&arg,ptr,&dsarg);
1853             ret = get_errno(semctl(first, second, cmd, arg));
1854             host_to_target_semun(cmd,ptr,&arg,&dsarg);
1857             target_to_host_semun(cmd,&arg,ptr,&dsarg);
1858             ret = get_errno(semctl(first, second, cmd, arg));
1859             host_to_target_semun(cmd,ptr,&arg,&dsarg);
1862             target_to_host_semun(cmd,&arg,ptr,&dsarg);
1863             ret = get_errno(semctl(first, second, cmd, arg));
1864             host_to_target_semun(cmd,ptr,&arg,&dsarg);
1867             target_to_host_semun(cmd,&arg,ptr,&dsarg);
1868             ret = get_errno(semctl(first, second, cmd, arg));
1869             host_to_target_semun(cmd,ptr,&arg,&dsarg);
1872             target_to_host_semun(cmd,&arg,ptr,&dsarg);
1873             ret = get_errno(semctl(first, second, cmd, arg));
1874             host_to_target_semun(cmd,ptr,&arg,&dsarg);
/* Commands with no semun payload are passed straight through. */
1877             ret = get_errno(semctl(first, second, cmd, arg));
/* Guest-ABI layout of struct msqid_ds.  On 32-bit targets each time field
   is followed by a pad word so the layout matches the 64-bit kernel ABI. */
1883 struct target_msqid_ds
1885     struct target_ipc_perm msg_perm;
1886     abi_ulong msg_stime;
1887 #if TARGET_ABI_BITS == 32
1888     abi_ulong __unused1;
1890     abi_ulong msg_rtime;
1891 #if TARGET_ABI_BITS == 32
1892     abi_ulong __unused2;
1894     abi_ulong msg_ctime;
1895 #if TARGET_ABI_BITS == 32
1896     abi_ulong __unused3;
1898     abi_ulong __msg_cbytes;
1900     abi_ulong msg_qbytes;
1901     abi_ulong msg_lspid;
1902     abi_ulong msg_lrpid;
1903     abi_ulong __unused4;
1904     abi_ulong __unused5;
/*
 * Convert a guest msqid_ds at target_addr into a host msqid_ds
 * (perms via target_to_host_ipc_perm(), then the scalar fields).
 * Returns 0 on success, -TARGET_EFAULT on bad guest memory.
 */
1907 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
1908                                                abi_ulong target_addr)
1910     struct target_msqid_ds *target_md;
1912     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
1913         return -TARGET_EFAULT;
/* NOTE(review): this error return leaves target_md locked — confirm
   whether an unlock_user_struct() is needed on this path. */
1914     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
1915         return -TARGET_EFAULT;
1916     host_md->msg_stime = tswapl(target_md->msg_stime);
1917     host_md->msg_rtime = tswapl(target_md->msg_rtime);
1918     host_md->msg_ctime = tswapl(target_md->msg_ctime);
1919     host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
1920     host_md->msg_qnum = tswapl(target_md->msg_qnum);
1921     host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
1922     host_md->msg_lspid = tswapl(target_md->msg_lspid);
1923     host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
1924     unlock_user_struct(target_md, target_addr, 0);
/*
 * Inverse of target_to_host_msqid_ds(): write a host msqid_ds back into
 * guest memory at target_addr.  Returns 0 or -TARGET_EFAULT.
 */
1928 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
1929                                                struct msqid_ds *host_md)
1931     struct target_msqid_ds *target_md;
1933     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
1934         return -TARGET_EFAULT;
/* NOTE(review): this error return leaves target_md locked — confirm
   whether an unlock_user_struct() is needed on this path. */
1935     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
1936         return -TARGET_EFAULT;
1937     target_md->msg_stime = tswapl(host_md->msg_stime);
1938     target_md->msg_rtime = tswapl(host_md->msg_rtime);
1939     target_md->msg_ctime = tswapl(host_md->msg_ctime);
1940     target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
1941     target_md->msg_qnum = tswapl(host_md->msg_qnum);
1942     target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
1943     target_md->msg_lspid = tswapl(host_md->msg_lspid);
1944     target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
1945     unlock_user_struct(target_md, target_addr, 1);
/* Guest-ABI layout of struct msginfo returned by msgctl(IPC_INFO/MSG_INFO).
   (Most int fields elided in this extract.) */
1949 struct target_msginfo {
1957     unsigned short int msgseg;
/*
 * Copy a host msginfo into the guest struct at target_addr, field by
 * field via __put_user (which handles byteswap).  Returns 0 or
 * -TARGET_EFAULT.
 */
1960 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
1961                                               struct msginfo *host_msginfo)
1963     struct target_msginfo *target_msginfo;
1964     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
1965         return -TARGET_EFAULT;
1966     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
1967     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
1968     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
1969     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
1970     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
1971     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
1972     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
1973     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
1974     unlock_user_struct(target_msginfo, target_addr, 1);
/*
 * Emulate msgctl(): convert the msqid_ds in/out for STAT/SET commands,
 * pass NULL for commands that take no buffer, and return an msginfo for
 * the INFO commands.  (Case labels are elided in this extract.)
 */
1978 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
1980     struct msqid_ds dsarg;
1981     struct msginfo msginfo;
1982     abi_long ret = -TARGET_EINVAL;
1990         if (target_to_host_msqid_ds(&dsarg,ptr))
1991             return -TARGET_EFAULT;
1992         ret = get_errno(msgctl(msgid, cmd, &dsarg));
1993         if (host_to_target_msqid_ds(ptr,&dsarg))
1994             return -TARGET_EFAULT;
/* Buffer-less commands (e.g. IPC_RMID). */
1997         ret = get_errno(msgctl(msgid, cmd, NULL));
/* INFO commands: kernel expects a msqid_ds* but fills a msginfo. */
2001         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2002         if (host_to_target_msginfo(ptr, &msginfo))
2003             return -TARGET_EFAULT;
/* Guest-ABI msgbuf: mtype followed by flexible mtext (fields elided here). */
2010 struct target_msgbuf {
/*
 * Emulate msgsnd(): lock the guest msgbuf, build a host msgbuf with the
 * byteswapped mtype and copied mtext, and send it.
 * (Several lines, including the free of host_mb, are elided here.)
 */
2015 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2016                                  unsigned int msgsz, int msgflg)
2018     struct target_msgbuf *target_mb;
2019     struct msgbuf *host_mb;
2022     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2023         return -TARGET_EFAULT;
/* NOTE(review): malloc() result is used unchecked — confirm OOM policy. */
2024     host_mb = malloc(msgsz+sizeof(long));
2025     host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2026     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2027     ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2029     unlock_user_struct(target_mb, msgp, 0);
/*
 * Emulate msgrcv(): receive into a host msgbuf, then copy the received
 * mtext and byteswapped mtype back into the guest msgbuf at msgp.
 * (Several lines, including error labels and the free of host_mb, are
 * elided in this extract.)
 */
2034 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2035                                  unsigned int msgsz, abi_long msgtyp,
2038     struct target_msgbuf *target_mb;
2040     struct msgbuf *host_mb;
2043     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2044         return -TARGET_EFAULT;
/* NOTE(review): malloc() result is used unchecked — confirm OOM policy. */
2046     host_mb = malloc(msgsz+sizeof(long));
2047     ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
/* mtext lives right after the abi_ulong mtype in the guest struct. */
2050         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2051         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2052         if (!target_mtext) {
2053             ret = -TARGET_EFAULT;
2056         memcpy(target_mb->mtext, host_mb->mtext, ret);
2057         unlock_user(target_mtext, target_mtext_addr, ret);
2060     target_mb->mtype = tswapl(host_mb->mtype);
2065     unlock_user_struct(target_mb, msgp, 1);
2069 #ifdef TARGET_NR_ipc
/*
 * Demultiplexer for the ipc(2) super-syscall: 'call' selects the SysV IPC
 * operation (low 16 bits) and its calling-convention version (high 16
 * bits); arguments are forwarded to the sem/msg/shm helpers.  shmat/shmdt
 * additionally maintain shm_regions[] and guest page flags.
 * (Several lines, including most case labels, are elided in this extract.)
 */
2070 /* ??? This only works with linear mappings.  */
2071 /* do_ipc() must return target values and target errnos. */
2072 static abi_long do_ipc(unsigned int call, int first,
2073                        int second, int third,
2074                        abi_long ptr, abi_long fifth)
2078     struct shmid_ds shm_info;
2081     version = call >> 16;
/* semop: the sembuf array is used in place through g2h(). */
2086         ret = get_errno(semop(first,(struct sembuf *)g2h(ptr), second));
2090         ret = get_errno(semget(first, second, third));
2094         ret = do_semctl(first, second, third, ptr);
2097     case IPCOP_semtimedop:
2098         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2099         ret = -TARGET_ENOSYS;
2103         ret = get_errno(msgget(first, second));
2107         ret = do_msgsnd(first, ptr, second, third);
2111         ret = do_msgctl(first, second, ptr);
/* Old-style msgrcv passes msgp/msgtyp packed in an ipc_kludge struct. */
2118                 struct target_ipc_kludge {
2123                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2124                     ret = -TARGET_EFAULT;
2128                 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2130                 unlock_user_struct(tmp, ptr, 0);
2134             ret = do_msgrcv(first, ptr, second, fifth, third);
2142             /* SHM_* flags are the same on all linux platforms */
2143             host_addr = shmat(first, (void *)g2h(ptr), second);
2144             if (host_addr == (void *)-1) {
2145                 ret = get_errno((long)host_addr);
2148             raddr = h2g((unsigned long)host_addr);
2149             /* find out the length of the shared memory segment */
2151             ret = get_errno(shmctl(first, IPC_STAT, &shm_info));
2152             if (is_error(ret)) {
2153                 /* can't get length, bail out */
/* Mark the attached range valid/readable (and writable unless RDONLY). */
2157             page_set_flags(raddr, raddr + shm_info.shm_segsz,
2158                            PAGE_VALID | PAGE_READ |
2159                            ((second & SHM_RDONLY)? 0: PAGE_WRITE));
/* Remember the mapping so shmdt() can find its size later. */
2160             for (i = 0; i < N_SHM_REGIONS; ++i) {
2161                 if (shm_regions[i].start == 0) {
2162                     shm_regions[i].start = raddr;
2163                     shm_regions[i].size = shm_info.shm_segsz;
2167             if (put_user_ual(raddr, third))
2168                 return -TARGET_EFAULT;
2173         for (i = 0; i < N_SHM_REGIONS; ++i) {
2174             if (shm_regions[i].start == ptr) {
2175                 shm_regions[i].start = 0;
2176                 page_set_flags(ptr, shm_regions[i].size, 0);
2180         ret = get_errno(shmdt((void *)g2h(ptr)));
2184         /* IPC_* flag values are the same on all linux platforms */
2185         ret = get_errno(shmget(first, second, third));
2188         /* IPC_* and SHM_* command values are the same on all linux platforms */
2194         ret = get_errno(shmctl(first, second, NULL));
2202         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2203         ret = -TARGET_ENOSYS;
2210 /* kernel structure types definitions */
/* First pass over syscall_types.h: generate the STRUCT_<name> enum ids. */
2213 #define STRUCT(name, list...) STRUCT_ ## name,
2214 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2216 #include "syscall_types.h"
2219 #undef STRUCT_SPECIAL
/* Second pass: generate the argtype descriptor array for each struct. */
2221 #define STRUCT(name, list...) static const argtype struct_ ## name ## _def[] = { list, TYPE_NULL };
2222 #define STRUCT_SPECIAL(name)
2223 #include "syscall_types.h"
2225 #undef STRUCT_SPECIAL
/* One entry per translated ioctl: target cmd, host cmd, name and the
   argument thunk description used to convert the third argument. */
2227 typedef struct IOCTLEntry {
2228     unsigned int target_cmd;
2229     unsigned int host_cmd;
2232     const argtype arg_type[5];
/* Access direction of the ioctl argument, from the kernel's viewpoint. */
2235 #define IOC_R 0x0001
2236 #define IOC_W 0x0002
2237 #define IOC_RW (IOC_R | IOC_W)
2239 #define MAX_STRUCT_SIZE 4096
2241 static IOCTLEntry ioctl_entries[] = {
2242 #define IOCTL(cmd, access, types...) \
2243     { TARGET_ ## cmd, cmd, #cmd, access, { types } },
/*
 * Emulate ioctl(2): look the target command up in ioctl_entries[], then
 * convert the argument according to the entry's access mode — none,
 * pass-through integer, or a struct marshalled through buf_temp with the
 * thunk machinery (host<->target layout conversion).
 * (Several lines of this function are elided in this extract.)
 */
2248 /* ??? Implement proper locking for ioctls.  */
2249 /* do_ioctl() Must return target values and target errnos. */
2250 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
2252     const IOCTLEntry *ie;
2253     const argtype *arg_type;
2255     uint8_t buf_temp[MAX_STRUCT_SIZE];
/* Table is terminated by a zero target_cmd sentinel. */
2261         if (ie->target_cmd == 0) {
2262             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
2263             return -TARGET_ENOSYS;
2265         if (ie->target_cmd == cmd)
2269     arg_type = ie->arg_type;
2271     gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
2273     switch(arg_type[0]) {
/* No argument at all. */
2276         ret = get_errno(ioctl(fd, ie->host_cmd));
/* Integer argument: passed through unchanged. */
2281         ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* Pointer-to-struct argument: marshalled via the thunk descriptors. */
2285         target_size = thunk_type_size(arg_type, 0);
2286         switch(ie->access) {
/* Read-only (kernel writes): convert the result back to the target. */
2288             ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2289             if (!is_error(ret)) {
2290                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2292                     return -TARGET_EFAULT;
2293                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2294                 unlock_user(argptr, arg, target_size);
/* Write-only (kernel reads): convert the target struct to host first. */
2298             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2300                 return -TARGET_EFAULT;
2301             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2302             unlock_user(argptr, arg, 0);
2303             ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* Read-write: convert in, call, convert back out. */
2307             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2309                 return -TARGET_EFAULT;
2310             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2311             unlock_user(argptr, arg, 0);
2312             ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2313             if (!is_error(ret)) {
2314                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2316                     return -TARGET_EFAULT;
2317                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2318                 unlock_user(argptr, arg, target_size);
2324         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
2325                  (long)cmd, arg_type[0]);
2326         ret = -TARGET_ENOSYS;
/* Translation tables between target and host termios flag encodings.
   Each entry is { target_mask, target_bits, host_mask, host_bits }; they
   are consumed by target_to_host_bitmask()/host_to_target_bitmask(). */

/* Input-mode (c_iflag) bits. */
2332 static const bitmask_transtbl iflag_tbl[] = {
2333         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
2334         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
2335         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
2336         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
2337         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
2338         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
2339         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
2340         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
2341         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
2342         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
2343         { TARGET_IXON, TARGET_IXON, IXON, IXON },
2344         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
2345         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
2346         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },

/* Output-mode (c_oflag) bits, including the multi-bit delay fields. */
2350 static const bitmask_transtbl oflag_tbl[] = {
2351 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
2352 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
2353 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
2354 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
2355 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
2356 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
2357 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
2358 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
2359 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
2360 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
2361 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
2362 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
2363 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
2364 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
2365 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
2366 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
2367 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
2368 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
2369 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
2370 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
2371 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
2372 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
2373 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
2374 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },

/* Control-mode (c_cflag) bits: baud rates, character size, parity, flow. */
2378 static const bitmask_transtbl cflag_tbl[] = {
2379 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
2380 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
2381 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
2382 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
2383 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
2384 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
2385 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
2386 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
2387 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
2388 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
2389 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
2390 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
2391 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
2392 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
2393 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
2394 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
2395 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
2396 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
2397 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
2398 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
2399 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
2400 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
2401 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
2402 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
2403 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
2404 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
2405 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
2406 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
2407 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
2408 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
2409 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },

/* Local-mode (c_lflag) bits. */
2413 static const bitmask_transtbl lflag_tbl[] = {
2414 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
2415 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
2416 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
2417 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
2418 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
2419 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
2420 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
2421 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
2422 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
2423 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
2424 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
2425 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
2426 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
2427 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
2428 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/*
 * Convert a guest struct termios to the host layout: translate each flag
 * word through its bitmask table (with byteswap) and remap every control
 * character slot individually, since VINTR etc. differ between ABIs.
 * Registered as a thunk converter in struct_termios_def below.
 */
2432 static void target_to_host_termios (void *dst, const void *src)
2434     struct host_termios *host = dst;
2435     const struct target_termios *target = src;
2438         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
2440         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
2442         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
2444         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
2445     host->c_line = target->c_line;
2447     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
2448     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
2449     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
2450     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
2451     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
2452     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
2453     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
2454     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
2455     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
2456     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
2457     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
2458     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
2459     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
2460     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
2461     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
2462     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
2463     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * Inverse of target_to_host_termios(): convert a host struct termios to
 * the guest layout (flag words via the bitmask tables, control characters
 * slot by slot).  Registered as a thunk converter in struct_termios_def.
 */
2466 static void host_to_target_termios (void *dst, const void *src)
2468     struct target_termios *target = dst;
2469     const struct host_termios *host = src;
2472         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
2474         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
2476         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
2478         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
2479     target->c_line = host->c_line;
2481     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
2482     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
2483     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
2484     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
2485     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
2486     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
2487     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
2488     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
2489     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
2490     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
2491     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
2492     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
2493     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
2494     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
2495     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
2496     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
2497     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor wiring the two termios converters above into the
   generic struct-marshalling machinery used by do_ioctl(). */
2500 static const StructEntry struct_termios_def = {
2501     .convert = { host_to_target_termios, target_to_host_termios },
2502     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
2503     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* Target<->host translation for mmap(2) flag bits. */
2506 static bitmask_transtbl mmap_flags_tbl[] = {
2507 	{ TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
2508 	{ TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
2509 	{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
2510 	{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
2511 	{ TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
2512 	{ TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
2513 	{ TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
2514 	{ TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },

/* Target<->host translation for open(2)/fcntl(2) file-status flags.
   O_DIRECT is optional on some hosts, hence the #if guard. */
2518 static bitmask_transtbl fcntl_flags_tbl[] = {
2519 	{ TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
2520 	{ TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
2521 	{ TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
2522 	{ TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
2523 	{ TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
2524 	{ TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
2525 	{ TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
2526 	{ TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
2527 	{ TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
2528 	{ TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
2529 	{ TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
2530 	{ TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
2531 	{ TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
2532 #if defined(O_DIRECT)
2533 	{ TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
2538 #if defined(TARGET_I386)
/* i386 modify_ldt(2) emulation. */
2540 /* NOTE: there is really one LDT for all the threads */
2541 static uint8_t *ldt_table;
/*
 * modify_ldt read operation: copy up to bytecount bytes of the emulated
 * LDT into guest memory at ptr.  (A few lines are elided in this extract.)
 */
2543 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
2550     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
/* Never copy more than the caller asked for. */
2551     if (size > bytecount)
2553     p = lock_user(VERIFY_WRITE, ptr, size, 0);
2555         return -TARGET_EFAULT;
2556     /* ??? Should this by byteswapped?  */
2557     memcpy(p, ldt_table, size);
2558     unlock_user(p, ptr, size);
2562 /* XXX: add locking support */
/*
 * modify_ldt(func == 1 or 0x11): install one LDT descriptor from the guest
 * struct at PTR.  OLDMODE selects the legacy flag interpretation.  Returns
 * -TARGET_EINVAL on malformed input, -TARGET_EFAULT on bad guest pointers,
 * -TARGET_ENOMEM when the lazy LDT allocation fails.
 * NOTE(review): this excerpt elides several lines (the oldmode branches
 * around 'lm', parts of the clear-entry test, and the seg_32bit/contents
 * bits of entry_2); comments below describe only what is visible.
 */
2563 static abi_long write_ldt(CPUX86State *env,
2564 abi_ulong ptr, unsigned long bytecount, int oldmode)
2566 struct target_modify_ldt_ldt_s ldt_info;
2567 struct target_modify_ldt_ldt_s *target_ldt_info;
2568 int seg_32bit, contents, read_exec_only, limit_in_pages;
2569 int seg_not_present, useable, lm;
2570 uint32_t *lp, entry_1, entry_2;
2572 if (bytecount != sizeof(ldt_info))
2573 return -TARGET_EINVAL;
2574 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
2575 return -TARGET_EFAULT;
/* Byte-swap the guest descriptor request into host order. */
2576 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
2577 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
2578 ldt_info.limit = tswap32(target_ldt_info->limit);
2579 ldt_info.flags = tswap32(target_ldt_info->flags);
2580 unlock_user_struct(target_ldt_info, ptr, 0);
2582 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
2583 return -TARGET_EINVAL;
/* Unpack the packed 'flags' word (same layout as the kernel's
 * struct user_desc bitfields). */
2584 seg_32bit = ldt_info.flags & 1;
2585 contents = (ldt_info.flags >> 1) & 3;
2586 read_exec_only = (ldt_info.flags >> 3) & 1;
2587 limit_in_pages = (ldt_info.flags >> 4) & 1;
2588 seg_not_present = (ldt_info.flags >> 5) & 1;
2589 useable = (ldt_info.flags >> 6) & 1;
2593 lm = (ldt_info.flags >> 7) & 1;
2595 if (contents == 3) {
2597 return -TARGET_EINVAL;
2598 if (seg_not_present == 0)
2599 return -TARGET_EINVAL;
2601 /* allocate the LDT */
/* Lazy first-use allocation of the emulated LDT backing store. */
2603 env->ldt.base = target_mmap(0,
2604 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
2605 PROT_READ|PROT_WRITE,
2606 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
2607 if (env->ldt.base == -1)
2608 return -TARGET_ENOMEM;
2609 memset(g2h(env->ldt.base), 0,
2610 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
2611 env->ldt.limit = 0xffff;
2612 ldt_table = g2h(env->ldt.base);
2615 /* NOTE: same code as Linux kernel */
2616 /* Allow LDTs to be cleared by the user. */
2617 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
2620 read_exec_only == 1 &&
2622 limit_in_pages == 0 &&
2623 seg_not_present == 1 &&
/* Pack base/limit/attributes into the two 32-bit halves of an x86
 * segment descriptor (low word: base 15..0 | limit 15..0). */
2631 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
2632 (ldt_info.limit & 0x0ffff);
2633 entry_2 = (ldt_info.base_addr & 0xff000000) |
2634 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
2635 (ldt_info.limit & 0xf0000) |
2636 ((read_exec_only ^ 1) << 9) |
2638 ((seg_not_present ^ 1) << 15) |
2640 (limit_in_pages << 23) |
2644 entry_2 |= (useable << 20);
2646 /* Install the new entry ... */
/* Each LDT slot is 8 bytes, hence entry_number << 3. */
2648 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
2649 lp[0] = tswap32(entry_1);
2650 lp[1] = tswap32(entry_2);
2654 /* specific and weird i386 syscalls */
/*
 * Dispatcher for the modify_ldt(2) emulation: func 0 reads the LDT,
 * 1 writes in old mode, 0x11 writes in new mode; anything else is ENOSYS.
 * NOTE(review): the switch/case scaffolding is elided from this excerpt.
 */
2655 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
2656 unsigned long bytecount)
2662 ret = read_ldt(ptr, bytecount);
2665 ret = write_ldt(env, ptr, bytecount, 1);
2668 ret = write_ldt(env, ptr, bytecount, 0);
2671 ret = -TARGET_ENOSYS;
2677 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2) emulation: install a TLS descriptor into the
 * emulated GDT.  entry_number == -1 asks us to pick a free TLS slot and
 * report it back to the guest.  Returns -TARGET_EFAULT / -TARGET_EINVAL
 * on bad pointer / bad slot.
 * NOTE(review): several lines (oldmode-style branches around 'lm',
 * clear-entry handling, parts of the entry_2 encoding) are elided from
 * this excerpt.
 */
2678 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
2680 uint64_t *gdt_table = g2h(env->gdt.base);
2681 struct target_modify_ldt_ldt_s ldt_info;
2682 struct target_modify_ldt_ldt_s *target_ldt_info;
2683 int seg_32bit, contents, read_exec_only, limit_in_pages;
2684 int seg_not_present, useable, lm;
2685 uint32_t *lp, entry_1, entry_2;
2688 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
2689 if (!target_ldt_info)
2690 return -TARGET_EFAULT;
2691 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
2692 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
2693 ldt_info.limit = tswap32(target_ldt_info->limit);
2694 ldt_info.flags = tswap32(target_ldt_info->flags);
/* entry_number == -1: allocate the first empty TLS GDT slot and write
 * the chosen index back into the guest structure. */
2695 if (ldt_info.entry_number == -1) {
2696 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
2697 if (gdt_table[i] == 0) {
2698 ldt_info.entry_number = i;
2699 target_ldt_info->entry_number = tswap32(i);
2704 unlock_user_struct(target_ldt_info, ptr, 1);
2706 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
2707 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
2708 return -TARGET_EINVAL;
/* Unpack the packed 'flags' word (user_desc bitfield layout). */
2709 seg_32bit = ldt_info.flags & 1;
2710 contents = (ldt_info.flags >> 1) & 3;
2711 read_exec_only = (ldt_info.flags >> 3) & 1;
2712 limit_in_pages = (ldt_info.flags >> 4) & 1;
2713 seg_not_present = (ldt_info.flags >> 5) & 1;
2714 useable = (ldt_info.flags >> 6) & 1;
2718 lm = (ldt_info.flags >> 7) & 1;
2721 if (contents == 3) {
2722 if (seg_not_present == 0)
2723 return -TARGET_EINVAL;
2726 /* NOTE: same code as Linux kernel */
2727 /* Allow LDTs to be cleared by the user. */
2728 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
2729 if ((contents == 0 &&
2730 read_exec_only == 1 &&
2732 limit_in_pages == 0 &&
2733 seg_not_present == 1 &&
/* Pack base/limit/attributes into the two descriptor halves. */
2741 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
2742 (ldt_info.limit & 0x0ffff);
2743 entry_2 = (ldt_info.base_addr & 0xff000000) |
2744 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
2745 (ldt_info.limit & 0xf0000) |
2746 ((read_exec_only ^ 1) << 9) |
2748 ((seg_not_present ^ 1) << 15) |
2750 (limit_in_pages << 23) |
2755 /* Install the new entry ... */
/* gdt_table is uint64_t*, so indexing by entry_number addresses one
 * 8-byte descriptor; lp views it as two 32-bit words. */
2757 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
2758 lp[0] = tswap32(entry_1);
2759 lp[1] = tswap32(entry_2);
/*
 * get_thread_area(2) emulation: read the TLS descriptor selected by
 * entry_number out of the emulated GDT and decode it back into the
 * guest's struct user_desc-like layout (the inverse of
 * do_set_thread_area's packing).
 */
2763 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
2765 struct target_modify_ldt_ldt_s *target_ldt_info;
2766 uint64_t *gdt_table = g2h(env->gdt.base);
2767 uint32_t base_addr, limit, flags;
2768 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
2769 int seg_not_present, useable, lm;
2770 uint32_t *lp, entry_1, entry_2;
2772 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
2773 if (!target_ldt_info)
2774 return -TARGET_EFAULT;
2775 idx = tswap32(target_ldt_info->entry_number);
2776 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
2777 idx > TARGET_GDT_ENTRY_TLS_MAX) {
2778 unlock_user_struct(target_ldt_info, ptr, 1);
2779 return -TARGET_EINVAL;
/* Read the two 32-bit halves of the 8-byte descriptor. */
2781 lp = (uint32_t *)(gdt_table + idx);
2782 entry_1 = tswap32(lp[0]);
2783 entry_2 = tswap32(lp[1]);
/* Decode descriptor attribute bits; read_exec_only and seg_not_present
 * are stored inverted in the descriptor, hence the ^ 1. */
2785 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
2786 contents = (entry_2 >> 10) & 3;
2787 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
2788 seg_32bit = (entry_2 >> 22) & 1;
2789 limit_in_pages = (entry_2 >> 23) & 1;
2790 useable = (entry_2 >> 20) & 1;
2794 lm = (entry_2 >> 21) & 1;
/* Re-pack into the guest-visible 'flags' word. */
2796 flags = (seg_32bit << 0) | (contents << 1) |
2797 (read_exec_only << 3) | (limit_in_pages << 4) |
2798 (seg_not_present << 5) | (useable << 6) | (lm << 7);
2799 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
2800 base_addr = (entry_1 >> 16) |
2801 (entry_2 & 0xff000000) |
2802 ((entry_2 & 0xff) << 16);
2803 target_ldt_info->base_addr = tswapl(base_addr);
2804 target_ldt_info->limit = tswap32(limit);
2805 target_ldt_info->flags = tswap32(flags);
2806 unlock_user_struct(target_ldt_info, ptr, 1);
2809 #endif /* TARGET_I386 && TARGET_ABI32 */
2811 #ifndef TARGET_ABI32
/*
 * x86-64 arch_prctl(2) emulation: ARCH_SET_GS/ARCH_SET_FS load the
 * selected segment's base from ADDR; ARCH_GET_GS/ARCH_GET_FS store the
 * current base to guest memory at ADDR.  Unknown codes yield
 * -TARGET_EINVAL.
 * NOTE(review): the idx selection for the FS cases and the surrounding
 * switch scaffolding are elided from this excerpt.
 */
2812 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
2819 case TARGET_ARCH_SET_GS:
2820 case TARGET_ARCH_SET_FS:
2821 if (code == TARGET_ARCH_SET_GS)
2825 cpu_x86_load_seg(env, idx, 0);
2826 env->segs[idx].base = addr;
2828 case TARGET_ARCH_GET_GS:
2829 case TARGET_ARCH_GET_FS:
2830 if (code == TARGET_ARCH_GET_GS)
2834 val = env->segs[idx].base;
2835 if (put_user(val, addr, abi_ulong))
2836 return -TARGET_EFAULT;
2839 ret = -TARGET_EINVAL;
2846 #endif /* defined(TARGET_I386) */
2848 #if defined(USE_NPTL)
/* NPTL build: guest threads map onto host pthreads. */
2850 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
/* Serializes thread creation so parent-side TLS setup in do_fork()
 * appears atomic to the child (child takes/releases it before running). */
2852 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Parent -> child handshake state for do_fork()/clone_func().
 * NOTE(review): the struct's opening line and some members (thread, tid,
 * env, sigmask) are elided from this excerpt. */
2855 pthread_mutex_t mutex;
2856 pthread_cond_t cond;
2859 abi_ulong child_tidptr;
2860 abi_ulong parent_tidptr;
/*
 * pthread entry point for an emulated clone(): publishes the child TID to
 * the requested guest locations, restores the signal mask that do_fork()
 * blocked, signals readiness, then waits for the parent to finish TLS
 * setup (via clone_lock) before running guest code.
 */
2864 static void *clone_func(void *arg)
2866 new_thread_info *info = arg;
2871 info->tid = gettid();
2872 if (info->child_tidptr)
2873 put_user_u32(info->tid, info->child_tidptr);
2874 if (info->parent_tidptr)
2875 put_user_u32(info->tid, info->parent_tidptr);
2876 /* Enable signals. */
2877 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
2878 /* Signal to the parent that we're ready. */
2879 pthread_mutex_lock(&info->mutex);
2880 pthread_cond_broadcast(&info->cond);
2881 pthread_mutex_unlock(&info->mutex);
2882 /* Wait until the parent has finished initializing the tls state. */
2883 pthread_mutex_lock(&clone_lock);
2884 pthread_mutex_unlock(&clone_lock);
2890 /* this stack is the equivalent of the kernel stack associated with a
/* Non-NPTL build: raw clone(2) with a small dedicated stack. */
2892 #define NEW_STACK_SIZE 8192
/* clone(2) entry point for the non-NPTL path.
 * NOTE(review): its body is elided from this excerpt. */
2894 static int clone_func(void *arg)
2896 CPUState *env = arg;
2903 /* do_fork() Must return host values and target errnos (unlike most
2904    do_*() functions). */
/*
 * Emulate fork()/vfork()/clone().  CLONE_VM requests a thread (pthread on
 * NPTL builds, raw clone otherwise); anything else is treated as a fork.
 * Returns the child TID/PID on success, negative target errno on failure.
 * NOTE(review): this excerpt elides a number of lines (locals, fork()
 * call, several branch closers); comments describe only what is visible.
 */
2905 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
2906                    abi_ulong parent_tidptr, target_ulong newtls,
2907                    abi_ulong child_tidptr)
2913 #if defined(USE_NPTL)
2914 unsigned int nptl_flags;
2918 /* Emulate vfork() with fork() */
2919 if (flags & CLONE_VFORK)
2920 flags &= ~(CLONE_VFORK | CLONE_VM);
2922 if (flags & CLONE_VM) {
2923 #if defined(USE_NPTL)
2924 new_thread_info info;
2925 pthread_attr_t attr;
/* TaskState carries the emulated kernel stack for the new thread. */
2927 ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
2928 init_task_state(ts);
2929 new_stack = ts->stack;
2930 /* we create a new CPU instance. */
2931 new_env = cpu_copy(env);
2932 /* Init regs that differ from the parent. */
2933 cpu_clone_regs(new_env, newsp);
2934 new_env->opaque = ts;
2935 #if defined(USE_NPTL)
/* Strip the NPTL-specific flags; they are handled here, not by the
 * host clone. */
2937 flags &= ~CLONE_NPTL_FLAGS2;
2939 /* TODO: Implement CLONE_CHILD_CLEARTID. */
2940 if (nptl_flags & CLONE_SETTLS)
2941 cpu_set_tls (new_env, newtls);
2943 /* Grab a mutex so that thread setup appears atomic. */
2944 pthread_mutex_lock(&clone_lock);
2946 memset(&info, 0, sizeof(info));
2947 pthread_mutex_init(&info.mutex, NULL);
2948 pthread_mutex_lock(&info.mutex);
2949 pthread_cond_init(&info.cond, NULL);
2951 if (nptl_flags & CLONE_CHILD_SETTID)
2952 info.child_tidptr = child_tidptr;
2953 if (nptl_flags & CLONE_PARENT_SETTID)
2954 info.parent_tidptr = parent_tidptr;
2956 ret = pthread_attr_init(&attr);
2957 ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
2958 /* It is not safe to deliver signals until the child has finished
2959    initializing, so temporarily block all signals. */
2960 sigfillset(&sigmask);
2961 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
2963 ret = pthread_create(&info.thread, &attr, clone_func, &info);
/* Restore our own mask; the child restores its copy in clone_func(). */
2965 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
2966 pthread_attr_destroy(&attr);
2968 /* Wait for the child to initialize. */
2969 pthread_cond_wait(&info.cond, &info.mutex);
2971 if (flags & CLONE_PARENT_SETTID)
2972 put_user_u32(ret, parent_tidptr);
2976 pthread_mutex_unlock(&info.mutex);
2977 pthread_cond_destroy(&info.cond);
2978 pthread_mutex_destroy(&info.mutex);
2979 pthread_mutex_unlock(&clone_lock);
2981 if (flags & CLONE_NPTL_FLAGS2)
2983 /* This is probably going to die very quickly, but do it anyway. */
2985 ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
2987 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
2991 /* if no CLONE_VM, we consider it is a fork */
2992 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
2997 /* Child Process. */
2998 cpu_clone_regs(env, newsp);
3000 #if defined(USE_NPTL)
3001 /* There is a race condition here. The parent process could
3002    theoretically read the TID in the child process before the child
3003    tid is set. This would require using either ptrace
3004    (not implemented) or having *_tidptr to point at a shared memory
3005    mapping. We can't repeat the spinlock hack used above because
3006    the child process gets its own copy of the lock. */
3007 if (flags & CLONE_CHILD_SETTID)
3008 put_user_u32(gettid(), child_tidptr)
3009 if (flags & CLONE_PARENT_SETTID)
3010 put_user_u32(gettid(), parent_tidptr);
3011 ts = (TaskState *)env->opaque;
3012 if (flags & CLONE_SETTLS)
3013 cpu_set_tls (env, newtls);
3014 /* TODO: Implement CLONE_CHILD_CLEARTID. */
/*
 * fcntl(2) emulation: translates struct flock / flock64 and open-flag
 * bitmasks between target and host layouts, then forwards to the host
 * fcntl().  Returns host result via get_errno(), -TARGET_EFAULT on bad
 * guest pointers.
 * NOTE(review): case labels for F_GETFL/F_SETFL and several closing
 * braces/breaks are elided from this excerpt.
 * NOTE(review): the 64-bit paths use tswap16() on l_pid while the 32-bit
 * paths use tswapl() — looks inconsistent; verify against the target
 * struct definitions.
 */
3023 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
3026 struct target_flock *target_fl;
3027 struct flock64 fl64;
3028 struct target_flock64 *target_fl64;
3032 case TARGET_F_GETLK:
3033 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3034 return -TARGET_EFAULT;
3035 fl.l_type = tswap16(target_fl->l_type);
3036 fl.l_whence = tswap16(target_fl->l_whence);
3037 fl.l_start = tswapl(target_fl->l_start);
3038 fl.l_len = tswapl(target_fl->l_len);
3039 fl.l_pid = tswapl(target_fl->l_pid);
3040 unlock_user_struct(target_fl, arg, 0);
3041 ret = get_errno(fcntl(fd, cmd, &fl));
/* GETLK writes back: copy the (possibly updated) lock description out. */
3043 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
3044 return -TARGET_EFAULT;
3045 target_fl->l_type = tswap16(fl.l_type);
3046 target_fl->l_whence = tswap16(fl.l_whence);
3047 target_fl->l_start = tswapl(fl.l_start);
3048 target_fl->l_len = tswapl(fl.l_len);
3049 target_fl->l_pid = tswapl(fl.l_pid);
3050 unlock_user_struct(target_fl, arg, 1);
3054 case TARGET_F_SETLK:
3055 case TARGET_F_SETLKW:
3056 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3057 return -TARGET_EFAULT;
3058 fl.l_type = tswap16(target_fl->l_type);
3059 fl.l_whence = tswap16(target_fl->l_whence);
3060 fl.l_start = tswapl(target_fl->l_start);
3061 fl.l_len = tswapl(target_fl->l_len);
3062 fl.l_pid = tswapl(target_fl->l_pid);
3063 unlock_user_struct(target_fl, arg, 0);
3064 ret = get_errno(fcntl(fd, cmd, &fl));
/* 64-bit lock commands: note the '>> 1' applied to l_type and cmd —
 * NOTE(review): presumably maps the target *LK64 encodings onto the
 * host's; confirm against the TARGET_F_* definitions. */
3067 case TARGET_F_GETLK64:
3068 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3069 return -TARGET_EFAULT;
3070 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3071 fl64.l_whence = tswap16(target_fl64->l_whence);
3072 fl64.l_start = tswapl(target_fl64->l_start);
3073 fl64.l_len = tswapl(target_fl64->l_len);
3074 fl64.l_pid = tswap16(target_fl64->l_pid);
3075 unlock_user_struct(target_fl64, arg, 0);
3076 ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
3078 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
3079 return -TARGET_EFAULT;
3080 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
3081 target_fl64->l_whence = tswap16(fl64.l_whence);
3082 target_fl64->l_start = tswapl(fl64.l_start);
3083 target_fl64->l_len = tswapl(fl64.l_len);
3084 target_fl64->l_pid = tswapl(fl64.l_pid);
3085 unlock_user_struct(target_fl64, arg, 1);
3088 case TARGET_F_SETLK64:
3089 case TARGET_F_SETLKW64:
3090 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3091 return -TARGET_EFAULT;
3092 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3093 fl64.l_whence = tswap16(target_fl64->l_whence);
3094 fl64.l_start = tswapl(target_fl64->l_start);
3095 fl64.l_len = tswapl(target_fl64->l_len);
3096 fl64.l_pid = tswap16(target_fl64->l_pid);
3097 unlock_user_struct(target_fl64, arg, 0);
3098 ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
/* F_GETFL result: translate host status flags to target encoding. */
3102 ret = get_errno(fcntl(fd, cmd, arg));
3104 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
/* F_SETFL argument: translate target status flags to host encoding. */
3109 ret = get_errno(fcntl(fd, cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
/* Default: pass the command straight through. */
3113 ret = get_errno(fcntl(fd, cmd, arg));
/* 16-bit uid/gid syscall helpers (USE_UID16 targets): convert between
 * 32-bit host ids and the 16-bit target ABI.  The low2high* helpers
 * sign-special-case -1 ("no change" per setresuid(2) conventions).
 * NOTE(review): the function bodies' return statements are elided from
 * this excerpt. */
3121 static inline int high2lowuid(int uid)
3129 static inline int high2lowgid(int gid)
3137 static inline int low2highuid(int uid)
3139 if ((int16_t)uid == -1)
3145 static inline int low2highgid(int gid)
3147 if ((int16_t)gid == -1)
3153 #endif /* USE_UID16 */
/*
 * One-time initialization of the syscall layer: registers thunk struct
 * descriptions, patches ioctl size fields that were left as the
 * all-ones sentinel, and builds the target->host errno table.
 * NOTE(review): the ioctl_entry loop scaffolding and some closing braces
 * are elided from this excerpt.
 */
3155 void syscall_init(void)
3158 const argtype *arg_type;
3162 #define STRUCT(name, list...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3163 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3164 #include "syscall_types.h"
3166 #undef STRUCT_SPECIAL
3168 /* we patch the ioctl size if necessary. We rely on the fact that
3169    no ioctl has all the bits at '1' in the size field */
3171 while (ie->target_cmd != 0) {
3172 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
3173     TARGET_IOC_SIZEMASK) {
3174 arg_type = ie->arg_type;
/* A size-patched ioctl must take a pointer argument, otherwise the
 * thunk machinery cannot compute a size for it. */
3175 if (arg_type[0] != TYPE_PTR) {
3176 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
3181 size = thunk_type_size(arg_type, 0);
3182 ie->target_cmd = (ie->target_cmd &
3183                   ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
3184     (size << TARGET_IOC_SIZESHIFT);
3187 /* Build target_to_host_errno_table[] table from
3188  * host_to_target_errno_table[]. */
3189 for (i=0; i < ERRNO_TABLE_SIZE; i++)
3190 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
3192 /* automatic consistency check if same arch */
3193 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3194     (defined(__x86_64__) && defined(TARGET_X86_64))
3195 if (unlikely(ie->target_cmd != ie->host_cmd)) {
3196 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3197         ie->name, ie->target_cmd, ie->host_cmd);
3204 #if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit value passed by a 32-bit guest as two registers;
 * register order depends on guest endianness. */
3205 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
3207 #ifdef TARGET_WORDS_BIGENDIAN
3208 return ((uint64_t)word0 << 32) | word1;
3210 return ((uint64_t)word1 << 32) | word0;
3213 #else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the value arrives whole in word0 (word1 unused).
 * NOTE(review): the body's return is elided from this excerpt. */
3214 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
3218 #endif /* TARGET_ABI_BITS != 32 */
3220 #ifdef TARGET_NR_truncate64
/* truncate64(2) wrapper: rebuilds the 64-bit length from two guest
 * registers.  The elided ARM-EABI branch visible at line 3227 shifts the
 * argument registers (EABI aligns 64-bit args to even register pairs). */
3221 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
3227 if (((CPUARMState *)cpu_env)->eabi)
3233 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
3237 #ifdef TARGET_NR_ftruncate64
/* ftruncate64(2) wrapper: same register-pair handling as
 * target_truncate64(), operating on a file descriptor. */
3238 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
3244 if (((CPUARMState *)cpu_env)->eabi)
3250 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Copy a guest struct timespec at TARGET_ADDR into *HOST_TS, byte-swapping
 * both fields.  Returns -TARGET_EFAULT if the guest struct can't be
 * locked (0 on success — return elided from this excerpt). */
3254 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
3255                                                abi_ulong target_addr)
3257 struct target_timespec *target_ts;
3259 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
3260 return -TARGET_EFAULT;
3261 host_ts->tv_sec = tswapl(target_ts->tv_sec);
3262 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
3263 unlock_user_struct(target_ts, target_addr, 0);
/* Copy *HOST_TS out to the guest struct timespec at TARGET_ADDR,
 * byte-swapping both fields.  Returns -TARGET_EFAULT if the guest struct
 * can't be locked (0 on success — return elided from this excerpt). */
3267 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
3268                                                struct timespec *host_ts)
3270 struct target_timespec *target_ts;
3272 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
3273 return -TARGET_EFAULT;
3274 target_ts->tv_sec = tswapl(host_ts->tv_sec);
3275 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
3276 unlock_user_struct(target_ts, target_addr, 1);
3280 #ifdef TARGET_NR_stat64
/*
 * Copy a host struct stat out to the guest's stat64 layout at TARGET_ADDR.
 * Two near-identical paths: the ARM-EABI layout (struct target_eabi_stat64)
 * and the generic struct target_stat64.  __put_user handles per-field
 * byte-swapping.  Returns -TARGET_EFAULT on a bad guest pointer.
 * NOTE(review): the #else/#endif scaffolding between the two paths and the
 * final return are elided from this excerpt.
 */
3281 static inline abi_long host_to_target_stat64(void *cpu_env,
3282                                              abi_ulong target_addr,
3283                                              struct stat *host_st)
3286 if (((CPUARMState *)cpu_env)->eabi) {
3287 struct target_eabi_stat64 *target_st;
3289 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3290 return -TARGET_EFAULT;
3291 memset(target_st, 0, sizeof(struct target_eabi_stat64));
3292 __put_user(host_st->st_dev, &target_st->st_dev);
3293 __put_user(host_st->st_ino, &target_st->st_ino);
3294 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3295 __put_user(host_st->st_ino, &target_st->__st_ino);
3297 __put_user(host_st->st_mode, &target_st->st_mode);
3298 __put_user(host_st->st_nlink, &target_st->st_nlink);
3299 __put_user(host_st->st_uid, &target_st->st_uid);
3300 __put_user(host_st->st_gid, &target_st->st_gid);
3301 __put_user(host_st->st_rdev, &target_st->st_rdev);
3302 __put_user(host_st->st_size, &target_st->st_size);
3303 __put_user(host_st->st_blksize, &target_st->st_blksize);
3304 __put_user(host_st->st_blocks, &target_st->st_blocks);
3305 __put_user(host_st->st_atime, &target_st->target_st_atime);
3306 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3307 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3308 unlock_user_struct(target_st, target_addr, 1);
/* Generic (non-EABI) target_stat64 path. */
3312 struct target_stat64 *target_st;
3314 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3315 return -TARGET_EFAULT;
3316 memset(target_st, 0, sizeof(struct target_stat64));
3317 __put_user(host_st->st_dev, &target_st->st_dev);
3318 __put_user(host_st->st_ino, &target_st->st_ino);
3319 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3320 __put_user(host_st->st_ino, &target_st->__st_ino);
3322 __put_user(host_st->st_mode, &target_st->st_mode);
3323 __put_user(host_st->st_nlink, &target_st->st_nlink);
3324 __put_user(host_st->st_uid, &target_st->st_uid);
3325 __put_user(host_st->st_gid, &target_st->st_gid);
3326 __put_user(host_st->st_rdev, &target_st->st_rdev);
3327 /* XXX: better use of kernel struct */
3328 __put_user(host_st->st_size, &target_st->st_size);
3329 __put_user(host_st->st_blksize, &target_st->st_blksize);
3330 __put_user(host_st->st_blocks, &target_st->st_blocks);
3331 __put_user(host_st->st_atime, &target_st->target_st_atime);
3332 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3333 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3334 unlock_user_struct(target_st, target_addr, 1);
3341 #if defined(USE_NPTL)
3342 /* ??? Using host futex calls even when target atomic operations
3343    are not really atomic probably breaks things. However implementing
3344    futexes locally would make futexes shared between multiple processes
3345    tricky. However they're probably useless because guest atomic
3346    operations won't work either. */
/*
 * futex(2) emulation: translates guest addresses with g2h() and forwards
 * each operation to the host sys_futex.  FUTEX_WAIT/FUTEX_CMP_REQUEUE
 * compare-values are byte-swapped with tswap32 since they are raw guest
 * memory words.  Unknown ops return -TARGET_ENOSYS.
 * NOTE(review): the op switch scaffolding, the timeout-NULL branch and
 * some case labels are elided from this excerpt.
 */
3347 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
3348                     target_ulong uaddr2, int val3)
3350 struct timespec ts, *pts;
3352 /* ??? We assume FUTEX_* constants are the same on both host
3358 target_to_host_timespec(pts, timeout);
3362 return get_errno(sys_futex(g2h(uaddr), FUTEX_WAIT, tswap32(val),
3365 return get_errno(sys_futex(g2h(uaddr), FUTEX_WAKE, val, NULL, NULL, 0));
3367 return get_errno(sys_futex(g2h(uaddr), FUTEX_FD, val, NULL, NULL, 0));
3369 return get_errno(sys_futex(g2h(uaddr), FUTEX_REQUEUE, val,
3370                            NULL, g2h(uaddr2), 0));
3371 case FUTEX_CMP_REQUEUE:
3372 return get_errno(sys_futex(g2h(uaddr), FUTEX_CMP_REQUEUE, val,
3373                            NULL, g2h(uaddr2), tswap32(val3)));
3375 return -TARGET_ENOSYS;
/*
 * Return the kernel version as a packed integer (one byte per component,
 * e.g. 2.6.17 -> 0x020611), caching the result in a static.  The version
 * string comes from the user-supplied -r override (qemu_uname_release)
 * when set, otherwise from the host uname().
 * NOTE(review): the cache-hit early return, digit accumulation into 'n',
 * separator handling and final return are elided from this excerpt.
 */
3380 int get_osversion(void)
3382 static int osversion;
3383 struct new_utsname buf;
3388 if (qemu_uname_release && *qemu_uname_release) {
3389 s = qemu_uname_release;
3391 if (sys_uname(&buf))
/* Parse up to three dot-separated numeric components. */
3396 for (i = 0; i < 3; i++) {
3398 while (*s >= '0' && *s <= '9') {
3403 tmp = (tmp << 8) + n;
3411 /* do_syscall() should always have a single exit point at the end so
3412 that actions, such as logging of syscall results, can be performed.
3413 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
3414 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
3415 abi_long arg2, abi_long arg3, abi_long arg4,
3416 abi_long arg5, abi_long arg6)
3424 gemu_log("syscall %d", num);
3427 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
3430 case TARGET_NR_exit:
3434 gdb_exit(cpu_env, arg1);
3435 /* XXX: should free thread stack and CPU env */
3437 ret = 0; /* avoid warning */
3439 case TARGET_NR_read:
3443 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
3445 ret = get_errno(read(arg1, p, arg3));
3446 unlock_user(p, arg2, ret);
3449 case TARGET_NR_write:
3450 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
3452 ret = get_errno(write(arg1, p, arg3));
3453 unlock_user(p, arg2, 0);
3455 case TARGET_NR_open:
3456 if (!(p = lock_user_string(arg1)))
3458 ret = get_errno(open(path(p),
3459 target_to_host_bitmask(arg2, fcntl_flags_tbl),
3461 unlock_user(p, arg1, 0);
3463 #if defined(TARGET_NR_openat) && defined(__NR_openat)
3464 case TARGET_NR_openat:
3465 if (!(p = lock_user_string(arg2)))
3467 ret = get_errno(sys_openat(arg1,
3469 target_to_host_bitmask(arg3, fcntl_flags_tbl),
3471 unlock_user(p, arg2, 0);
3474 case TARGET_NR_close:
3475 ret = get_errno(close(arg1));
3480 case TARGET_NR_fork:
3481 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
3483 #ifdef TARGET_NR_waitpid
3484 case TARGET_NR_waitpid:
3487 ret = get_errno(waitpid(arg1, &status, arg3));
3488 if (!is_error(ret) && arg2
3489 && put_user_s32(status, arg2))
3494 #ifdef TARGET_NR_waitid
3495 case TARGET_NR_waitid:
3499 ret = get_errno(waitid(arg1, arg2, &info, arg4));
3500 if (!is_error(ret) && arg3 && info.si_pid != 0) {
3501 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
3503 host_to_target_siginfo(p, &info);
3504 unlock_user(p, arg3, sizeof(target_siginfo_t));
3509 #ifdef TARGET_NR_creat /* not on alpha */
3510 case TARGET_NR_creat:
3511 if (!(p = lock_user_string(arg1)))
3513 ret = get_errno(creat(p, arg2));
3514 unlock_user(p, arg1, 0);
3517 case TARGET_NR_link:
3520 p = lock_user_string(arg1);
3521 p2 = lock_user_string(arg2);
3523 ret = -TARGET_EFAULT;
3525 ret = get_errno(link(p, p2));
3526 unlock_user(p2, arg2, 0);
3527 unlock_user(p, arg1, 0);
3530 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
3531 case TARGET_NR_linkat:
3536 p = lock_user_string(arg2);
3537 p2 = lock_user_string(arg4);
3539 ret = -TARGET_EFAULT;
3541 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
3542 unlock_user(p, arg2, 0);
3543 unlock_user(p2, arg4, 0);
3547 case TARGET_NR_unlink:
3548 if (!(p = lock_user_string(arg1)))
3550 ret = get_errno(unlink(p));
3551 unlock_user(p, arg1, 0);
3553 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
3554 case TARGET_NR_unlinkat:
3555 if (!(p = lock_user_string(arg2)))
3557 ret = get_errno(sys_unlinkat(arg1, p, arg3));
3558 unlock_user(p, arg2, 0);
3561 case TARGET_NR_execve:
3563 char **argp, **envp;
3566 abi_ulong guest_argp;
3567 abi_ulong guest_envp;
3573 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
3574 if (get_user_ual(addr, gp))
3582 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
3583 if (get_user_ual(addr, gp))
3590 argp = alloca((argc + 1) * sizeof(void *));
3591 envp = alloca((envc + 1) * sizeof(void *));
3593 for (gp = guest_argp, q = argp; gp;
3594 gp += sizeof(abi_ulong), q++) {
3595 if (get_user_ual(addr, gp))
3599 if (!(*q = lock_user_string(addr)))
3604 for (gp = guest_envp, q = envp; gp;
3605 gp += sizeof(abi_ulong), q++) {
3606 if (get_user_ual(addr, gp))
3610 if (!(*q = lock_user_string(addr)))
3615 if (!(p = lock_user_string(arg1)))
3617 ret = get_errno(execve(p, argp, envp));
3618 unlock_user(p, arg1, 0);
3623 ret = -TARGET_EFAULT;
3626 for (gp = guest_argp, q = argp; *q;
3627 gp += sizeof(abi_ulong), q++) {
3628 if (get_user_ual(addr, gp)
3631 unlock_user(*q, addr, 0);
3633 for (gp = guest_envp, q = envp; *q;
3634 gp += sizeof(abi_ulong), q++) {
3635 if (get_user_ual(addr, gp)
3638 unlock_user(*q, addr, 0);
3642 case TARGET_NR_chdir:
3643 if (!(p = lock_user_string(arg1)))
3645 ret = get_errno(chdir(p));
3646 unlock_user(p, arg1, 0);
3648 #ifdef TARGET_NR_time
3649 case TARGET_NR_time:
3652 ret = get_errno(time(&host_time));
3655 && put_user_sal(host_time, arg1))
3660 case TARGET_NR_mknod:
3661 if (!(p = lock_user_string(arg1)))
3663 ret = get_errno(mknod(p, arg2, arg3));
3664 unlock_user(p, arg1, 0);
3666 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
3667 case TARGET_NR_mknodat:
3668 if (!(p = lock_user_string(arg2)))
3670 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
3671 unlock_user(p, arg2, 0);
3674 case TARGET_NR_chmod:
3675 if (!(p = lock_user_string(arg1)))
3677 ret = get_errno(chmod(p, arg2));
3678 unlock_user(p, arg1, 0);
3680 #ifdef TARGET_NR_break
3681 case TARGET_NR_break:
3684 #ifdef TARGET_NR_oldstat
3685 case TARGET_NR_oldstat:
3688 case TARGET_NR_lseek:
3689 ret = get_errno(lseek(arg1, arg2, arg3));
3691 #ifdef TARGET_NR_getxpid
3692 case TARGET_NR_getxpid:
3694 case TARGET_NR_getpid:
3696 ret = get_errno(getpid());
3698 case TARGET_NR_mount:
3700 /* need to look at the data field */
3702 p = lock_user_string(arg1);
3703 p2 = lock_user_string(arg2);
3704 p3 = lock_user_string(arg3);
3705 if (!p || !p2 || !p3)
3706 ret = -TARGET_EFAULT;
3708 /* FIXME - arg5 should be locked, but it isn't clear how to
3709 * do that since it's not guaranteed to be a NULL-terminated
3712 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
3713 unlock_user(p, arg1, 0);
3714 unlock_user(p2, arg2, 0);
3715 unlock_user(p3, arg3, 0);
3718 #ifdef TARGET_NR_umount
3719 case TARGET_NR_umount:
3720 if (!(p = lock_user_string(arg1)))
3722 ret = get_errno(umount(p));
3723 unlock_user(p, arg1, 0);
3726 #ifdef TARGET_NR_stime /* not on alpha */
3727 case TARGET_NR_stime:
3730 if (get_user_sal(host_time, arg1))
3732 ret = get_errno(stime(&host_time));
3736 case TARGET_NR_ptrace:
3738 #ifdef TARGET_NR_alarm /* not on alpha */
3739 case TARGET_NR_alarm:
3743 #ifdef TARGET_NR_oldfstat
3744 case TARGET_NR_oldfstat:
3747 #ifdef TARGET_NR_pause /* not on alpha */
3748 case TARGET_NR_pause:
3749 ret = get_errno(pause());
3752 #ifdef TARGET_NR_utime
3753 case TARGET_NR_utime:
3755 struct utimbuf tbuf, *host_tbuf;
3756 struct target_utimbuf *target_tbuf;
3758 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
3760 tbuf.actime = tswapl(target_tbuf->actime);
3761 tbuf.modtime = tswapl(target_tbuf->modtime);
3762 unlock_user_struct(target_tbuf, arg2, 0);
3767 if (!(p = lock_user_string(arg1)))
3769 ret = get_errno(utime(p, host_tbuf));
3770 unlock_user(p, arg1, 0);
3774 case TARGET_NR_utimes:
3776 struct timeval *tvp, tv[2];
3778 if (copy_from_user_timeval(&tv[0], arg2)
3779 || copy_from_user_timeval(&tv[1],
3780 arg2 + sizeof(struct target_timeval)))
3786 if (!(p = lock_user_string(arg1)))
3788 ret = get_errno(utimes(p, tvp));
3789 unlock_user(p, arg1, 0);
3792 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
3793 case TARGET_NR_futimesat:
3795 struct timeval *tvp, tv[2];
3797 if (copy_from_user_timeval(&tv[0], arg3)
3798 || copy_from_user_timeval(&tv[1],
3799 arg3 + sizeof(struct target_timeval)))
3805 if (!(p = lock_user_string(arg2)))
3807 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
3808 unlock_user(p, arg2, 0);
3812 #ifdef TARGET_NR_stty
3813 case TARGET_NR_stty:
3816 #ifdef TARGET_NR_gtty
3817 case TARGET_NR_gtty:
3820 case TARGET_NR_access:
3821 if (!(p = lock_user_string(arg1)))
3823 ret = get_errno(access(p, arg2));
3824 unlock_user(p, arg1, 0);
3826 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
3827 case TARGET_NR_faccessat:
3828 if (!(p = lock_user_string(arg2)))
3830 ret = get_errno(sys_faccessat(arg1, p, arg3, arg4));
3831 unlock_user(p, arg2, 0);
3834 #ifdef TARGET_NR_nice /* not on alpha */
3835 case TARGET_NR_nice:
3836 ret = get_errno(nice(arg1));
3839 #ifdef TARGET_NR_ftime
3840 case TARGET_NR_ftime:
3843 case TARGET_NR_sync:
3847 case TARGET_NR_kill:
3848 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
3850 case TARGET_NR_rename:
3853 p = lock_user_string(arg1);
3854 p2 = lock_user_string(arg2);
3856 ret = -TARGET_EFAULT;
3858 ret = get_errno(rename(p, p2));
3859 unlock_user(p2, arg2, 0);
3860 unlock_user(p, arg1, 0);
3863 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
3864 case TARGET_NR_renameat:
3867 p = lock_user_string(arg2);
3868 p2 = lock_user_string(arg4);
3870 ret = -TARGET_EFAULT;
3872 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
3873 unlock_user(p2, arg4, 0);
3874 unlock_user(p, arg2, 0);
3878 case TARGET_NR_mkdir:
3879 if (!(p = lock_user_string(arg1)))
3881 ret = get_errno(mkdir(p, arg2));
3882 unlock_user(p, arg1, 0);
3884 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
3885 case TARGET_NR_mkdirat:
3886 if (!(p = lock_user_string(arg2)))
3888 ret = get_errno(sys_mkdirat(arg1, p, arg3));
3889 unlock_user(p, arg2, 0);
3892 case TARGET_NR_rmdir:
3893 if (!(p = lock_user_string(arg1)))
3895 ret = get_errno(rmdir(p));
3896 unlock_user(p, arg1, 0);
3899 ret = get_errno(dup(arg1));
3901 case TARGET_NR_pipe:
3904 ret = get_errno(pipe(host_pipe));
3905 if (!is_error(ret)) {
3906 #if defined(TARGET_MIPS)
3907 CPUMIPSState *env = (CPUMIPSState*)cpu_env;
3908 env->active_tc.gpr[3] = host_pipe[1];
3910 #elif defined(TARGET_SH4)
3911 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
3914 if (put_user_s32(host_pipe[0], arg1)
3915 || put_user_s32(host_pipe[1], arg1 + sizeof(host_pipe[0])))
3921 case TARGET_NR_times:
3923 struct target_tms *tmsp;
3925 ret = get_errno(times(&tms));
3927 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
3930 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
3931 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
3932 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
3933 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
3936 ret = host_to_target_clock_t(ret);
3939 #ifdef TARGET_NR_prof
3940 case TARGET_NR_prof:
3943 #ifdef TARGET_NR_signal
3944 case TARGET_NR_signal:
3947 case TARGET_NR_acct:
3949 ret = get_errno(acct(NULL));
3951 if (!(p = lock_user_string(arg1)))
3953 ret = get_errno(acct(path(p)));
3954 unlock_user(p, arg1, 0);
3957 #ifdef TARGET_NR_umount2 /* not on alpha */
3958 case TARGET_NR_umount2:
3959 if (!(p = lock_user_string(arg1)))
3961 ret = get_errno(umount2(p, arg2));
3962 unlock_user(p, arg1, 0);
3965 #ifdef TARGET_NR_lock
3966 case TARGET_NR_lock:
3969 case TARGET_NR_ioctl:
3970 ret = do_ioctl(arg1, arg2, arg3);
3972 case TARGET_NR_fcntl:
3973 ret = do_fcntl(arg1, arg2, arg3);
3975 #ifdef TARGET_NR_mpx
3979 case TARGET_NR_setpgid:
3980 ret = get_errno(setpgid(arg1, arg2));
3982 #ifdef TARGET_NR_ulimit
3983 case TARGET_NR_ulimit:
3986 #ifdef TARGET_NR_oldolduname
3987 case TARGET_NR_oldolduname:
3990 case TARGET_NR_umask:
3991 ret = get_errno(umask(arg1));
3993 case TARGET_NR_chroot:
3994 if (!(p = lock_user_string(arg1)))
3996 ret = get_errno(chroot(p));
3997 unlock_user(p, arg1, 0);
3999 case TARGET_NR_ustat:
4001 case TARGET_NR_dup2:
4002 ret = get_errno(dup2(arg1, arg2));
4004 #ifdef TARGET_NR_getppid /* not on alpha */
4005 case TARGET_NR_getppid:
4006 ret = get_errno(getppid());
4009 case TARGET_NR_getpgrp:
4010 ret = get_errno(getpgrp());
4012 case TARGET_NR_setsid:
4013 ret = get_errno(setsid());
4015 #ifdef TARGET_NR_sigaction
4016 case TARGET_NR_sigaction:
4018 #if !defined(TARGET_MIPS)
4019 struct target_old_sigaction *old_act;
4020 struct target_sigaction act, oact, *pact;
4022 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4024 act._sa_handler = old_act->_sa_handler;
4025 target_siginitset(&act.sa_mask, old_act->sa_mask);
4026 act.sa_flags = old_act->sa_flags;
4027 act.sa_restorer = old_act->sa_restorer;
4028 unlock_user_struct(old_act, arg2, 0);
4033 ret = get_errno(do_sigaction(arg1, pact, &oact));
4034 if (!is_error(ret) && arg3) {
4035 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4037 old_act->_sa_handler = oact._sa_handler;
4038 old_act->sa_mask = oact.sa_mask.sig[0];
4039 old_act->sa_flags = oact.sa_flags;
4040 old_act->sa_restorer = oact.sa_restorer;
4041 unlock_user_struct(old_act, arg3, 1);
4044 struct target_sigaction act, oact, *pact, *old_act;
4047 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4049 act._sa_handler = old_act->_sa_handler;
4050 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
4051 act.sa_flags = old_act->sa_flags;
4052 unlock_user_struct(old_act, arg2, 0);
4058 ret = get_errno(do_sigaction(arg1, pact, &oact));
4060 if (!is_error(ret) && arg3) {
4061 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4063 old_act->_sa_handler = oact._sa_handler;
4064 old_act->sa_flags = oact.sa_flags;
4065 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
4066 old_act->sa_mask.sig[1] = 0;
4067 old_act->sa_mask.sig[2] = 0;
4068 old_act->sa_mask.sig[3] = 0;
4069 unlock_user_struct(old_act, arg3, 1);
4075 case TARGET_NR_rt_sigaction:
4077 struct target_sigaction *act;
4078 struct target_sigaction *oact;
4081 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
4086 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
4087 ret = -TARGET_EFAULT;
4088 goto rt_sigaction_fail;
4092 ret = get_errno(do_sigaction(arg1, act, oact));
4095 unlock_user_struct(act, arg2, 0);
4097 unlock_user_struct(oact, arg3, 1);
4100 #ifdef TARGET_NR_sgetmask /* not on alpha */
4101 case TARGET_NR_sgetmask:
4104 abi_ulong target_set;
4105 sigprocmask(0, NULL, &cur_set);
4106 host_to_target_old_sigset(&target_set, &cur_set);
4111 #ifdef TARGET_NR_ssetmask /* not on alpha */
4112 case TARGET_NR_ssetmask:
4114 sigset_t set, oset, cur_set;
4115 abi_ulong target_set = arg1;
4116 sigprocmask(0, NULL, &cur_set);
4117 target_to_host_old_sigset(&set, &target_set);
4118 sigorset(&set, &set, &cur_set);
4119 sigprocmask(SIG_SETMASK, &set, &oset);
4120 host_to_target_old_sigset(&target_set, &oset);
4125 #ifdef TARGET_NR_sigprocmask
4126 case TARGET_NR_sigprocmask:
4129 sigset_t set, oldset, *set_ptr;
4133 case TARGET_SIG_BLOCK:
4136 case TARGET_SIG_UNBLOCK:
4139 case TARGET_SIG_SETMASK:
4143 ret = -TARGET_EINVAL;
4146 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4148 target_to_host_old_sigset(&set, p);
4149 unlock_user(p, arg2, 0);
4155 ret = get_errno(sigprocmask(arg1, set_ptr, &oldset));
4156 if (!is_error(ret) && arg3) {
4157 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4159 host_to_target_old_sigset(p, &oldset);
4160 unlock_user(p, arg3, sizeof(target_sigset_t));
4165 case TARGET_NR_rt_sigprocmask:
4168 sigset_t set, oldset, *set_ptr;
4172 case TARGET_SIG_BLOCK:
4175 case TARGET_SIG_UNBLOCK:
4178 case TARGET_SIG_SETMASK:
4182 ret = -TARGET_EINVAL;
4185 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4187 target_to_host_sigset(&set, p);
4188 unlock_user(p, arg2, 0);
4194 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
4195 if (!is_error(ret) && arg3) {
4196 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4198 host_to_target_sigset(p, &oldset);
4199 unlock_user(p, arg3, sizeof(target_sigset_t));
4203 #ifdef TARGET_NR_sigpending
4204 case TARGET_NR_sigpending:
4207 ret = get_errno(sigpending(&set));
4208 if (!is_error(ret)) {
4209 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4211 host_to_target_old_sigset(p, &set);
4212 unlock_user(p, arg1, sizeof(target_sigset_t));
4217 case TARGET_NR_rt_sigpending:
4220 ret = get_errno(sigpending(&set));
4221 if (!is_error(ret)) {
4222 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4224 host_to_target_sigset(p, &set);
4225 unlock_user(p, arg1, sizeof(target_sigset_t));
4229 #ifdef TARGET_NR_sigsuspend
4230 case TARGET_NR_sigsuspend:
4233 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4235 target_to_host_old_sigset(&set, p);
4236 unlock_user(p, arg1, 0);
4237 ret = get_errno(sigsuspend(&set));
4241 case TARGET_NR_rt_sigsuspend:
4244 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4246 target_to_host_sigset(&set, p);
4247 unlock_user(p, arg1, 0);
4248 ret = get_errno(sigsuspend(&set));
4251 case TARGET_NR_rt_sigtimedwait:
4254 struct timespec uts, *puts;
4257 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4259 target_to_host_sigset(&set, p);
4260 unlock_user(p, arg1, 0);
4263 target_to_host_timespec(puts, arg3);
4267 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
4268 if (!is_error(ret) && arg2) {
4269 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
4271 host_to_target_siginfo(p, &uinfo);
4272 unlock_user(p, arg2, sizeof(target_siginfo_t));
4276 case TARGET_NR_rt_sigqueueinfo:
4279 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
4281 target_to_host_siginfo(&uinfo, p);
4282 unlock_user(p, arg1, 0);
4283 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
4286 #ifdef TARGET_NR_sigreturn
4287 case TARGET_NR_sigreturn:
4288 /* NOTE: ret is eax, so no transcoding must be done */
4289 ret = do_sigreturn(cpu_env);
4292 case TARGET_NR_rt_sigreturn:
4293 /* NOTE: ret is eax, so no transcoding must be done */
4294 ret = do_rt_sigreturn(cpu_env);
4296 case TARGET_NR_sethostname:
4297 if (!(p = lock_user_string(arg1)))
4299 ret = get_errno(sethostname(p, arg2));
4300 unlock_user(p, arg1, 0);
4302 case TARGET_NR_setrlimit:
4304 /* XXX: convert resource ? */
4305 int resource = arg1;
4306 struct target_rlimit *target_rlim;
4308 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
4310 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4311 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4312 unlock_user_struct(target_rlim, arg2, 0);
4313 ret = get_errno(setrlimit(resource, &rlim));
4316 case TARGET_NR_getrlimit:
4318 /* XXX: convert resource ? */
4319 int resource = arg1;
4320 struct target_rlimit *target_rlim;
4323 ret = get_errno(getrlimit(resource, &rlim));
4324 if (!is_error(ret)) {
4325 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
4327 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4328 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4329 unlock_user_struct(target_rlim, arg2, 1);
4333 case TARGET_NR_getrusage:
4335 struct rusage rusage;
4336 ret = get_errno(getrusage(arg1, &rusage));
4337 if (!is_error(ret)) {
4338 host_to_target_rusage(arg2, &rusage);
4342 case TARGET_NR_gettimeofday:
4345 ret = get_errno(gettimeofday(&tv, NULL));
4346 if (!is_error(ret)) {
4347 if (copy_to_user_timeval(arg1, &tv))
4352 case TARGET_NR_settimeofday:
4355 if (copy_from_user_timeval(&tv, arg1))
4357 ret = get_errno(settimeofday(&tv, NULL));
4360 #ifdef TARGET_NR_select
4361 case TARGET_NR_select:
4363 struct target_sel_arg_struct *sel;
4364 abi_ulong inp, outp, exp, tvp;
4367 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
4369 nsel = tswapl(sel->n);
4370 inp = tswapl(sel->inp);
4371 outp = tswapl(sel->outp);
4372 exp = tswapl(sel->exp);
4373 tvp = tswapl(sel->tvp);
4374 unlock_user_struct(sel, arg1, 0);
4375 ret = do_select(nsel, inp, outp, exp, tvp);
4379 case TARGET_NR_symlink:
4382 p = lock_user_string(arg1);
4383 p2 = lock_user_string(arg2);
4385 ret = -TARGET_EFAULT;
4387 ret = get_errno(symlink(p, p2));
4388 unlock_user(p2, arg2, 0);
4389 unlock_user(p, arg1, 0);
4392 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
4393 case TARGET_NR_symlinkat:
4396 p = lock_user_string(arg1);
4397 p2 = lock_user_string(arg3);
4399 ret = -TARGET_EFAULT;
4401 ret = get_errno(sys_symlinkat(p, arg2, p2));
4402 unlock_user(p2, arg3, 0);
4403 unlock_user(p, arg1, 0);
4407 #ifdef TARGET_NR_oldlstat
4408 case TARGET_NR_oldlstat:
4411 case TARGET_NR_readlink:
4414 p = lock_user_string(arg1);
4415 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
4417 ret = -TARGET_EFAULT;
4419 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
4420 char real[PATH_MAX];
4421 temp = realpath(exec_path,real);
4422 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
4423 snprintf((char *)p2, arg3, "%s", real);
4426 ret = get_errno(readlink(path(p), p2, arg3));
4429 unlock_user(p2, arg2, ret);
4430 unlock_user(p, arg1, 0);
4433 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
4434 case TARGET_NR_readlinkat:
4437 p = lock_user_string(arg2);
4438 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
4440 ret = -TARGET_EFAULT;
4442 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
4443 unlock_user(p2, arg3, ret);
4444 unlock_user(p, arg2, 0);
4448 #ifdef TARGET_NR_uselib
4449 case TARGET_NR_uselib:
4452 #ifdef TARGET_NR_swapon
4453 case TARGET_NR_swapon:
4454 if (!(p = lock_user_string(arg1)))
4456 ret = get_errno(swapon(p, arg2));
4457 unlock_user(p, arg1, 0);
4460 case TARGET_NR_reboot:
4462 #ifdef TARGET_NR_readdir
4463 case TARGET_NR_readdir:
4466 #ifdef TARGET_NR_mmap
4467 case TARGET_NR_mmap:
4468 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS)
4471 abi_ulong v1, v2, v3, v4, v5, v6;
4472 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
4480 unlock_user(v, arg1, 0);
4481 ret = get_errno(target_mmap(v1, v2, v3,
4482 target_to_host_bitmask(v4, mmap_flags_tbl),
4486 ret = get_errno(target_mmap(arg1, arg2, arg3,
4487 target_to_host_bitmask(arg4, mmap_flags_tbl),
4493 #ifdef TARGET_NR_mmap2
4494 case TARGET_NR_mmap2:
4496 #define MMAP_SHIFT 12
4498 ret = get_errno(target_mmap(arg1, arg2, arg3,
4499 target_to_host_bitmask(arg4, mmap_flags_tbl),
4501 arg6 << MMAP_SHIFT));
4504 case TARGET_NR_munmap:
4505 ret = get_errno(target_munmap(arg1, arg2));
4507 case TARGET_NR_mprotect:
4508 ret = get_errno(target_mprotect(arg1, arg2, arg3));
4510 #ifdef TARGET_NR_mremap
4511 case TARGET_NR_mremap:
4512 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
4515 /* ??? msync/mlock/munlock are broken for softmmu. */
4516 #ifdef TARGET_NR_msync
4517 case TARGET_NR_msync:
4518 ret = get_errno(msync(g2h(arg1), arg2, arg3));
4521 #ifdef TARGET_NR_mlock
4522 case TARGET_NR_mlock:
4523 ret = get_errno(mlock(g2h(arg1), arg2));
4526 #ifdef TARGET_NR_munlock
4527 case TARGET_NR_munlock:
4528 ret = get_errno(munlock(g2h(arg1), arg2));
4531 #ifdef TARGET_NR_mlockall
4532 case TARGET_NR_mlockall:
4533 ret = get_errno(mlockall(arg1));
4536 #ifdef TARGET_NR_munlockall
4537 case TARGET_NR_munlockall:
4538 ret = get_errno(munlockall());
4541 case TARGET_NR_truncate:
4542 if (!(p = lock_user_string(arg1)))
4544 ret = get_errno(truncate(p, arg2));
4545 unlock_user(p, arg1, 0);
4547 case TARGET_NR_ftruncate:
4548 ret = get_errno(ftruncate(arg1, arg2));
4550 case TARGET_NR_fchmod:
4551 ret = get_errno(fchmod(arg1, arg2));
4553 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
4554 case TARGET_NR_fchmodat:
4555 if (!(p = lock_user_string(arg2)))
4557 ret = get_errno(sys_fchmodat(arg1, p, arg3, arg4));
4558 unlock_user(p, arg2, 0);
4561 case TARGET_NR_getpriority:
4562 /* libc does special remapping of the return value of
4563 * sys_getpriority() so it's just easiest to call
4564 * sys_getpriority() directly rather than through libc. */
4565 ret = sys_getpriority(arg1, arg2);
4567 case TARGET_NR_setpriority:
4568 ret = get_errno(setpriority(arg1, arg2, arg3));
4570 #ifdef TARGET_NR_profil
4571 case TARGET_NR_profil:
4574 case TARGET_NR_statfs:
4575 if (!(p = lock_user_string(arg1)))
4577 ret = get_errno(statfs(path(p), &stfs));
4578 unlock_user(p, arg1, 0);
4580 if (!is_error(ret)) {
4581 struct target_statfs *target_stfs;
4583 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
4585 __put_user(stfs.f_type, &target_stfs->f_type);
4586 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
4587 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
4588 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
4589 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
4590 __put_user(stfs.f_files, &target_stfs->f_files);
4591 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
4592 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
4593 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
4594 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
4595 unlock_user_struct(target_stfs, arg2, 1);
4598 case TARGET_NR_fstatfs:
4599 ret = get_errno(fstatfs(arg1, &stfs));
4600 goto convert_statfs;
4601 #ifdef TARGET_NR_statfs64
4602 case TARGET_NR_statfs64:
4603 if (!(p = lock_user_string(arg1)))
4605 ret = get_errno(statfs(path(p), &stfs));
4606 unlock_user(p, arg1, 0);
4608 if (!is_error(ret)) {
4609 struct target_statfs64 *target_stfs;
4611 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
4613 __put_user(stfs.f_type, &target_stfs->f_type);
4614 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
4615 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
4616 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
4617 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
4618 __put_user(stfs.f_files, &target_stfs->f_files);
4619 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
4620 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
4621 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
4622 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
4623 unlock_user_struct(target_stfs, arg3, 1);
4626 case TARGET_NR_fstatfs64:
4627 ret = get_errno(fstatfs(arg1, &stfs));
4628 goto convert_statfs64;
4630 #ifdef TARGET_NR_ioperm
4631 case TARGET_NR_ioperm:
4634 #ifdef TARGET_NR_socketcall
4635 case TARGET_NR_socketcall:
4636 ret = do_socketcall(arg1, arg2);
4639 #ifdef TARGET_NR_accept
4640 case TARGET_NR_accept:
4641 ret = do_accept(arg1, arg2, arg3);
4644 #ifdef TARGET_NR_bind
4645 case TARGET_NR_bind:
4646 ret = do_bind(arg1, arg2, arg3);
4649 #ifdef TARGET_NR_connect
4650 case TARGET_NR_connect:
4651 ret = do_connect(arg1, arg2, arg3);
4654 #ifdef TARGET_NR_getpeername
4655 case TARGET_NR_getpeername:
4656 ret = do_getpeername(arg1, arg2, arg3);
4659 #ifdef TARGET_NR_getsockname
4660 case TARGET_NR_getsockname:
4661 ret = do_getsockname(arg1, arg2, arg3);
4664 #ifdef TARGET_NR_getsockopt
4665 case TARGET_NR_getsockopt:
4666 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
4669 #ifdef TARGET_NR_listen
4670 case TARGET_NR_listen:
4671 ret = get_errno(listen(arg1, arg2));
4674 #ifdef TARGET_NR_recv
4675 case TARGET_NR_recv:
4676 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
4679 #ifdef TARGET_NR_recvfrom
4680 case TARGET_NR_recvfrom:
4681 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
4684 #ifdef TARGET_NR_recvmsg
4685 case TARGET_NR_recvmsg:
4686 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
4689 #ifdef TARGET_NR_send
4690 case TARGET_NR_send:
4691 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
4694 #ifdef TARGET_NR_sendmsg
4695 case TARGET_NR_sendmsg:
4696 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
4699 #ifdef TARGET_NR_sendto
4700 case TARGET_NR_sendto:
4701 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
4704 #ifdef TARGET_NR_shutdown
4705 case TARGET_NR_shutdown:
4706 ret = get_errno(shutdown(arg1, arg2));
4709 #ifdef TARGET_NR_socket
4710 case TARGET_NR_socket:
4711 ret = do_socket(arg1, arg2, arg3);
4714 #ifdef TARGET_NR_socketpair
4715 case TARGET_NR_socketpair:
4716 ret = do_socketpair(arg1, arg2, arg3, arg4);
4719 #ifdef TARGET_NR_setsockopt
4720 case TARGET_NR_setsockopt:
4721 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
4725 case TARGET_NR_syslog:
4726 if (!(p = lock_user_string(arg2)))
4728 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
4729 unlock_user(p, arg2, 0);
4732 case TARGET_NR_setitimer:
4734 struct itimerval value, ovalue, *pvalue;
4738 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
4739 || copy_from_user_timeval(&pvalue->it_value,
4740 arg2 + sizeof(struct target_timeval)))
4745 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
4746 if (!is_error(ret) && arg3) {
4747 if (copy_to_user_timeval(arg3,
4748 &ovalue.it_interval)
4749 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
4755 case TARGET_NR_getitimer:
4757 struct itimerval value;
4759 ret = get_errno(getitimer(arg1, &value));
4760 if (!is_error(ret) && arg2) {
4761 if (copy_to_user_timeval(arg2,
4763 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
4769 case TARGET_NR_stat:
4770 if (!(p = lock_user_string(arg1)))
4772 ret = get_errno(stat(path(p), &st));
4773 unlock_user(p, arg1, 0);
4775 case TARGET_NR_lstat:
4776 if (!(p = lock_user_string(arg1)))
4778 ret = get_errno(lstat(path(p), &st));
4779 unlock_user(p, arg1, 0);
4781 case TARGET_NR_fstat:
4783 ret = get_errno(fstat(arg1, &st));
4785 if (!is_error(ret)) {
4786 struct target_stat *target_st;
4788 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
4790 __put_user(st.st_dev, &target_st->st_dev);
4791 __put_user(st.st_ino, &target_st->st_ino);
4792 __put_user(st.st_mode, &target_st->st_mode);
4793 __put_user(st.st_uid, &target_st->st_uid);
4794 __put_user(st.st_gid, &target_st->st_gid);
4795 __put_user(st.st_nlink, &target_st->st_nlink);
4796 __put_user(st.st_rdev, &target_st->st_rdev);
4797 __put_user(st.st_size, &target_st->st_size);
4798 __put_user(st.st_blksize, &target_st->st_blksize);
4799 __put_user(st.st_blocks, &target_st->st_blocks);
4800 __put_user(st.st_atime, &target_st->target_st_atime);
4801 __put_user(st.st_mtime, &target_st->target_st_mtime);
4802 __put_user(st.st_ctime, &target_st->target_st_ctime);
4803 unlock_user_struct(target_st, arg2, 1);
4807 #ifdef TARGET_NR_olduname
4808 case TARGET_NR_olduname:
4811 #ifdef TARGET_NR_iopl
4812 case TARGET_NR_iopl:
4815 case TARGET_NR_vhangup:
4816 ret = get_errno(vhangup());
4818 #ifdef TARGET_NR_idle
4819 case TARGET_NR_idle:
4822 #ifdef TARGET_NR_syscall
4823 case TARGET_NR_syscall:
4824 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
4827 case TARGET_NR_wait4:
4830 abi_long status_ptr = arg2;
4831 struct rusage rusage, *rusage_ptr;
4832 abi_ulong target_rusage = arg4;
4834 rusage_ptr = &rusage;
4837 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
4838 if (!is_error(ret)) {
4840 if (put_user_s32(status, status_ptr))
4844 host_to_target_rusage(target_rusage, &rusage);
4848 #ifdef TARGET_NR_swapoff
4849 case TARGET_NR_swapoff:
4850 if (!(p = lock_user_string(arg1)))
4852 ret = get_errno(swapoff(p));
4853 unlock_user(p, arg1, 0);
4856 case TARGET_NR_sysinfo:
4858 struct target_sysinfo *target_value;
4859 struct sysinfo value;
4860 ret = get_errno(sysinfo(&value));
4861 if (!is_error(ret) && arg1)
4863 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
4865 __put_user(value.uptime, &target_value->uptime);
4866 __put_user(value.loads[0], &target_value->loads[0]);
4867 __put_user(value.loads[1], &target_value->loads[1]);
4868 __put_user(value.loads[2], &target_value->loads[2]);
4869 __put_user(value.totalram, &target_value->totalram);
4870 __put_user(value.freeram, &target_value->freeram);
4871 __put_user(value.sharedram, &target_value->sharedram);
4872 __put_user(value.bufferram, &target_value->bufferram);
4873 __put_user(value.totalswap, &target_value->totalswap);
4874 __put_user(value.freeswap, &target_value->freeswap);
4875 __put_user(value.procs, &target_value->procs);
4876 __put_user(value.totalhigh, &target_value->totalhigh);
4877 __put_user(value.freehigh, &target_value->freehigh);
4878 __put_user(value.mem_unit, &target_value->mem_unit);
4879 unlock_user_struct(target_value, arg1, 1);
4883 #ifdef TARGET_NR_ipc
4885 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
4889 #ifdef TARGET_NR_msgctl
4890 case TARGET_NR_msgctl:
4891 ret = do_msgctl(arg1, arg2, arg3);
4894 #ifdef TARGET_NR_msgget
4895 case TARGET_NR_msgget:
4896 ret = get_errno(msgget(arg1, arg2));
4899 #ifdef TARGET_NR_msgrcv
4900 case TARGET_NR_msgrcv:
4901 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
4904 #ifdef TARGET_NR_msgsnd
4905 case TARGET_NR_msgsnd:
4906 ret = do_msgsnd(arg1, arg2, arg3, arg4);
4909 case TARGET_NR_fsync:
4910 ret = get_errno(fsync(arg1));
4912 case TARGET_NR_clone:
4913 #if defined(TARGET_SH4)
4914 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
4915 #elif defined(TARGET_CRIS)
4916 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
4918 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
4921 #ifdef __NR_exit_group
4922 /* new thread calls */
4923 case TARGET_NR_exit_group:
4927 gdb_exit(cpu_env, arg1);
4928 ret = get_errno(exit_group(arg1));
4931 case TARGET_NR_setdomainname:
4932 if (!(p = lock_user_string(arg1)))
4934 ret = get_errno(setdomainname(p, arg2));
4935 unlock_user(p, arg1, 0);
4937 case TARGET_NR_uname:
4938 /* no need to transcode because we use the linux syscall */
4940 struct new_utsname * buf;
4942 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
4944 ret = get_errno(sys_uname(buf));
4945 if (!is_error(ret)) {
4946 /* Overwrite the native machine name with whatever is being
4948 strcpy (buf->machine, UNAME_MACHINE);
4949 /* Allow the user to override the reported release. */
4950 if (qemu_uname_release && *qemu_uname_release)
4951 strcpy (buf->release, qemu_uname_release);
4953 unlock_user_struct(buf, arg1, 1);
4957 case TARGET_NR_modify_ldt:
4958 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
4960 #if !defined(TARGET_X86_64)
4961 case TARGET_NR_vm86old:
4963 case TARGET_NR_vm86:
4964 ret = do_vm86(cpu_env, arg1, arg2);
4968 case TARGET_NR_adjtimex:
4970 #ifdef TARGET_NR_create_module
4971 case TARGET_NR_create_module:
4973 case TARGET_NR_init_module:
4974 case TARGET_NR_delete_module:
4975 #ifdef TARGET_NR_get_kernel_syms
4976 case TARGET_NR_get_kernel_syms:
4979 case TARGET_NR_quotactl:
4981 case TARGET_NR_getpgid:
4982 ret = get_errno(getpgid(arg1));
4984 case TARGET_NR_fchdir:
4985 ret = get_errno(fchdir(arg1));
4987 #ifdef TARGET_NR_bdflush /* not on x86_64 */
4988 case TARGET_NR_bdflush:
4991 #ifdef TARGET_NR_sysfs
4992 case TARGET_NR_sysfs:
4995 case TARGET_NR_personality:
4996 ret = get_errno(personality(arg1));
4998 #ifdef TARGET_NR_afs_syscall
4999 case TARGET_NR_afs_syscall:
5002 #ifdef TARGET_NR__llseek /* Not on alpha */
5003 case TARGET_NR__llseek:
5005 #if defined (__x86_64__)
5006 ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5));
5007 if (put_user_s64(ret, arg4))
5011 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
5012 if (put_user_s64(res, arg4))
5018 case TARGET_NR_getdents:
5019 #if TARGET_ABI_BITS != 32
5021 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5023 struct target_dirent *target_dirp;
5024 struct linux_dirent *dirp;
5025 abi_long count = arg3;
5027 dirp = malloc(count);
5029 ret = -TARGET_ENOMEM;
5033 ret = get_errno(sys_getdents(arg1, dirp, count));
5034 if (!is_error(ret)) {
5035 struct linux_dirent *de;
5036 struct target_dirent *tde;
5038 int reclen, treclen;
5039 int count1, tnamelen;
5043 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5047 reclen = de->d_reclen;
5048 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
5049 tde->d_reclen = tswap16(treclen);
5050 tde->d_ino = tswapl(de->d_ino);
5051 tde->d_off = tswapl(de->d_off);
5052 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
5055 /* XXX: may not be correct */
5056 pstrcpy(tde->d_name, tnamelen, de->d_name);
5057 de = (struct linux_dirent *)((char *)de + reclen);
5059 tde = (struct target_dirent *)((char *)tde + treclen);
5063 unlock_user(target_dirp, arg2, ret);
5069 struct linux_dirent *dirp;
5070 abi_long count = arg3;
5072 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5074 ret = get_errno(sys_getdents(arg1, dirp, count));
5075 if (!is_error(ret)) {
5076 struct linux_dirent *de;
5081 reclen = de->d_reclen;
5084 de->d_reclen = tswap16(reclen);
5085 tswapls(&de->d_ino);
5086 tswapls(&de->d_off);
5087 de = (struct linux_dirent *)((char *)de + reclen);
5091 unlock_user(dirp, arg2, ret);
5095 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5096 case TARGET_NR_getdents64:
5098 struct linux_dirent64 *dirp;
5099 abi_long count = arg3;
5100 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5102 ret = get_errno(sys_getdents64(arg1, dirp, count));
5103 if (!is_error(ret)) {
5104 struct linux_dirent64 *de;
5109 reclen = de->d_reclen;
5112 de->d_reclen = tswap16(reclen);
5113 tswap64s((uint64_t *)&de->d_ino);
5114 tswap64s((uint64_t *)&de->d_off);
5115 de = (struct linux_dirent64 *)((char *)de + reclen);
5119 unlock_user(dirp, arg2, ret);
5122 #endif /* TARGET_NR_getdents64 */
5123 #ifdef TARGET_NR__newselect
5124 case TARGET_NR__newselect:
5125 ret = do_select(arg1, arg2, arg3, arg4, arg5);
5128 #ifdef TARGET_NR_poll
5129 case TARGET_NR_poll:
5131 struct target_pollfd *target_pfd;
5132 unsigned int nfds = arg2;
5137 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
5140 pfd = alloca(sizeof(struct pollfd) * nfds);
5141 for(i = 0; i < nfds; i++) {
5142 pfd[i].fd = tswap32(target_pfd[i].fd);
5143 pfd[i].events = tswap16(target_pfd[i].events);
5145 ret = get_errno(poll(pfd, nfds, timeout));
5146 if (!is_error(ret)) {
5147 for(i = 0; i < nfds; i++) {
5148 target_pfd[i].revents = tswap16(pfd[i].revents);
5150 ret += nfds * (sizeof(struct target_pollfd)
5151 - sizeof(struct pollfd));
5153 unlock_user(target_pfd, arg1, ret);
5157 case TARGET_NR_flock:
5158 /* NOTE: the flock constant seems to be the same for every
5160 ret = get_errno(flock(arg1, arg2));
5162 case TARGET_NR_readv:
5167 vec = alloca(count * sizeof(struct iovec));
5168 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
5170 ret = get_errno(readv(arg1, vec, count));
5171 unlock_iovec(vec, arg2, count, 1);
5174 case TARGET_NR_writev:
5179 vec = alloca(count * sizeof(struct iovec));
5180 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
5182 ret = get_errno(writev(arg1, vec, count));
5183 unlock_iovec(vec, arg2, count, 0);
5186 case TARGET_NR_getsid:
5187 ret = get_errno(getsid(arg1));
5189 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5190 case TARGET_NR_fdatasync:
5191 ret = get_errno(fdatasync(arg1));
5194 case TARGET_NR__sysctl:
5195 /* We don't implement this, but ENOTDIR is always a safe
5197 ret = -TARGET_ENOTDIR;
5199 case TARGET_NR_sched_setparam:
5201 struct sched_param *target_schp;
5202 struct sched_param schp;
5204 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
5206 schp.sched_priority = tswap32(target_schp->sched_priority);
5207 unlock_user_struct(target_schp, arg2, 0);
5208 ret = get_errno(sched_setparam(arg1, &schp));
5211 case TARGET_NR_sched_getparam:
5213 struct sched_param *target_schp;
5214 struct sched_param schp;
5215 ret = get_errno(sched_getparam(arg1, &schp));
5216 if (!is_error(ret)) {
5217 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
5219 target_schp->sched_priority = tswap32(schp.sched_priority);
5220 unlock_user_struct(target_schp, arg2, 1);
5224 case TARGET_NR_sched_setscheduler:
5226 struct sched_param *target_schp;
5227 struct sched_param schp;
5228 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
5230 schp.sched_priority = tswap32(target_schp->sched_priority);
5231 unlock_user_struct(target_schp, arg3, 0);
5232 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
5235 case TARGET_NR_sched_getscheduler:
5236 ret = get_errno(sched_getscheduler(arg1));
5238 case TARGET_NR_sched_yield:
5239 ret = get_errno(sched_yield());
5241 case TARGET_NR_sched_get_priority_max:
5242 ret = get_errno(sched_get_priority_max(arg1));
5244 case TARGET_NR_sched_get_priority_min:
5245 ret = get_errno(sched_get_priority_min(arg1));
5247 case TARGET_NR_sched_rr_get_interval:
5250 ret = get_errno(sched_rr_get_interval(arg1, &ts));
5251 if (!is_error(ret)) {
5252 host_to_target_timespec(arg2, &ts);
5256 case TARGET_NR_nanosleep:
5258 struct timespec req, rem;
5259 target_to_host_timespec(&req, arg1);
5260 ret = get_errno(nanosleep(&req, &rem));
5261 if (is_error(ret) && arg2) {
5262 host_to_target_timespec(arg2, &rem);
5266 #ifdef TARGET_NR_query_module
5267 case TARGET_NR_query_module:
5270 #ifdef TARGET_NR_nfsservctl
5271 case TARGET_NR_nfsservctl:
5274 case TARGET_NR_prctl:
5277 case PR_GET_PDEATHSIG:
5280 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
5281 if (!is_error(ret) && arg2
5282 && put_user_ual(deathsig, arg2))
5287 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
5291 #ifdef TARGET_NR_arch_prctl
5292 case TARGET_NR_arch_prctl:
5293 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
5294 ret = do_arch_prctl(cpu_env, arg1, arg2);
5300 #ifdef TARGET_NR_pread
5301 case TARGET_NR_pread:
5303 if (((CPUARMState *)cpu_env)->eabi)
5306 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5308 ret = get_errno(pread(arg1, p, arg3, arg4));
5309 unlock_user(p, arg2, ret);
5311 case TARGET_NR_pwrite:
5313 if (((CPUARMState *)cpu_env)->eabi)
5316 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5318 ret = get_errno(pwrite(arg1, p, arg3, arg4));
5319 unlock_user(p, arg2, 0);
5322 #ifdef TARGET_NR_pread64
5323 case TARGET_NR_pread64:
5324 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5326 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
5327 unlock_user(p, arg2, ret);
5329 case TARGET_NR_pwrite64:
5330 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5332 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
5333 unlock_user(p, arg2, 0);
5336 case TARGET_NR_getcwd:
5337 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
5339 ret = get_errno(sys_getcwd1(p, arg2));
5340 unlock_user(p, arg1, ret);
5342 case TARGET_NR_capget:
5344 case TARGET_NR_capset:
5346 case TARGET_NR_sigaltstack:
5347 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
5348 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
5349 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
5354 case TARGET_NR_sendfile:
5356 #ifdef TARGET_NR_getpmsg
5357 case TARGET_NR_getpmsg:
5360 #ifdef TARGET_NR_putpmsg
5361 case TARGET_NR_putpmsg:
5364 #ifdef TARGET_NR_vfork
5365 case TARGET_NR_vfork:
5366 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
/* ugetrlimit: fetch the host rlimit, then byte-swap both fields into the
   guest's target_rlimit layout (tswapl = target-endian swap, abi-long). */
5370 #ifdef TARGET_NR_ugetrlimit
5371 case TARGET_NR_ugetrlimit:
5374 ret = get_errno(getrlimit(arg1, &rlim));
5375 if (!is_error(ret)) {
5376 struct target_rlimit *target_rlim;
5377 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5379 target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
5380 target_rlim->rlim_max = tswapl(rlim.rlim_max);
5381 unlock_user_struct(target_rlim, arg2, 1);
/* truncate64/ftruncate64: the target_* helpers take the raw ABI words
   (arg2..arg4) and reassemble the split 64-bit length themselves —
   presumably including the ARM EABI register-pair alignment; elided here. */
5386 #ifdef TARGET_NR_truncate64
5387 case TARGET_NR_truncate64:
5388 if (!(p = lock_user_string(arg1)))
5390 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
5391 unlock_user(p, arg1, 0);
5394 #ifdef TARGET_NR_ftruncate64
5395 case TARGET_NR_ftruncate64:
5396 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
/* stat64 family: perform the host [l/f]stat into `st`, then convert the
   host struct stat into the guest's stat64 layout with
   host_to_target_stat64().  path() applies QEMU's sysroot (-L) prefix
   redirection to guest-supplied paths. */
5399 #ifdef TARGET_NR_stat64
5400 case TARGET_NR_stat64:
5401 if (!(p = lock_user_string(arg1)))
5403 ret = get_errno(stat(path(p), &st));
5404 unlock_user(p, arg1, 0);
/* Only convert on success — presumably guarded by !is_error(ret) in the
   elided line above this one. */
5406 ret = host_to_target_stat64(cpu_env, arg2, &st);
5409 #ifdef TARGET_NR_lstat64
5410 case TARGET_NR_lstat64:
5411 if (!(p = lock_user_string(arg1)))
5413 ret = get_errno(lstat(path(p), &st));
5414 unlock_user(p, arg1, 0);
5416 ret = host_to_target_stat64(cpu_env, arg2, &st);
5419 #ifdef TARGET_NR_fstat64
5420 case TARGET_NR_fstat64:
5421 ret = get_errno(fstat(arg1, &st));
5423 ret = host_to_target_stat64(cpu_env, arg2, &st);
/* fstatat64 additionally requires the *host* syscall (__NR_fstatat64),
   reached through the sys_fstatat64 wrapper. */
5426 #if defined(TARGET_NR_fstatat64) && defined(__NR_fstatat64)
5427 case TARGET_NR_fstatat64:
5428 if (!(p = lock_user_string(arg2)))
5430 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
5432 ret = host_to_target_stat64(cpu_env, arg3, &st);
/* ---- 16-bit uid/gid syscall emulation (USE_UID16 targets; see the
   TARGET_I386/ARM/SPARC/M68K/SH4/CRIS guard near the top of the file).
   low2high*()/high2low*() translate between the guest's 16-bit ids and
   the host's full-width ids. ---- */
5436 case TARGET_NR_lchown:
5437 if (!(p = lock_user_string(arg1)))
5439 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
5440 unlock_user(p, arg1, 0);
5442 case TARGET_NR_getuid:
5443 ret = get_errno(high2lowuid(getuid()));
5445 case TARGET_NR_getgid:
5446 ret = get_errno(high2lowgid(getgid()));
5448 case TARGET_NR_geteuid:
5449 ret = get_errno(high2lowuid(geteuid()));
5451 case TARGET_NR_getegid:
5452 ret = get_errno(high2lowgid(getegid()));
5454 case TARGET_NR_setreuid:
5455 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
5457 case TARGET_NR_setregid:
5458 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
/* getgroups: host gids are fetched into a stack (alloca) buffer, then
   narrowed to 16-bit guest entries.  gidsetsize * 2 is the guest buffer
   size in bytes (sizeof(uint16_t) == 2). */
5460 case TARGET_NR_getgroups:
5462 int gidsetsize = arg1;
5463 uint16_t *target_grouplist;
5467 grouplist = alloca(gidsetsize * sizeof(gid_t));
5468 ret = get_errno(getgroups(gidsetsize, grouplist));
/* gidsetsize == 0 is the query-count form: return the count without
   touching the (possibly NULL) guest buffer. */
5469 if (gidsetsize == 0)
5471 if (!is_error(ret)) {
5472 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
5473 if (!target_grouplist)
5475 for(i = 0;i < ret; i++)
5476 target_grouplist[i] = tswap16(grouplist[i]);
5477 unlock_user(target_grouplist, arg2, gidsetsize * 2);
/* setgroups: the mirror image — widen 16-bit guest gids to host gid_t. */
5481 case TARGET_NR_setgroups:
5483 int gidsetsize = arg1;
5484 uint16_t *target_grouplist;
5488 grouplist = alloca(gidsetsize * sizeof(gid_t));
5489 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
5490 if (!target_grouplist) {
5491 ret = -TARGET_EFAULT;
5494 for(i = 0;i < gidsetsize; i++)
5495 grouplist[i] = tswap16(target_grouplist[i]);
5496 unlock_user(target_grouplist, arg2, 0);
5497 ret = get_errno(setgroups(gidsetsize, grouplist));
5500 case TARGET_NR_fchown:
5501 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
5503 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
5504 case TARGET_NR_fchownat:
5505 if (!(p = lock_user_string(arg2)))
5507 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
5508 unlock_user(p, arg2, 0);
5511 #ifdef TARGET_NR_setresuid
5512 case TARGET_NR_setresuid:
5513 ret = get_errno(setresuid(low2highuid(arg1),
5515 low2highuid(arg3)));
/* getresuid: the three ids are written back individually as 16-bit
   guest values via put_user_u16 (EFAULT path elided). */
5518 #ifdef TARGET_NR_getresuid
5519 case TARGET_NR_getresuid:
5521 uid_t ruid, euid, suid;
5522 ret = get_errno(getresuid(&ruid, &euid, &suid));
5523 if (!is_error(ret)) {
5524 if (put_user_u16(high2lowuid(ruid), arg1)
5525 || put_user_u16(high2lowuid(euid), arg2)
5526 || put_user_u16(high2lowuid(suid), arg3))
/* NOTE(review): the guard below tests TARGET_NR_getresgid but the case
   is TARGET_NR_setresgid — looks like a copy/paste slip; should
   presumably be #ifdef TARGET_NR_setresgid.  Verify before changing. */
5532 #ifdef TARGET_NR_getresgid
5533 case TARGET_NR_setresgid:
5534 ret = get_errno(setresgid(low2highgid(arg1),
5536 low2highgid(arg3)));
5539 #ifdef TARGET_NR_getresgid
5540 case TARGET_NR_getresgid:
5542 gid_t rgid, egid, sgid;
5543 ret = get_errno(getresgid(&rgid, &egid, &sgid));
5544 if (!is_error(ret)) {
5545 if (put_user_u16(high2lowgid(rgid), arg1)
5546 || put_user_u16(high2lowgid(egid), arg2)
5547 || put_user_u16(high2lowgid(sgid), arg3))
5553 case TARGET_NR_chown:
5554 if (!(p = lock_user_string(arg1)))
5556 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
5557 unlock_user(p, arg1, 0);
5559 case TARGET_NR_setuid:
5560 ret = get_errno(setuid(low2highuid(arg1)));
5562 case TARGET_NR_setgid:
5563 ret = get_errno(setgid(low2highgid(arg1)));
/* setfsuid/setfsgid always succeed per the kernel contract; args are
   passed through without 16->32 widening here. */
5565 case TARGET_NR_setfsuid:
5566 ret = get_errno(setfsuid(arg1));
5568 case TARGET_NR_setfsgid:
5569 ret = get_errno(setfsgid(arg1));
5571 #endif /* USE_UID16 */
/* ---- 32-bit uid/gid syscall variants (*32): same shape as the 16-bit
   section above but with no high/low conversion and 32-bit guest
   entries (tswap32 / put_user_u32, buffer size gidsetsize * 4). ---- */
5573 #ifdef TARGET_NR_lchown32
5574 case TARGET_NR_lchown32:
5575 if (!(p = lock_user_string(arg1)))
5577 ret = get_errno(lchown(p, arg2, arg3));
5578 unlock_user(p, arg1, 0);
5581 #ifdef TARGET_NR_getuid32
5582 case TARGET_NR_getuid32:
5583 ret = get_errno(getuid());
/* Alpha getxuid/getxgid return the effective id in a second register
   (IR_A4) alongside the real id in the normal return value; euid/egid
   are presumably set in lines elided from this listing. */
5587 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
5588 /* Alpha specific */
5589 case TARGET_NR_getxuid:
5593 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
5595 ret = get_errno(getuid());
5598 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
5599 /* Alpha specific */
5600 case TARGET_NR_getxgid:
5604 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
5606 ret = get_errno(getgid());
5610 #ifdef TARGET_NR_getgid32
5611 case TARGET_NR_getgid32:
5612 ret = get_errno(getgid());
5615 #ifdef TARGET_NR_geteuid32
5616 case TARGET_NR_geteuid32:
5617 ret = get_errno(geteuid());
5620 #ifdef TARGET_NR_getegid32
5621 case TARGET_NR_getegid32:
5622 ret = get_errno(getegid());
5625 #ifdef TARGET_NR_setreuid32
5626 case TARGET_NR_setreuid32:
5627 ret = get_errno(setreuid(arg1, arg2));
5630 #ifdef TARGET_NR_setregid32
5631 case TARGET_NR_setregid32:
5632 ret = get_errno(setregid(arg1, arg2));
/* getgroups32: 32-bit guest entries, otherwise identical to getgroups. */
5635 #ifdef TARGET_NR_getgroups32
5636 case TARGET_NR_getgroups32:
5638 int gidsetsize = arg1;
5639 uint32_t *target_grouplist;
5643 grouplist = alloca(gidsetsize * sizeof(gid_t));
5644 ret = get_errno(getgroups(gidsetsize, grouplist));
5645 if (gidsetsize == 0)
5647 if (!is_error(ret)) {
5648 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
5649 if (!target_grouplist) {
5650 ret = -TARGET_EFAULT;
5653 for(i = 0;i < ret; i++)
5654 target_grouplist[i] = tswap32(grouplist[i]);
5655 unlock_user(target_grouplist, arg2, gidsetsize * 4);
5660 #ifdef TARGET_NR_setgroups32
5661 case TARGET_NR_setgroups32:
5663 int gidsetsize = arg1;
5664 uint32_t *target_grouplist;
5668 grouplist = alloca(gidsetsize * sizeof(gid_t));
5669 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
5670 if (!target_grouplist) {
5671 ret = -TARGET_EFAULT;
5674 for(i = 0;i < gidsetsize; i++)
5675 grouplist[i] = tswap32(target_grouplist[i]);
5676 unlock_user(target_grouplist, arg2, 0);
5677 ret = get_errno(setgroups(gidsetsize, grouplist));
5681 #ifdef TARGET_NR_fchown32
5682 case TARGET_NR_fchown32:
5683 ret = get_errno(fchown(arg1, arg2, arg3));
5686 #ifdef TARGET_NR_setresuid32
5687 case TARGET_NR_setresuid32:
5688 ret = get_errno(setresuid(arg1, arg2, arg3));
5691 #ifdef TARGET_NR_getresuid32
5692 case TARGET_NR_getresuid32:
5694 uid_t ruid, euid, suid;
5695 ret = get_errno(getresuid(&ruid, &euid, &suid));
5696 if (!is_error(ret)) {
5697 if (put_user_u32(ruid, arg1)
5698 || put_user_u32(euid, arg2)
5699 || put_user_u32(suid, arg3))
5705 #ifdef TARGET_NR_setresgid32
5706 case TARGET_NR_setresgid32:
5707 ret = get_errno(setresgid(arg1, arg2, arg3));
5710 #ifdef TARGET_NR_getresgid32
5711 case TARGET_NR_getresgid32:
5713 gid_t rgid, egid, sgid;
5714 ret = get_errno(getresgid(&rgid, &egid, &sgid));
5715 if (!is_error(ret)) {
5716 if (put_user_u32(rgid, arg1)
5717 || put_user_u32(egid, arg2)
5718 || put_user_u32(sgid, arg3))
5724 #ifdef TARGET_NR_chown32
5725 case TARGET_NR_chown32:
5726 if (!(p = lock_user_string(arg1)))
5728 ret = get_errno(chown(p, arg2, arg3));
5729 unlock_user(p, arg1, 0);
5732 #ifdef TARGET_NR_setuid32
5733 case TARGET_NR_setuid32:
5734 ret = get_errno(setuid(arg1));
5737 #ifdef TARGET_NR_setgid32
5738 case TARGET_NR_setgid32:
5739 ret = get_errno(setgid(arg1));
5742 #ifdef TARGET_NR_setfsuid32
5743 case TARGET_NR_setfsuid32:
5744 ret = get_errno(setfsuid(arg1));
5747 #ifdef TARGET_NR_setfsgid32
5748 case TARGET_NR_setfsgid32:
5749 ret = get_errno(setfsgid(arg1));
5753 case TARGET_NR_pivot_root:
/* mincore: arg1/arg2 = guest address range, arg3 = guest output vector
   (one byte per page). */
5755 #ifdef TARGET_NR_mincore
5756 case TARGET_NR_mincore:
5759 ret = -TARGET_EFAULT;
5760 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
/* NOTE(review): the *output* vector is locked with lock_user_string(),
   which is sized by strlen and read-oriented — looks suspect for a
   buffer the kernel writes into; verify against a current syscall.c. */
5762 if (!(p = lock_user_string(arg3)))
5764 ret = get_errno(mincore(a, arg2, p));
5765 unlock_user(p, arg3, ret);
5767 unlock_user(a, arg1, 0);
5771 #ifdef TARGET_NR_arm_fadvise64_64
5772 case TARGET_NR_arm_fadvise64_64:
5775 * arm_fadvise64_64 looks like fadvise64_64 but
5776 * with different argument order
5784 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
5785 #ifdef TARGET_NR_fadvise64_64
5786 case TARGET_NR_fadvise64_64:
5788 /* This is a hint, so ignoring and returning success is ok. */
5792 #ifdef TARGET_NR_madvise
5793 case TARGET_NR_madvise:
5794 /* A straight passthrough may not be safe because qemu sometimes
5795 turns private file-backed mappings into anonymous mappings.
5796 This will break MADV_DONTNEED.
5797 This is a hint, so ignoring and returning success is ok. */
/* ---- fcntl64 (32-bit ABIs only): the guest's flock64 layout differs
   from the host's, and ARM EABI uses yet another padded layout
   (target_eabi_flock64).  Each lock command converts guest -> host
   flock64, calls host fcntl(), and for GETLK converts the result back.
   The lone F_*64 labels at 5812-5818 are presumably a cmd-translation
   switch mapping target commands to host ones (bodies elided). ---- */
5801 #if TARGET_ABI_BITS == 32
5802 case TARGET_NR_fcntl64:
5806 struct target_flock64 *target_fl;
5808 struct target_eabi_flock64 *target_efl;
5812 case TARGET_F_GETLK64:
5815 case TARGET_F_SETLK64:
5818 case TARGET_F_SETLKW64:
/* GETLK64: read the guest's query lock, ask the host, write back the
   (possibly updated) conflicting-lock description. */
5827 case TARGET_F_GETLK64:
5829 if (((CPUARMState *)cpu_env)->eabi) {
5830 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
5832 fl.l_type = tswap16(target_efl->l_type);
5833 fl.l_whence = tswap16(target_efl->l_whence);
5834 fl.l_start = tswap64(target_efl->l_start);
5835 fl.l_len = tswap64(target_efl->l_len);
5836 fl.l_pid = tswapl(target_efl->l_pid);
5837 unlock_user_struct(target_efl, arg3, 0);
/* non-EABI path: plain target_flock64 layout. */
5841 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
5843 fl.l_type = tswap16(target_fl->l_type);
5844 fl.l_whence = tswap16(target_fl->l_whence);
5845 fl.l_start = tswap64(target_fl->l_start);
5846 fl.l_len = tswap64(target_fl->l_len);
5847 fl.l_pid = tswapl(target_fl->l_pid);
5848 unlock_user_struct(target_fl, arg3, 0);
5850 ret = get_errno(fcntl(arg1, cmd, &fl));
/* Write-back of the GETLK result, host -> guest. */
5853 if (((CPUARMState *)cpu_env)->eabi) {
5854 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
5856 target_efl->l_type = tswap16(fl.l_type);
5857 target_efl->l_whence = tswap16(fl.l_whence);
5858 target_efl->l_start = tswap64(fl.l_start);
5859 target_efl->l_len = tswap64(fl.l_len);
5860 target_efl->l_pid = tswapl(fl.l_pid);
5861 unlock_user_struct(target_efl, arg3, 1);
5865 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
5867 target_fl->l_type = tswap16(fl.l_type);
5868 target_fl->l_whence = tswap16(fl.l_whence);
5869 target_fl->l_start = tswap64(fl.l_start);
5870 target_fl->l_len = tswap64(fl.l_len);
5871 target_fl->l_pid = tswapl(fl.l_pid);
5872 unlock_user_struct(target_fl, arg3, 1);
/* SETLK/SETLKW: guest -> host conversion only; no write-back needed. */
5877 case TARGET_F_SETLK64:
5878 case TARGET_F_SETLKW64:
5880 if (((CPUARMState *)cpu_env)->eabi) {
5881 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
5883 fl.l_type = tswap16(target_efl->l_type);
5884 fl.l_whence = tswap16(target_efl->l_whence);
5885 fl.l_start = tswap64(target_efl->l_start);
5886 fl.l_len = tswap64(target_efl->l_len);
5887 fl.l_pid = tswapl(target_efl->l_pid);
5888 unlock_user_struct(target_efl, arg3, 0);
5892 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
5894 fl.l_type = tswap16(target_fl->l_type);
5895 fl.l_whence = tswap16(target_fl->l_whence);
5896 fl.l_start = tswap64(target_fl->l_start);
5897 fl.l_len = tswap64(target_fl->l_len);
5898 fl.l_pid = tswapl(target_fl->l_pid);
5899 unlock_user_struct(target_fl, arg3, 0);
5901 ret = get_errno(fcntl(arg1, cmd, &fl));
/* All non-lock commands share the regular fcntl path. */
5904 ret = do_fcntl(arg1, cmd, arg3);
5910 #ifdef TARGET_NR_cacheflush
5911 case TARGET_NR_cacheflush:
5912 /* self-modifying code is handled automatically, so nothing needed */
5916 #ifdef TARGET_NR_security
5917 case TARGET_NR_security:
/* getpagesize reports the TARGET page size, not the host's — the guest
   must see its own architecture's page size. */
5920 #ifdef TARGET_NR_getpagesize
5921 case TARGET_NR_getpagesize:
5922 ret = TARGET_PAGE_SIZE;
5925 case TARGET_NR_gettid:
5926 ret = get_errno(gettid());
/* readahead: on 32-bit ABIs the 64-bit offset arrives split in
   arg2 (low) / arg3 (high); the EABI check presumably reshuffles the
   register pair (elided).  64-bit ABIs pass the offset directly. */
5928 #ifdef TARGET_NR_readahead
5929 case TARGET_NR_readahead:
5930 #if TARGET_ABI_BITS == 32
5932 if (((CPUARMState *)cpu_env)->eabi)
5939 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
5941 ret = get_errno(readahead(arg1, arg2, arg3));
/* Extended-attribute syscalls: all unimplemented; unimplemented_nowarn
   suppresses the "Unsupported syscall" log since these are common and
   callers handle ENOSYS gracefully. */
5945 #ifdef TARGET_NR_setxattr
5946 case TARGET_NR_setxattr:
5947 case TARGET_NR_lsetxattr:
5948 case TARGET_NR_fsetxattr:
5949 case TARGET_NR_getxattr:
5950 case TARGET_NR_lgetxattr:
5951 case TARGET_NR_fgetxattr:
5952 case TARGET_NR_listxattr:
5953 case TARGET_NR_llistxattr:
5954 case TARGET_NR_flistxattr:
5955 case TARGET_NR_removexattr:
5956 case TARGET_NR_lremovexattr:
5957 case TARGET_NR_fremovexattr:
5958 goto unimplemented_nowarn;
/* set_thread_area: per-target TLS plumbing — MIPS stores the TLS value
   in the CPU state, CRIS writes the PID pseudo-register (rejecting
   misaligned values with EINVAL), i386/ABI32 uses a GDT-entry helper. */
5960 #ifdef TARGET_NR_set_thread_area
5961 case TARGET_NR_set_thread_area:
5962 #if defined(TARGET_MIPS)
5963 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
5966 #elif defined(TARGET_CRIS)
5968 ret = -TARGET_EINVAL;
5970 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
5974 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
5975 ret = do_set_thread_area(cpu_env, arg1);
5978 goto unimplemented_nowarn;
5981 #ifdef TARGET_NR_get_thread_area
5982 case TARGET_NR_get_thread_area:
5983 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5984 ret = do_get_thread_area(cpu_env, arg1);
5986 goto unimplemented_nowarn;
5989 #ifdef TARGET_NR_getdomainname
5990 case TARGET_NR_getdomainname:
5991 goto unimplemented_nowarn;
/* POSIX clock syscalls: call the host, convert struct timespec back to
   the guest layout only on success. */
5994 #ifdef TARGET_NR_clock_gettime
5995 case TARGET_NR_clock_gettime:
5998 ret = get_errno(clock_gettime(arg1, &ts));
5999 if (!is_error(ret)) {
6000 host_to_target_timespec(arg2, &ts);
6005 #ifdef TARGET_NR_clock_getres
6006 case TARGET_NR_clock_getres:
6009 ret = get_errno(clock_getres(arg1, &ts));
6010 if (!is_error(ret)) {
6011 host_to_target_timespec(arg2, &ts);
/* clock_nanosleep: `ts` is reused for both request and (if arg4 is
   non-NULL) the remaining-time result written back to the guest. */
6016 #ifdef TARGET_NR_clock_nanosleep
6017 case TARGET_NR_clock_nanosleep:
6020 target_to_host_timespec(&ts, arg3);
6021 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
6023 host_to_target_timespec(arg4, &ts);
/* set_tid_address: g2h() translates the guest address directly — the
   kernel itself will write the tid there on clone/exit. */
6028 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
6029 case TARGET_NR_set_tid_address:
6030 ret = get_errno(set_tid_address((int *)g2h(arg1)));
/* tkill/tgkill: signal numbers are remapped guest -> host. */
6034 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
6035 case TARGET_NR_tkill:
6036 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
6040 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
6041 case TARGET_NR_tgkill:
6042 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
6043 target_to_host_signal(arg3)));
6047 #ifdef TARGET_NR_set_robust_list
6048 case TARGET_NR_set_robust_list:
6049 goto unimplemented_nowarn;
/* utimensat: convert the two guest timespecs, then call with a NULL
   path when the guest passed none (arg2 == 0 — guard elided), else
   with the locked, sysroot-redirected path. */
6052 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
6053 case TARGET_NR_utimensat:
6055 struct timespec ts[2];
6056 target_to_host_timespec(ts, arg3);
6057 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
6059 ret = get_errno(sys_utimensat(arg1, NULL, ts, arg4));
6061 if (!(p = lock_user_string(arg2))) {
6062 ret = -TARGET_EFAULT;
6065 ret = get_errno(sys_utimensat(arg1, path(p), ts, arg4));
6066 unlock_user(p, arg2, 0);
/* futex is only meaningful with NPTL thread emulation enabled. */
6071 #if defined(USE_NPTL)
6072 case TARGET_NR_futex:
6073 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
6076 #ifdef TARGET_NR_inotify_init
6077 case TARGET_NR_inotify_init:
6078 ret = get_errno(sys_inotify_init());
6081 #ifdef TARGET_NR_inotify_add_watch
6082 case TARGET_NR_inotify_add_watch:
/* NOTE(review): lock_user_string() result is not NULL-checked here,
   unlike the other path-taking cases — verify. */
6083 p = lock_user_string(arg2);
6084 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
6085 unlock_user(p, arg2, 0);
6088 #ifdef TARGET_NR_inotify_rm_watch
6089 case TARGET_NR_inotify_rm_watch:
6090 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
/* default: log unsupported syscalls once; the unimplemented_nowarn
   label lets the quiet cases above share the ENOSYS return without the
   log line. */
6096 gemu_log("qemu: Unsupported syscall: %d\n", num);
6097 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
6098 unimplemented_nowarn:
6100 ret = -TARGET_ENOSYS;
/* Common exit: trace the return value (strace-style when enabled). */
6105 gemu_log(" = %ld\n", ret);
6108 print_syscall_ret(num, ret);
/* efault path (label elided): shared bail-out for failed guest-memory
   accesses. */
6111 ret = -TARGET_EFAULT;