4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
32 #include <sys/types.h>
38 #include <sys/mount.h>
39 #include <sys/prctl.h>
40 #include <sys/resource.h>
45 #include <sys/socket.h>
48 #include <sys/times.h>
51 #include <sys/statfs.h>
53 #include <sys/sysinfo.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <qemu-common.h>
62 #define termios host_termios
63 #define winsize host_winsize
64 #define termio host_termio
65 #define sgttyb host_sgttyb /* same as target */
66 #define tchars host_tchars /* same as target */
67 #define ltchars host_ltchars /* same as target */
69 #include <linux/termios.h>
70 #include <linux/unistd.h>
71 #include <linux/utsname.h>
72 #include <linux/cdrom.h>
73 #include <linux/hdreg.h>
74 #include <linux/soundcard.h>
76 #include <linux/mtio.h>
77 #include "linux_loop.h"
80 #include "qemu-common.h"
83 #include <linux/futex.h>
84 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
85 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
87 /* XXX: Hardcode the above values. */
88 #define CLONE_NPTL_FLAGS2 0
93 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
94 || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
95 /* 16 bit uid wrappers emulation */
99 //#include <linux/msdos_fs.h>
100 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
101 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
112 #define _syscall0(type,name) \
113 static type name (void) \
115 return syscall(__NR_##name); \
118 #define _syscall1(type,name,type1,arg1) \
119 static type name (type1 arg1) \
121 return syscall(__NR_##name, arg1); \
124 #define _syscall2(type,name,type1,arg1,type2,arg2) \
125 static type name (type1 arg1,type2 arg2) \
127 return syscall(__NR_##name, arg1, arg2); \
130 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
131 static type name (type1 arg1,type2 arg2,type3 arg3) \
133 return syscall(__NR_##name, arg1, arg2, arg3); \
136 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
137 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
139 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
142 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
144 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
146 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
150 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
151 type5,arg5,type6,arg6) \
152 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
155 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
159 #define __NR_sys_exit __NR_exit
160 #define __NR_sys_uname __NR_uname
161 #define __NR_sys_faccessat __NR_faccessat
162 #define __NR_sys_fchmodat __NR_fchmodat
163 #define __NR_sys_fchownat __NR_fchownat
164 #define __NR_sys_fstatat64 __NR_fstatat64
165 #define __NR_sys_futimesat __NR_futimesat
166 #define __NR_sys_getcwd1 __NR_getcwd
167 #define __NR_sys_getdents __NR_getdents
168 #define __NR_sys_getdents64 __NR_getdents64
169 #define __NR_sys_getpriority __NR_getpriority
170 #define __NR_sys_linkat __NR_linkat
171 #define __NR_sys_mkdirat __NR_mkdirat
172 #define __NR_sys_mknodat __NR_mknodat
173 #define __NR_sys_newfstatat __NR_newfstatat
174 #define __NR_sys_openat __NR_openat
175 #define __NR_sys_readlinkat __NR_readlinkat
176 #define __NR_sys_renameat __NR_renameat
177 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
178 #define __NR_sys_symlinkat __NR_symlinkat
179 #define __NR_sys_syslog __NR_syslog
180 #define __NR_sys_tgkill __NR_tgkill
181 #define __NR_sys_tkill __NR_tkill
182 #define __NR_sys_unlinkat __NR_unlinkat
183 #define __NR_sys_utimensat __NR_utimensat
184 #define __NR_sys_futex __NR_futex
185 #define __NR_sys_inotify_init __NR_inotify_init
186 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
187 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
189 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
190 #define __NR__llseek __NR_lseek
194 _syscall0(int, gettid)
196 /* This is a replacement for the host gettid() and must return a host
198 static int gettid(void) {
202 _syscall1(int,sys_exit,int,status)
203 _syscall1(int,sys_uname,struct new_utsname *,buf)
204 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
205 _syscall4(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode,int,flags)
207 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
208 _syscall4(int,sys_fchmodat,int,dirfd,const char *,pathname,
209 mode_t,mode,int,flags)
211 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
212 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
213 uid_t,owner,gid_t,group,int,flags)
215 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
216 defined(__NR_fstatat64)
217 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
218 struct stat *,buf,int,flags)
220 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
221 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
222 const struct timeval *,times)
224 _syscall2(int,sys_getcwd1,char *,buf,size_t,size)
225 #if TARGET_ABI_BITS == 32
226 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
228 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
229 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
231 _syscall2(int, sys_getpriority, int, which, int, who);
232 #if !defined (__x86_64__)
233 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
234 loff_t *, res, uint, wh);
236 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
237 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
238 int,newdirfd,const char *,newpath,int,flags)
240 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
241 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
243 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
244 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
245 mode_t,mode,dev_t,dev)
247 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
248 defined(__NR_newfstatat)
249 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
250 struct stat *,buf,int,flags)
252 #if defined(TARGET_NR_openat) && defined(__NR_openat)
253 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
255 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
256 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
257 char *,buf,size_t,bufsize)
259 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
260 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
261 int,newdirfd,const char *,newpath)
263 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
264 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
265 _syscall3(int,sys_symlinkat,const char *,oldpath,
266 int,newdirfd,const char *,newpath)
268 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
269 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
270 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
272 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
273 _syscall2(int,sys_tkill,int,tid,int,sig)
275 #ifdef __NR_exit_group
276 _syscall1(int,exit_group,int,error_code)
278 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
279 _syscall1(int,set_tid_address,int *,tidptr)
281 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
282 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
284 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
285 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
286 const struct timespec *,tsp,int,flags)
288 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
289 _syscall0(int,sys_inotify_init)
291 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
292 _syscall3(int,sys_inotify_add_watch,int,fd,const char *,pathname,uint32_t,mask)
294 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
295 _syscall2(int,sys_inotify_rm_watch,int,fd,uint32_t,wd)
297 #if defined(USE_NPTL)
298 #if defined(TARGET_NR_futex) && defined(__NR_futex)
299 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
300 const struct timespec *,timeout,int *,uaddr2,int,val3)
304 extern int personality(int);
305 extern int flock(int, int);
306 extern int setfsuid(int);
307 extern int setfsgid(int);
308 extern int setgroups(int, gid_t *);
310 #define ERRNO_TABLE_SIZE 1200
312 /* target_to_host_errno_table[] is initialized from
313 * host_to_target_errno_table[] in syscall_init(). */
314 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
318 * This list is the union of errno values overridden in asm-<arch>/errno.h
319 * minus the errnos that are not actually generic to all archs.
321 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
322 [EIDRM] = TARGET_EIDRM,
323 [ECHRNG] = TARGET_ECHRNG,
324 [EL2NSYNC] = TARGET_EL2NSYNC,
325 [EL3HLT] = TARGET_EL3HLT,
326 [EL3RST] = TARGET_EL3RST,
327 [ELNRNG] = TARGET_ELNRNG,
328 [EUNATCH] = TARGET_EUNATCH,
329 [ENOCSI] = TARGET_ENOCSI,
330 [EL2HLT] = TARGET_EL2HLT,
331 [EDEADLK] = TARGET_EDEADLK,
332 [ENOLCK] = TARGET_ENOLCK,
333 [EBADE] = TARGET_EBADE,
334 [EBADR] = TARGET_EBADR,
335 [EXFULL] = TARGET_EXFULL,
336 [ENOANO] = TARGET_ENOANO,
337 [EBADRQC] = TARGET_EBADRQC,
338 [EBADSLT] = TARGET_EBADSLT,
339 [EBFONT] = TARGET_EBFONT,
340 [ENOSTR] = TARGET_ENOSTR,
341 [ENODATA] = TARGET_ENODATA,
342 [ETIME] = TARGET_ETIME,
343 [ENOSR] = TARGET_ENOSR,
344 [ENONET] = TARGET_ENONET,
345 [ENOPKG] = TARGET_ENOPKG,
346 [EREMOTE] = TARGET_EREMOTE,
347 [ENOLINK] = TARGET_ENOLINK,
348 [EADV] = TARGET_EADV,
349 [ESRMNT] = TARGET_ESRMNT,
350 [ECOMM] = TARGET_ECOMM,
351 [EPROTO] = TARGET_EPROTO,
352 [EDOTDOT] = TARGET_EDOTDOT,
353 [EMULTIHOP] = TARGET_EMULTIHOP,
354 [EBADMSG] = TARGET_EBADMSG,
355 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
356 [EOVERFLOW] = TARGET_EOVERFLOW,
357 [ENOTUNIQ] = TARGET_ENOTUNIQ,
358 [EBADFD] = TARGET_EBADFD,
359 [EREMCHG] = TARGET_EREMCHG,
360 [ELIBACC] = TARGET_ELIBACC,
361 [ELIBBAD] = TARGET_ELIBBAD,
362 [ELIBSCN] = TARGET_ELIBSCN,
363 [ELIBMAX] = TARGET_ELIBMAX,
364 [ELIBEXEC] = TARGET_ELIBEXEC,
365 [EILSEQ] = TARGET_EILSEQ,
366 [ENOSYS] = TARGET_ENOSYS,
367 [ELOOP] = TARGET_ELOOP,
368 [ERESTART] = TARGET_ERESTART,
369 [ESTRPIPE] = TARGET_ESTRPIPE,
370 [ENOTEMPTY] = TARGET_ENOTEMPTY,
371 [EUSERS] = TARGET_EUSERS,
372 [ENOTSOCK] = TARGET_ENOTSOCK,
373 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
374 [EMSGSIZE] = TARGET_EMSGSIZE,
375 [EPROTOTYPE] = TARGET_EPROTOTYPE,
376 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
377 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
378 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
379 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
380 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
381 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
382 [EADDRINUSE] = TARGET_EADDRINUSE,
383 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
384 [ENETDOWN] = TARGET_ENETDOWN,
385 [ENETUNREACH] = TARGET_ENETUNREACH,
386 [ENETRESET] = TARGET_ENETRESET,
387 [ECONNABORTED] = TARGET_ECONNABORTED,
388 [ECONNRESET] = TARGET_ECONNRESET,
389 [ENOBUFS] = TARGET_ENOBUFS,
390 [EISCONN] = TARGET_EISCONN,
391 [ENOTCONN] = TARGET_ENOTCONN,
392 [EUCLEAN] = TARGET_EUCLEAN,
393 [ENOTNAM] = TARGET_ENOTNAM,
394 [ENAVAIL] = TARGET_ENAVAIL,
395 [EISNAM] = TARGET_EISNAM,
396 [EREMOTEIO] = TARGET_EREMOTEIO,
397 [ESHUTDOWN] = TARGET_ESHUTDOWN,
398 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
399 [ETIMEDOUT] = TARGET_ETIMEDOUT,
400 [ECONNREFUSED] = TARGET_ECONNREFUSED,
401 [EHOSTDOWN] = TARGET_EHOSTDOWN,
402 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
403 [EALREADY] = TARGET_EALREADY,
404 [EINPROGRESS] = TARGET_EINPROGRESS,
405 [ESTALE] = TARGET_ESTALE,
406 [ECANCELED] = TARGET_ECANCELED,
407 [ENOMEDIUM] = TARGET_ENOMEDIUM,
408 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
410 [ENOKEY] = TARGET_ENOKEY,
413 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
416 [EKEYREVOKED] = TARGET_EKEYREVOKED,
419 [EKEYREJECTED] = TARGET_EKEYREJECTED,
422 [EOWNERDEAD] = TARGET_EOWNERDEAD,
424 #ifdef ENOTRECOVERABLE
425 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
/* Translate a host errno value into the target architecture's numbering
 * using the sparse host_to_target_errno_table[] above; a zero entry means
 * the value is shared between host and target.
 * NOTE(review): this listing is sampled -- the opening brace and the
 * fallthrough (presumably "return err;") are elided; verify against the
 * full file. */
429 static inline int host_to_target_errno(int err)
431 if(host_to_target_errno_table[err])
432 return host_to_target_errno_table[err];
/* Inverse of host_to_target_errno(): map a target errno back to the host
 * value via target_to_host_errno_table[] (filled in from the host table
 * at init time, per the comment near its definition above).
 * NOTE(review): sampled listing -- the brace and the identity fallthrough
 * are elided here. */
436 static inline int target_to_host_errno(int err)
438 if (target_to_host_errno_table[err])
439 return target_to_host_errno_table[err];
/* Wrap a raw host syscall/libc return value: on failure return the
 * negated, target-numbered errno.
 * NOTE(review): the failure test (presumably "if (ret == -1)") and the
 * success path returning ret unchanged are elided in this sampled
 * listing -- confirm against the full file. */
443 static inline abi_long get_errno(abi_long ret)
446 return -host_to_target_errno(errno);
/* True when ret encodes a -errno value: nonzero iff ret falls in the top
 * 4096 values of the unsigned range, i.e. -4096 <= ret <= -1, mirroring
 * the Linux kernel's syscall error-return convention. */
451 static inline int is_error(abi_long ret)
453 return (abi_ulong)ret >= (abi_ulong)(-4096);
/* Return a human-readable message for a TARGET errno by first mapping it
 * to the host numbering and then delegating to the host strerror().
 * The returned string is owned by libc and must not be freed. */
456 char *target_strerror(int err)
458 return strerror(target_to_host_errno(err));
/* Program-break emulation state: target_brk is the current break,
 * target_original_brk the value set at process start (do_brk refuses to
 * shrink below it, per the check visible in do_brk below). */
461 static abi_ulong target_brk;
462 static abi_ulong target_original_brk;
/* Initialize both break pointers to the host-page-aligned value supplied
 * by the loader; called once during guest process setup. */
464 void target_set_brk(abi_ulong new_brk)
466 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
469 /* do_brk() must return target values and target errnos. */
470 abi_long do_brk(abi_ulong new_brk)
473 abi_long mapped_addr;
478 if (new_brk < target_original_brk)
481 brk_page = HOST_PAGE_ALIGN(target_brk);
483 /* If the new brk is less than this, set it and we're done... */
484 if (new_brk < brk_page) {
485 target_brk = new_brk;
489 /* We need to allocate more memory after the brk... */
490 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
491 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
492 PROT_READ|PROT_WRITE,
493 MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
495 if (!is_error(mapped_addr))
496 target_brk = new_brk;
501 static inline abi_long copy_from_user_fdset(fd_set *fds,
502 abi_ulong target_fds_addr,
506 abi_ulong b, *target_fds;
508 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
509 if (!(target_fds = lock_user(VERIFY_READ,
511 sizeof(abi_ulong) * nw,
513 return -TARGET_EFAULT;
517 for (i = 0; i < nw; i++) {
518 /* grab the abi_ulong */
519 __get_user(b, &target_fds[i]);
520 for (j = 0; j < TARGET_ABI_BITS; j++) {
521 /* check the bit inside the abi_ulong */
528 unlock_user(target_fds, target_fds_addr, 0);
533 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
539 abi_ulong *target_fds;
541 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
542 if (!(target_fds = lock_user(VERIFY_WRITE,
544 sizeof(abi_ulong) * nw,
546 return -TARGET_EFAULT;
549 for (i = 0; i < nw; i++) {
551 for (j = 0; j < TARGET_ABI_BITS; j++) {
552 v |= ((FD_ISSET(k, fds) != 0) << j);
555 __put_user(v, &target_fds[i]);
558 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
563 #if defined(__alpha__)
/* Convert a clock_t tick count from host HZ to target HZ, widening to
 * int64_t so the multiply cannot overflow before the divide.
 * NOTE(review): sampled listing -- the #if's true branch (presumably a
 * plain "return ticks;") and the matching #else/#endif are elided. */
569 static inline abi_long host_to_target_clock_t(long ticks)
571 #if HOST_HZ == TARGET_HZ
574 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/* Copy a host struct rusage into guest memory at target_addr, byte-
 * swapping every field with tswapl() for cross-endian targets.  Returns
 * -TARGET_EFAULT if the guest address cannot be locked for writing.
 * NOTE(review): sampled listing -- braces and the final "return 0;" are
 * elided here. */
578 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
579 const struct rusage *rusage)
581 struct target_rusage *target_rusage;
583 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
584 return -TARGET_EFAULT;
585 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
586 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
587 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
588 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
589 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
590 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
591 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
592 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
593 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
594 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
595 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
596 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
597 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
598 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
599 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
600 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
601 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
602 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
603 unlock_user_struct(target_rusage, target_addr, 1);
/* Read a struct timeval from guest memory into *tv, byte-swapping via
 * __get_user.  Returns -TARGET_EFAULT when the guest address cannot be
 * locked for reading.
 * NOTE(review): sampled listing -- braces and the trailing "return 0;"
 * are elided here. */
608 static inline abi_long copy_from_user_timeval(struct timeval *tv,
609 abi_ulong target_tv_addr)
611 struct target_timeval *target_tv;
613 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
614 return -TARGET_EFAULT;
616 __get_user(tv->tv_sec, &target_tv->tv_sec);
617 __get_user(tv->tv_usec, &target_tv->tv_usec);
619 unlock_user_struct(target_tv, target_tv_addr, 0);
/* Write a host struct timeval into guest memory, byte-swapping via
 * __put_user; mirror image of copy_from_user_timeval above.  Returns
 * -TARGET_EFAULT when the guest address cannot be locked for writing.
 * NOTE(review): sampled listing -- braces and the trailing "return 0;"
 * are elided here. */
624 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
625 const struct timeval *tv)
627 struct target_timeval *target_tv;
629 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
630 return -TARGET_EFAULT;
632 __put_user(tv->tv_sec, &target_tv->tv_sec);
633 __put_user(tv->tv_usec, &target_tv->tv_usec);
635 unlock_user_struct(target_tv, target_tv_addr, 1);
641 /* do_select() must return target values and target errnos. */
642 static abi_long do_select(int n,
643 abi_ulong rfd_addr, abi_ulong wfd_addr,
644 abi_ulong efd_addr, abi_ulong target_tv_addr)
646 fd_set rfds, wfds, efds;
647 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
648 struct timeval tv, *tv_ptr;
652 if (copy_from_user_fdset(&rfds, rfd_addr, n))
653 return -TARGET_EFAULT;
659 if (copy_from_user_fdset(&wfds, wfd_addr, n))
660 return -TARGET_EFAULT;
666 if (copy_from_user_fdset(&efds, efd_addr, n))
667 return -TARGET_EFAULT;
673 if (target_tv_addr) {
674 if (copy_from_user_timeval(&tv, target_tv_addr))
675 return -TARGET_EFAULT;
681 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
683 if (!is_error(ret)) {
684 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
685 return -TARGET_EFAULT;
686 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
687 return -TARGET_EFAULT;
688 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
689 return -TARGET_EFAULT;
691 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
692 return -TARGET_EFAULT;
698 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
699 abi_ulong target_addr,
702 struct target_sockaddr *target_saddr;
704 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
706 return -TARGET_EFAULT;
707 memcpy(addr, target_saddr, len);
708 addr->sa_family = tswap16(target_saddr->sa_family);
709 unlock_user(target_saddr, target_addr, 0);
714 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
715 struct sockaddr *addr,
718 struct target_sockaddr *target_saddr;
720 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
722 return -TARGET_EFAULT;
723 memcpy(target_saddr, addr, len);
724 target_saddr->sa_family = tswap16(addr->sa_family);
725 unlock_user(target_saddr, target_addr, len);
730 /* ??? Should this also swap msgh->name? */
731 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
732 struct target_msghdr *target_msgh)
734 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
735 abi_long msg_controllen;
736 abi_ulong target_cmsg_addr;
737 struct target_cmsghdr *target_cmsg;
740 msg_controllen = tswapl(target_msgh->msg_controllen);
741 if (msg_controllen < sizeof (struct target_cmsghdr))
743 target_cmsg_addr = tswapl(target_msgh->msg_control);
744 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
746 return -TARGET_EFAULT;
748 while (cmsg && target_cmsg) {
749 void *data = CMSG_DATA(cmsg);
750 void *target_data = TARGET_CMSG_DATA(target_cmsg);
752 int len = tswapl(target_cmsg->cmsg_len)
753 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
755 space += CMSG_SPACE(len);
756 if (space > msgh->msg_controllen) {
757 space -= CMSG_SPACE(len);
758 gemu_log("Host cmsg overflow\n");
762 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
763 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
764 cmsg->cmsg_len = CMSG_LEN(len);
766 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
767 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
768 memcpy(data, target_data, len);
770 int *fd = (int *)data;
771 int *target_fd = (int *)target_data;
772 int i, numfds = len / sizeof(int);
774 for (i = 0; i < numfds; i++)
775 fd[i] = tswap32(target_fd[i]);
778 cmsg = CMSG_NXTHDR(msgh, cmsg);
779 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
781 unlock_user(target_cmsg, target_cmsg_addr, 0);
783 msgh->msg_controllen = space;
787 /* ??? Should this also swap msgh->name? */
788 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
791 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
792 abi_long msg_controllen;
793 abi_ulong target_cmsg_addr;
794 struct target_cmsghdr *target_cmsg;
797 msg_controllen = tswapl(target_msgh->msg_controllen);
798 if (msg_controllen < sizeof (struct target_cmsghdr))
800 target_cmsg_addr = tswapl(target_msgh->msg_control);
801 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
803 return -TARGET_EFAULT;
805 while (cmsg && target_cmsg) {
806 void *data = CMSG_DATA(cmsg);
807 void *target_data = TARGET_CMSG_DATA(target_cmsg);
809 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
811 space += TARGET_CMSG_SPACE(len);
812 if (space > msg_controllen) {
813 space -= TARGET_CMSG_SPACE(len);
814 gemu_log("Target cmsg overflow\n");
818 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
819 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
820 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
822 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
823 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
824 memcpy(target_data, data, len);
826 int *fd = (int *)data;
827 int *target_fd = (int *)target_data;
828 int i, numfds = len / sizeof(int);
830 for (i = 0; i < numfds; i++)
831 target_fd[i] = tswap32(fd[i]);
834 cmsg = CMSG_NXTHDR(msgh, cmsg);
835 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
837 unlock_user(target_cmsg, target_cmsg_addr, space);
839 target_msgh->msg_controllen = tswapl(space);
843 /* do_setsockopt() Must return target values and target errnos. */
844 static abi_long do_setsockopt(int sockfd, int level, int optname,
845 abi_ulong optval_addr, socklen_t optlen)
852 /* TCP options all take an 'int' value. */
853 if (optlen < sizeof(uint32_t))
854 return -TARGET_EINVAL;
856 if (get_user_u32(val, optval_addr))
857 return -TARGET_EFAULT;
858 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
865 case IP_ROUTER_ALERT:
869 case IP_MTU_DISCOVER:
875 case IP_MULTICAST_TTL:
876 case IP_MULTICAST_LOOP:
878 if (optlen >= sizeof(uint32_t)) {
879 if (get_user_u32(val, optval_addr))
880 return -TARGET_EFAULT;
881 } else if (optlen >= 1) {
882 if (get_user_u8(val, optval_addr))
883 return -TARGET_EFAULT;
885 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
891 case TARGET_SOL_SOCKET:
893 /* Options with 'int' argument. */
894 case TARGET_SO_DEBUG:
897 case TARGET_SO_REUSEADDR:
898 optname = SO_REUSEADDR;
903 case TARGET_SO_ERROR:
906 case TARGET_SO_DONTROUTE:
907 optname = SO_DONTROUTE;
909 case TARGET_SO_BROADCAST:
910 optname = SO_BROADCAST;
912 case TARGET_SO_SNDBUF:
915 case TARGET_SO_RCVBUF:
918 case TARGET_SO_KEEPALIVE:
919 optname = SO_KEEPALIVE;
921 case TARGET_SO_OOBINLINE:
922 optname = SO_OOBINLINE;
924 case TARGET_SO_NO_CHECK:
925 optname = SO_NO_CHECK;
927 case TARGET_SO_PRIORITY:
928 optname = SO_PRIORITY;
931 case TARGET_SO_BSDCOMPAT:
932 optname = SO_BSDCOMPAT;
935 case TARGET_SO_PASSCRED:
936 optname = SO_PASSCRED;
938 case TARGET_SO_TIMESTAMP:
939 optname = SO_TIMESTAMP;
941 case TARGET_SO_RCVLOWAT:
942 optname = SO_RCVLOWAT;
944 case TARGET_SO_RCVTIMEO:
945 optname = SO_RCVTIMEO;
947 case TARGET_SO_SNDTIMEO:
948 optname = SO_SNDTIMEO;
954 if (optlen < sizeof(uint32_t))
955 return -TARGET_EINVAL;
957 if (get_user_u32(val, optval_addr))
958 return -TARGET_EFAULT;
959 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
963 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
964 ret = -TARGET_ENOPROTOOPT;
969 /* do_getsockopt() Must return target values and target errnos. */
970 static abi_long do_getsockopt(int sockfd, int level, int optname,
971 abi_ulong optval_addr, abi_ulong optlen)
978 case TARGET_SOL_SOCKET:
981 case TARGET_SO_LINGER:
982 case TARGET_SO_RCVTIMEO:
983 case TARGET_SO_SNDTIMEO:
984 case TARGET_SO_PEERCRED:
985 case TARGET_SO_PEERNAME:
986 /* These don't just return a single integer */
993 /* TCP options all take an 'int' value. */
995 if (get_user_u32(len, optlen))
996 return -TARGET_EFAULT;
998 return -TARGET_EINVAL;
1000 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1007 if (put_user_u32(val, optval_addr))
1008 return -TARGET_EFAULT;
1010 if (put_user_u8(val, optval_addr))
1011 return -TARGET_EFAULT;
1013 if (put_user_u32(len, optlen))
1014 return -TARGET_EFAULT;
1021 case IP_ROUTER_ALERT:
1025 case IP_MTU_DISCOVER:
1031 case IP_MULTICAST_TTL:
1032 case IP_MULTICAST_LOOP:
1033 if (get_user_u32(len, optlen))
1034 return -TARGET_EFAULT;
1036 return -TARGET_EINVAL;
1038 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1041 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1043 if (put_user_u32(len, optlen)
1044 || put_user_u8(val, optval_addr))
1045 return -TARGET_EFAULT;
1047 if (len > sizeof(int))
1049 if (put_user_u32(len, optlen)
1050 || put_user_u32(val, optval_addr))
1051 return -TARGET_EFAULT;
1055 ret = -TARGET_ENOPROTOOPT;
1061 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1063 ret = -TARGET_EOPNOTSUPP;
1070 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1071 * other lock functions have a return code of 0 for failure.
1073 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1074 int count, int copy)
1076 struct target_iovec *target_vec;
1080 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1082 return -TARGET_EFAULT;
1083 for(i = 0;i < count; i++) {
1084 base = tswapl(target_vec[i].iov_base);
1085 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1086 if (vec[i].iov_len != 0) {
1087 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1088 /* Don't check lock_user return value. We must call writev even
1089 if a element has invalid base address. */
1091 /* zero length pointer is ignored */
1092 vec[i].iov_base = NULL;
1095 unlock_user (target_vec, target_addr, 0);
1099 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1100 int count, int copy)
1102 struct target_iovec *target_vec;
1106 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1108 return -TARGET_EFAULT;
1109 for(i = 0;i < count; i++) {
1110 if (target_vec[i].iov_base) {
1111 base = tswapl(target_vec[i].iov_base);
1112 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1115 unlock_user (target_vec, target_addr, 0);
1120 /* do_socket() Must return target values and target errnos. */
1121 static abi_long do_socket(int domain, int type, int protocol)
1123 #if defined(TARGET_MIPS)
1125 case TARGET_SOCK_DGRAM:
1128 case TARGET_SOCK_STREAM:
1131 case TARGET_SOCK_RAW:
1134 case TARGET_SOCK_RDM:
1137 case TARGET_SOCK_SEQPACKET:
1138 type = SOCK_SEQPACKET;
1140 case TARGET_SOCK_PACKET:
1145 if (domain == PF_NETLINK)
1146 return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
1147 return get_errno(socket(domain, type, protocol));
1150 /* do_bind() Must return target values and target errnos. */
1151 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1154 void *addr = alloca(addrlen);
1156 target_to_host_sockaddr(addr, target_addr, addrlen);
1157 return get_errno(bind(sockfd, addr, addrlen));
1160 /* do_connect() Must return target values and target errnos. */
1161 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1164 void *addr = alloca(addrlen);
1166 target_to_host_sockaddr(addr, target_addr, addrlen);
1167 return get_errno(connect(sockfd, addr, addrlen));
1170 /* do_sendrecvmsg() Must return target values and target errnos. */
1171 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1172 int flags, int send)
1175 struct target_msghdr *msgp;
1179 abi_ulong target_vec;
1182 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1186 return -TARGET_EFAULT;
1187 if (msgp->msg_name) {
1188 msg.msg_namelen = tswap32(msgp->msg_namelen);
1189 msg.msg_name = alloca(msg.msg_namelen);
1190 target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1193 msg.msg_name = NULL;
1194 msg.msg_namelen = 0;
1196 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1197 msg.msg_control = alloca(msg.msg_controllen);
1198 msg.msg_flags = tswap32(msgp->msg_flags);
1200 count = tswapl(msgp->msg_iovlen);
1201 vec = alloca(count * sizeof(struct iovec));
1202 target_vec = tswapl(msgp->msg_iov);
1203 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1204 msg.msg_iovlen = count;
1208 ret = target_to_host_cmsg(&msg, msgp);
1210 ret = get_errno(sendmsg(fd, &msg, flags));
1212 ret = get_errno(recvmsg(fd, &msg, flags));
1213 if (!is_error(ret)) {
1215 ret = host_to_target_cmsg(msgp, &msg);
1220 unlock_iovec(vec, target_vec, count, !send);
1221 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1225 /* do_accept() Must return target values and target errnos. */
1226 static abi_long do_accept(int fd, abi_ulong target_addr,
1227 abi_ulong target_addrlen_addr)
1233 if (get_user_u32(addrlen, target_addrlen_addr))
1234 return -TARGET_EFAULT;
1236 addr = alloca(addrlen);
1238 ret = get_errno(accept(fd, addr, &addrlen));
1239 if (!is_error(ret)) {
1240 host_to_target_sockaddr(target_addr, addr, addrlen);
1241 if (put_user_u32(addrlen, target_addrlen_addr))
1242 ret = -TARGET_EFAULT;
1247 /* do_getpeername() Must return target values and target errnos. */
1248 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1249 abi_ulong target_addrlen_addr)
1255 if (get_user_u32(addrlen, target_addrlen_addr))
1256 return -TARGET_EFAULT;
1258 addr = alloca(addrlen);
1260 ret = get_errno(getpeername(fd, addr, &addrlen));
1261 if (!is_error(ret)) {
1262 host_to_target_sockaddr(target_addr, addr, addrlen);
1263 if (put_user_u32(addrlen, target_addrlen_addr))
1264 ret = -TARGET_EFAULT;
1269 /* do_getsockname() Must return target values and target errnos. */
1270 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1271 abi_ulong target_addrlen_addr)
1277 if (get_user_u32(addrlen, target_addrlen_addr))
1278 return -TARGET_EFAULT;
1280 addr = alloca(addrlen);
1282 ret = get_errno(getsockname(fd, addr, &addrlen));
1283 if (!is_error(ret)) {
1284 host_to_target_sockaddr(target_addr, addr, addrlen);
1285 if (put_user_u32(addrlen, target_addrlen_addr))
1286 ret = -TARGET_EFAULT;
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        /* Store both host fds into the guest's two-element int array.
           A failed store after a successful socketpair() leaves the fds
           open but reports EFAULT — matches kernel semantics. */
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
    host_msg = lock_user(VERIFY_READ, msg, len, 1);
        return -TARGET_EFAULT;
    /* target_addr != 0 selects sendto(); 0 means plain send()
       (see the SOCKOP_send caller which passes 0, 0). */
        addr = alloca(addrlen);
        /* NOTE(review): target_to_host_sockaddr return value is ignored;
           a malformed guest sockaddr is passed through silently. */
        target_to_host_sockaddr(addr, target_addr, addrlen);
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
        ret = get_errno(send(fd, host_msg, len, flags));
    /* Read-only lock: nothing to copy back, hence length 0. */
    unlock_user(host_msg, msg, 0);
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        return -TARGET_EFAULT;
    /* target_addr != 0 selects recvfrom() with address reporting;
       0 degenerates to plain recv() (see SOCKOP_recv caller). */
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(recv(fd, host_msg, len, flags));
    if (!is_error(ret)) {
            /* Only the recvfrom() path reaches here with addr set; copy
               the sender's address and updated length to the guest. */
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
        /* Success: commit 'len' bytes of received data to guest memory. */
        unlock_user(host_msg, msg, len);
        /* Failure: release the lock without copying anything back. */
        unlock_user(host_msg, msg, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Demultiplexer for the single-entry socketcall(2) syscall used by
 * several targets: 'num' selects the operation and 'vptr' points at a
 * guest array of abi_ulong arguments, unpacked 'n' bytes at a time.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
    /* Stride of one argument slot in the guest argument array. */
    const int n = sizeof(abi_ulong);

        /* SOCKOP_socket: (domain, type, protocol) */
        int domain, type, protocol;

        if (get_user_s32(domain, vptr)
            || get_user_s32(type, vptr + n)
            || get_user_s32(protocol, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_socket(domain, type, protocol);

        /* SOCKOP_bind: (sockfd, addr, addrlen) */
        abi_ulong target_addr;

        if (get_user_s32(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_u32(addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_bind(sockfd, target_addr, addrlen);

    case SOCKOP_connect:
        /* (sockfd, addr, addrlen) */
        abi_ulong target_addr;

        if (get_user_s32(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_u32(addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_connect(sockfd, target_addr, addrlen);

        /* SOCKOP_listen: (sockfd, backlog) — no guest pointers, call direct. */
        int sockfd, backlog;

        if (get_user_s32(sockfd, vptr)
            || get_user_s32(backlog, vptr + n))
            return -TARGET_EFAULT;

        ret = get_errno(listen(sockfd, backlog));

        /* SOCKOP_accept: (sockfd, addr, addrlen-ptr) */
        abi_ulong target_addr, target_addrlen;

        if (get_user_s32(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_u32(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_accept(sockfd, target_addr, target_addrlen);

    case SOCKOP_getsockname:
        abi_ulong target_addr, target_addrlen;

        if (get_user_s32(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_u32(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_getsockname(sockfd, target_addr, target_addrlen);

    case SOCKOP_getpeername:
        abi_ulong target_addr, target_addrlen;

        if (get_user_s32(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_u32(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_getpeername(sockfd, target_addr, target_addrlen);

    case SOCKOP_socketpair:
        /* (domain, type, protocol, int tab[2]) */
        int domain, type, protocol;

        if (get_user_s32(domain, vptr)
            || get_user_s32(type, vptr + n)
            || get_user_s32(protocol, vptr + 2 * n)
            || get_user_ual(tab, vptr + 3 * n))
            return -TARGET_EFAULT;

        ret = do_socketpair(domain, type, protocol, tab);

        /* SOCKOP_send: no address — pass 0,0 so do_sendto() uses send(). */
        if (get_user_s32(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_s32(flags, vptr + 3 * n))
            return -TARGET_EFAULT;

        ret = do_sendto(sockfd, msg, len, flags, 0, 0);

        /* SOCKOP_recv: no address — pass 0,0 so do_recvfrom() uses recv(). */
        if (get_user_s32(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_s32(flags, vptr + 3 * n))
            return -TARGET_EFAULT;

        ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);

        /* SOCKOP_sendto: full six-argument form. */
        if (get_user_s32(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_s32(flags, vptr + 3 * n)
            || get_user_ual(addr, vptr + 4 * n)
            || get_user_u32(addrlen, vptr + 5 * n))
            return -TARGET_EFAULT;

        ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);

    case SOCKOP_recvfrom:
        if (get_user_s32(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_s32(flags, vptr + 3 * n)
            || get_user_ual(addr, vptr + 4 * n)
            || get_user_u32(addrlen, vptr + 5 * n))
            return -TARGET_EFAULT;

        ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);

    case SOCKOP_shutdown:
        /* (sockfd, how) — no guest pointers, call direct. */
        if (get_user_s32(sockfd, vptr)
            || get_user_s32(how, vptr + n))
            return -TARGET_EFAULT;

        ret = get_errno(shutdown(sockfd, how));

    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
        /* Shared unmarshalling; direction decided by 'num' below. */
        abi_ulong target_msg;

        if (get_user_s32(fd, vptr)
            || get_user_ual(target_msg, vptr + n)
            || get_user_s32(flags, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_sendrecvmsg(fd, target_msg, flags,
                             (num == SOCKOP_sendmsg));

    case SOCKOP_setsockopt:
        if (get_user_s32(sockfd, vptr)
            || get_user_s32(level, vptr + n)
            || get_user_s32(optname, vptr + 2 * n)
            || get_user_ual(optval, vptr + 3 * n)
            || get_user_u32(optlen, vptr + 4 * n))
            return -TARGET_EFAULT;

        ret = do_setsockopt(sockfd, level, optname, optval, optlen);

    case SOCKOP_getsockopt:
        if (get_user_s32(sockfd, vptr)
            || get_user_s32(level, vptr + n)
            || get_user_s32(optname, vptr + 2 * n)
            || get_user_ual(optval, vptr + 3 * n)
            || get_user_u32(optlen, vptr + 4 * n))
            return -TARGET_EFAULT;

        ret = do_getsockopt(sockfd, level, optname, optval, optlen);

        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
#define N_SHM_REGIONS 32

/* Fixed-size table tracking guest SysV shm attachments so that
   do_shmdt() can clear page flags; start == 0 marks a free slot
   (see do_shmat()/do_shmdt() below). */
static struct shm_region {
} shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of struct ipc_perm (common header of all SysV IPC
   *id_ds structures). */
struct target_ipc_perm
    unsigned short int mode;
    unsigned short int __pad1;
    unsigned short int __seq;
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
/* Guest-ABI layout of struct semid_ds; __unusedN pads mirror the
   kernel's 32-bit layout. */
struct target_semid_ds
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
/* Copy the ipc_perm header from a guest semid_ds at target_addr into
   *host_ip, byteswapping each field.  Returns 0 / -TARGET_EFAULT. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* The perm header sits at the front of the guest structure, so
       target_addr may point at any of the *id_ds variants. */
    target_ip=&(target_sd->sem_perm);
    host_ip->__key = tswapl(target_ip->__key);
    host_ip->uid = tswapl(target_ip->uid);
    host_ip->gid = tswapl(target_ip->gid);
    host_ip->cuid = tswapl(target_ip->cuid);
    host_ip->cgid = tswapl(target_ip->cgid);
    host_ip->mode = tswapl(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_ipc_perm(): write *host_ip into the
   guest ipc_perm header at target_addr.  Returns 0 / -TARGET_EFAULT. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapl(host_ip->__key);
    target_ip->uid = tswapl(host_ip->uid);
    target_ip->gid = tswapl(host_ip->gid);
    target_ip->cuid = tswapl(host_ip->cuid);
    target_ip->cgid = tswapl(host_ip->cgid);
    target_ip->mode = tswapl(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);
/* Convert a guest semid_ds at target_addr into *host_sd (perm header
   plus the semaphore-specific fields).  Returns 0 / -TARGET_EFAULT. */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* NOTE(review): this error path returns while target_sd is still
       locked — confirm unlock_user_struct is not required here. */
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
    host_sd->sem_otime = tswapl(target_sd->sem_otime);
    host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
1713 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
1714 struct semid_ds *host_sd)
1716 struct target_semid_ds *target_sd;
1718 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
1719 return -TARGET_EFAULT;
1720 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
1721 return -TARGET_EFAULT;;
1722 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
1723 target_sd->sem_otime = tswapl(host_sd->sem_otime);
1724 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
1725 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-ABI layout of struct seminfo (IPC_INFO/SEM_INFO result). */
struct target_seminfo {

/* Copy a host seminfo out to the guest; every field is an int-sized
   scalar handled by __put_user.  Returns 0 / -TARGET_EFAULT. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    /* Host-side semun members (used when the libc does not define
       union semun itself). */
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;

/* Guest view of the semctl() fourth argument. */
union target_semun {
1776 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
1777 abi_ulong target_addr)
1780 unsigned short *array;
1782 struct semid_ds semid_ds;
1785 semun.buf = &semid_ds;
1787 ret = semctl(semid, 0, IPC_STAT, semun);
1789 return get_errno(ret);
1791 nsems = semid_ds.sem_nsems;
1793 *host_array = malloc(nsems*sizeof(unsigned short));
1794 array = lock_user(VERIFY_READ, target_addr,
1795 nsems*sizeof(unsigned short), 1);
1797 return -TARGET_EFAULT;
1799 for(i=0; i<nsems; i++) {
1800 __get_user((*host_array)[i], &array[i]);
1802 unlock_user(array, target_addr, 0);
/* Write the host array *host_array back to the guest's unsigned-short
   array at target_addr; the count comes from IPC_STAT, as in
   target_to_host_semarray().  Returns 0 / negative target errno. */
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
    unsigned short *array;
    struct semid_ds semid_ds;

    semun.buf = &semid_ds;
    ret = semctl(semid, 0, IPC_STAT, semun);
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    /* Commit the updated values to guest memory. */
    unlock_user(array, target_addr, 1);
/* Emulate semctl(2): translate the guest's semun argument per command,
   call the host semctl, and translate results back.
   Returns a target errno convention value. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
    struct semid_ds dsarg;
    unsigned short *array;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;

        /* IPC_STAT/IPC_SET family: whole semid_ds round-trip. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);

        /* GETVAL/SETVAL family: plain integer value, byteswapped. */
        arg.val = tswapl(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswapl(arg.val);

        /* GETALL/SETALL: whole unsigned-short array round-trip. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);

        /* IPC_INFO/SEM_INFO: seminfo copied out only. */
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);

        /* Commands with no argument (e.g. IPC_RMID, GETPID...). */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest-ABI layout of struct sembuf (semop operation descriptor). */
struct target_sembuf {
    unsigned short sem_num;
1905 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
1906 abi_ulong target_addr,
1909 struct target_sembuf *target_sembuf;
1912 target_sembuf = lock_user(VERIFY_READ, target_addr,
1913 nsops*sizeof(struct target_sembuf), 1);
1915 return -TARGET_EFAULT;
1917 for(i=0; i<nsops; i++) {
1918 __put_user(target_sembuf[i].sem_num, &host_sembuf[i].sem_num);
1919 __put_user(target_sembuf[i].sem_op, &host_sembuf[i].sem_op);
1920 __put_user(target_sembuf[i].sem_flg, &host_sembuf[i].sem_flg);
1923 unlock_user(target_sembuf, target_addr, 0);
1928 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
1930 struct sembuf sops[nsops];
1932 if (target_to_host_sembuf(sops, ptr, nsops))
1933 return -TARGET_EFAULT;
1935 return semop(semid, sops, nsops);
/* Guest-ABI layout of struct msqid_ds; on 32-bit targets each time_t
   field is followed by an explicit pad word to match the kernel. */
struct target_msqid_ds
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
    abi_ulong __msg_cbytes;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
/* Convert a guest msqid_ds at target_addr into *host_md.
   Returns 0 / -TARGET_EFAULT. */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    /* NOTE(review): early return leaves target_md locked — same pattern
       as target_to_host_semid_ds; confirm intentional. */
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapl(target_md->msg_stime);
    host_md->msg_rtime = tswapl(target_md->msg_rtime);
    host_md->msg_ctime = tswapl(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapl(target_md->msg_qnum);
    host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
    host_md->msg_lspid = tswapl(target_md->msg_lspid);
    host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
/* Inverse of target_to_host_msqid_ds(): write *host_md out to the
   guest msqid_ds at target_addr.  Returns 0 / -TARGET_EFAULT. */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapl(host_md->msg_stime);
    target_md->msg_rtime = tswapl(host_md->msg_rtime);
    target_md->msg_ctime = tswapl(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapl(host_md->msg_qnum);
    target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
    target_md->msg_lspid = tswapl(host_md->msg_lspid);
    target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
/* Guest-ABI layout of struct msginfo (IPC_INFO/MSG_INFO result). */
struct target_msginfo {
    unsigned short int msgseg;

/* Copy a host msginfo out to the guest; all scalar fields, handled by
   __put_user.  Returns 0 / -TARGET_EFAULT. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulate msgctl(2): dispatch on cmd, converting the guest buffer in
   and/or out as the command requires.  Returns target errnos. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

        /* IPC_STAT/IPC_SET: full msqid_ds round-trip. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;

        /* IPC_RMID takes no buffer. */
        ret = get_errno(msgctl(msgid, cmd, NULL));

        /* IPC_INFO/MSG_INFO: kernel expects a msqid_ds* but fills a
           msginfo — hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
/* Guest-ABI layout of struct msgbuf (mtype + flexible mtext). */
struct target_msgbuf {
2070 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2071 unsigned int msgsz, int msgflg)
2073 struct target_msgbuf *target_mb;
2074 struct msgbuf *host_mb;
2077 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2078 return -TARGET_EFAULT;
2079 host_mb = malloc(msgsz+sizeof(long));
2080 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2081 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2082 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2084 unlock_user_struct(target_mb, msgp, 0);
/* Emulate msgrcv(2): receive into a temporary host msgbuf, then copy
   mtype and the received payload back to the guest.  Returns target
   errnos; on success ret is the received byte count. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* NOTE(review): malloc unchecked here too — same issue as do_msgsnd. */
    host_mb = malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));

        /* Payload lives right after the abi_ulong mtype in guest memory. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
        /* Copy exactly the number of bytes actually received. */
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);

    target_mb->mtype = tswapl(host_mb->mtype);

    unlock_user_struct(target_mb, msgp, 1);
/* Guest-ABI layout of struct shmid_ds; 32-bit targets carry pad words
   after each time field, mirroring target_msqid_ds above. */
struct target_shmid_ds
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;
    abi_ulong shm_atime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
    abi_ulong shm_dtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
    abi_ulong shm_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;
/* Convert a guest shmid_ds at target_addr into *host_sd.
   Returns 0 / -TARGET_EFAULT. */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    /* NOTE(review): __put_user with a raw target-struct source is the
       inverse of the usual __get_user idiom for this direction —
       equivalent only because both apply a single tswap; verify. */
    __put_user(target_sd->shm_segsz, &host_sd->shm_segsz);
    __put_user(target_sd->shm_atime, &host_sd->shm_atime);
    __put_user(target_sd->shm_dtime, &host_sd->shm_dtime);
    __put_user(target_sd->shm_ctime, &host_sd->shm_ctime);
    __put_user(target_sd->shm_cpid, &host_sd->shm_cpid);
    __put_user(target_sd->shm_lpid, &host_sd->shm_lpid);
    __put_user(target_sd->shm_nattch, &host_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
/* Write *host_sd out to the guest shmid_ds at target_addr.
   Returns 0 / -TARGET_EFAULT. */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
/* Guest-ABI layout of struct shminfo (IPC_INFO result). */
struct target_shminfo {

/* Copy a host shminfo out to the guest.  Returns 0 / -TARGET_EFAULT. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest-ABI layout of struct shm_info (SHM_INFO result). */
struct target_shm_info {
    abi_ulong swap_attempts;
    abi_ulong swap_successes;

/* Copy a host shm_info out to the guest.  Returns 0 / -TARGET_EFAULT. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(2): dispatch on cmd and convert the guest buffer in
   and/or out accordingly.  Returns target errnos. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

        /* IPC_STAT/IPC_SET family: full shmid_ds round-trip. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;

        /* IPC_INFO: kernel fills a shminfo through the shmid_ds* arg. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;

        /* SHM_INFO: same cast trick for shm_info. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;

        /* Commands with no buffer (e.g. IPC_RMID, SHM_LOCK...). */
        ret = get_errno(shmctl(shmid, cmd, NULL));
/* Emulate shmat(2).  Attaches on the host at the address corresponding
   to the guest address, records the mapping in shm_regions[] and marks
   the guest pages valid.  *raddr receives the host attach address.
   Only works with linear guest<->host mappings (g2h/h2g). */
static inline abi_long do_shmat(int shmid, abi_ulong shmaddr, int shmflg,
                                unsigned long *raddr)
    struct shmid_ds shm_info;

    /* SHM_* flags are the same on all linux platforms */
    *raddr = (unsigned long) shmat(shmid, g2h(shmaddr), shmflg);

        return get_errno(*raddr);

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        shmdt((void *) *raddr);
        return get_errno(ret);

    /* Make the attached range readable (and writable unless RDONLY)
       from the guest's point of view. */
    page_set_flags(h2g(*raddr), h2g(*raddr) + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the segment so do_shmdt() can clear the flags later.
       NOTE(review): a full table drops the record silently. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = h2g(*raddr);
            shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): clear the page flags recorded by do_shmat() for
   this attachment, free the table slot, then detach on the host. */
static inline abi_long do_shmdt(abi_ulong shmaddr)
    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            /* NOTE(review): second arg looks like it should be
               shmaddr + size, not size — page_set_flags takes a range;
               confirm against its prototype. */
            page_set_flags(shmaddr, shm_regions[i].size, 0);

    return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos.
 *
 * Demultiplexer for the single-entry ipc(2) syscall: 'call' encodes
 * both the operation (low 16 bits) and a version (high 16 bits). */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
    version = call >> 16;

        ret = do_semop(first, ptr, second);

        ret = get_errno(semget(first, second, third));

        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);

        ret = get_errno(msgget(first, second));

        ret = do_msgsnd(first, ptr, second, third);

        ret = do_msgctl(first, second, ptr);

        /* Old-style MSGRCV (version 0) passes msgp/msgtyp boxed in an
           ipc_kludge structure instead of as direct arguments. */
        struct target_ipc_kludge {
            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;

            ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);

            unlock_user_struct(tmp, ptr, 0);

        ret = do_msgrcv(first, ptr, second, fifth, third);

        /* SHMAT: result address is stored through 'third'. */
        unsigned long raddr;
        ret = do_shmat(first, ptr, second, &raddr);

        ret = put_user_ual(raddr, third);

        /* Unsupported SHMAT version. */
        ret = -TARGET_EINVAL;

        ret = do_shmdt(ptr);

        ret = get_errno(shmget(first, second, third));

        ret = do_shmctl(first, second, third);

        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
/* kernel structure types definitions */
/* First pass over syscall_types.h: generate a STRUCT_<name> enum tag
   for every kernel structure the ioctl thunks know about. */
#define STRUCT(name, list...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
#include "syscall_types.h"
#undef STRUCT_SPECIAL

/* Second pass: emit the argtype descriptor table for each structure,
   terminated by TYPE_NULL, consumed by the thunk_convert machinery. */
#define STRUCT(name, list...) static const argtype struct_ ## name ## _def[] = { list, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT_SPECIAL

/* One table row per supported ioctl: target number, host number,
   access direction and argument type description. */
typedef struct IOCTLEntry {
    unsigned int target_cmd;
    unsigned int host_cmd;
    const argtype arg_type[5];

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096

static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, types...) \
    { TARGET_ ## cmd, cmd, #cmd, access, { types } },
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos.
 *
 * Look the command up in ioctl_entries[], then convert the third
 * argument between guest and host layouts according to the entry's
 * access direction and argtype description. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
    const IOCTLEntry *ie;
    const argtype *arg_type;
    /* Scratch buffer for the host-layout copy of the argument struct. */
    uint8_t buf_temp[MAX_STRUCT_SIZE];

    /* Linear search; the table is terminated by a zero target_cmd. */
    if (ie->target_cmd == 0) {
        gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
        return -TARGET_ENOSYS;
    if (ie->target_cmd == cmd)

    arg_type = ie->arg_type;

    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);

    switch(arg_type[0]) {
        /* No argument. */
        ret = get_errno(ioctl(fd, ie->host_cmd));

        /* Plain integer argument: pass through unchanged. */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));

        /* Pointer-to-struct argument: thunk-convert per direction. */
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
            /* Read-only for the guest: host fills, copy out. */
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);

            /* Write-only: copy in, host consumes. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));

            /* Read-write: copy in, call, copy back out on success. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);

        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
/* Bitmask translation tables between target and host termios flag
   words.  Each row is {target mask, target bits, host mask, host bits};
   for multi-bit fields (NLDLY, CBAUD, CSIZE...) one row per value. */

/* Input modes (c_iflag). */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },

/* Output modes (c_oflag), including the delay-style multi-bit fields. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },

/* Control modes (c_cflag): baud rates, character size and the rest. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },

/* Local modes (c_lflag). */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/* Convert a guest struct termios (src) into a host one (dst): the four
   flag words go through the bitmask tables above, control characters
   are remapped index-by-index via the TARGET_V* constants. */
static void target_to_host_termios (void *dst, const void *src)
    struct host_termios *host = dst;
    const struct target_termios *target = src;

        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Control characters: indices differ between target and host. */
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Inverse of target_to_host_termios(): convert a host termios (src)
 * into the guest layout (dst).  Flag words go through
 * host_to_target_bitmask() and are then byte-swapped for the guest;
 * control characters are copied via the V* -> TARGET_V* mapping.
 * NOTE(review): elided view -- the assignments storing the converted
 * flag words into target->c_iflag etc. are not visible here. */
2684 static void host_to_target_termios (void *dst, const void *src)
2686     struct target_termios *target = dst;
2687     const struct host_termios *host = src;
2690         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
2692         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
2694         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
2696         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
2697     target->c_line = host->c_line;
2699     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
2700     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
2701     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
2702     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
2703     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
2704     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
2705     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
2706     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
2707     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
2708     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
2709     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
2710     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
2711     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
2712     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
2713     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
2714     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
2715     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk-layer descriptor for termios ioctls (TCGETS/TCSETS family):
 * per-direction conversion callbacks plus the size and alignment of
 * the target and host termios structures.  Registered through
 * STRUCT_SPECIAL in syscall_init(). */
2718 static const StructEntry struct_termios_def = {
2719     .convert = { host_to_target_termios, target_to_host_termios },
2720     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
2721     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* Translation table for mmap(2) flag bits between target and host.
 * Same (target mask, target bits, host mask, host bits) layout as the
 * termios tables above.
 * NOTE(review): elided view -- the closing "};" is not visible here. */
2724 static bitmask_transtbl mmap_flags_tbl[] = {
2725 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
2726 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
2727 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
2728 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
2729 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
2730 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
2731 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
2732 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
/* Translation table for open(2)/fcntl(2) file status flags.  The two
 * O_ACCMODE entries use the multi-bit access-mode mask with distinct
 * match values (O_RDONLY is 0 and so needs no entry).  O_DIRECT is
 * conditional because not every host libc defines it.
 * NOTE(review): elided view -- the #endif and closing "};" are not
 * visible here. */
2736 static bitmask_transtbl fcntl_flags_tbl[] = {
2737 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
2738 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
2739 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
2740 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
2741 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
2742 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
2743 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
2744 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
2745 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
2746 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
2747 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
2748 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
2749 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
2750 #if defined(O_DIRECT)
2751 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
2756 #if defined(TARGET_I386)
2758 /* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated guest LDT; lazily allocated in
 * write_ldt() via target_mmap(). */
2759 static uint8_t *ldt_table;
/* modify_ldt(2) read path: copy up to 'bytecount' bytes of the
 * emulated LDT into guest memory at 'ptr'.  Returns -TARGET_EFAULT if
 * the guest buffer cannot be locked for writing.
 * NOTE(review): elided view -- the clamping of 'size' to 'bytecount',
 * the handling of a still-NULL ldt_table, and the final return are
 * not visible in this chunk. */
2761 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
2768     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
2769     if (size > bytecount)
2771     p = lock_user(VERIFY_WRITE, ptr, size, 0);
2773         return -TARGET_EFAULT;
2774     /* ??? Should this by byteswapped? */
2775     memcpy(p, ldt_table, size);
2776     unlock_user(p, ptr, size);
2780 /* XXX: add locking support */
/* modify_ldt(2) write path: read a target_modify_ldt_ldt_s descriptor
 * from guest memory at 'ptr', validate it, lazily allocate the guest
 * LDT on first use, then encode the two 32-bit descriptor words
 * (entry_1/entry_2) exactly as the Linux kernel does and install them
 * into ldt_table.  'oldmode' selects the legacy modify_ldt semantics.
 * NOTE(review): elided view -- several lines are missing (the "{"
 * after declarations, parts of the contents==3/oldmode checks, the
 * clear-entry branch body, the seg_32bit/contents/lm bits of entry_2,
 * and the final return). */
2781 static abi_long write_ldt(CPUX86State *env,
2782 abi_ulong ptr, unsigned long bytecount, int oldmode)
2784     struct target_modify_ldt_ldt_s ldt_info;
2785     struct target_modify_ldt_ldt_s *target_ldt_info;
2786     int seg_32bit, contents, read_exec_only, limit_in_pages;
2787     int seg_not_present, useable, lm;
2788     uint32_t *lp, entry_1, entry_2;
2790     if (bytecount != sizeof(ldt_info))
2791         return -TARGET_EINVAL;
2792     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
2793         return -TARGET_EFAULT;
    /* Byte-swap the guest descriptor into host order. */
2794     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
2795     ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
2796     ldt_info.limit = tswap32(target_ldt_info->limit);
2797     ldt_info.flags = tswap32(target_ldt_info->flags);
2798     unlock_user_struct(target_ldt_info, ptr, 0);
2800     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
2801         return -TARGET_EINVAL;
    /* Unpack the kernel's packed flags word (see modify_ldt(2)). */
2802     seg_32bit = ldt_info.flags & 1;
2803     contents = (ldt_info.flags >> 1) & 3;
2804     read_exec_only = (ldt_info.flags >> 3) & 1;
2805     limit_in_pages = (ldt_info.flags >> 4) & 1;
2806     seg_not_present = (ldt_info.flags >> 5) & 1;
2807     useable = (ldt_info.flags >> 6) & 1;
2811     lm = (ldt_info.flags >> 7) & 1;
2813     if (contents == 3) {
2815             return -TARGET_EINVAL;
2816         if (seg_not_present == 0)
2817             return -TARGET_EINVAL;
2819     /* allocate the LDT */
2821         env->ldt.base = target_mmap(0,
2822                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
2823                                     PROT_READ|PROT_WRITE,
2824                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
2825         if (env->ldt.base == -1)
2826             return -TARGET_ENOMEM;
2827         memset(g2h(env->ldt.base), 0,
2828                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
2829         env->ldt.limit = 0xffff;
2830         ldt_table = g2h(env->ldt.base);
2833     /* NOTE: same code as Linux kernel */
2834     /* Allow LDTs to be cleared by the user. */
2835     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
2838             read_exec_only == 1 &&
2840             limit_in_pages == 0 &&
2841             seg_not_present == 1 &&
    /* Build the hardware segment descriptor words. */
2849     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
2850         (ldt_info.limit & 0x0ffff);
2851     entry_2 = (ldt_info.base_addr & 0xff000000) |
2852         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
2853         (ldt_info.limit & 0xf0000) |
2854         ((read_exec_only ^ 1) << 9) |
2856         ((seg_not_present ^ 1) << 15) |
2858         (limit_in_pages << 23) |
    /* The AVL ("useable") bit is only honoured in non-oldmode calls --
     * presumably the elided line guards this; verify upstream. */
2862         entry_2 |= (useable << 20);
2864     /* Install the new entry ... */
2866     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
2867     lp[0] = tswap32(entry_1);
2868     lp[1] = tswap32(entry_2);
2872 /* specific and weird i386 syscalls */
/* modify_ldt(2) dispatcher: 'func' selects read vs write; writes are
 * forwarded to write_ldt() with oldmode=1 for the legacy entry point
 * and oldmode=0 for the newer one.  Unknown funcs yield ENOSYS.
 * NOTE(review): elided view -- the switch statement, its case labels
 * and the final return are not visible here. */
2873 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
2874                               unsigned long bytecount)
2880         ret = read_ldt(ptr, bytecount);
2883         ret = write_ldt(env, ptr, bytecount, 1);
2886         ret = write_ldt(env, ptr, bytecount, 0);
2889         ret = -TARGET_ENOSYS;
2895 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2): install a TLS descriptor into one of
 * the GDT TLS slots.  If the guest passes entry_number == -1, pick a
 * free slot in [TARGET_GDT_ENTRY_TLS_MIN, TARGET_GDT_ENTRY_TLS_MAX]
 * and write the chosen index back to the guest struct.  Descriptor
 * encoding mirrors write_ldt() above (same as the Linux kernel).
 * NOTE(review): elided view -- the "{" after declarations, parts of
 * the clear-entry branch, some entry_2 bits, and the final return
 * are not visible here. */
2896 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
2898     uint64_t *gdt_table = g2h(env->gdt.base);
2899     struct target_modify_ldt_ldt_s ldt_info;
2900     struct target_modify_ldt_ldt_s *target_ldt_info;
2901     int seg_32bit, contents, read_exec_only, limit_in_pages;
2902     int seg_not_present, useable, lm;
2903     uint32_t *lp, entry_1, entry_2;
2906     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
2907     if (!target_ldt_info)
2908         return -TARGET_EFAULT;
2909     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
2910     ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
2911     ldt_info.limit = tswap32(target_ldt_info->limit);
2912     ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "allocate me a free TLS slot". */
2913     if (ldt_info.entry_number == -1) {
2914         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
2915             if (gdt_table[i] == 0) {
2916                 ldt_info.entry_number = i;
2917                 target_ldt_info->entry_number = tswap32(i);
2922     unlock_user_struct(target_ldt_info, ptr, 1);
2924     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
2925         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
2926         return -TARGET_EINVAL;
    /* Unpack the packed flags word (same layout as modify_ldt). */
2927     seg_32bit = ldt_info.flags & 1;
2928     contents = (ldt_info.flags >> 1) & 3;
2929     read_exec_only = (ldt_info.flags >> 3) & 1;
2930     limit_in_pages = (ldt_info.flags >> 4) & 1;
2931     seg_not_present = (ldt_info.flags >> 5) & 1;
2932     useable = (ldt_info.flags >> 6) & 1;
2936     lm = (ldt_info.flags >> 7) & 1;
2939     if (contents == 3) {
2940         if (seg_not_present == 0)
2941             return -TARGET_EINVAL;
2944     /* NOTE: same code as Linux kernel */
2945     /* Allow LDTs to be cleared by the user. */
2946     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
2947         if ((contents == 0             &&
2948              read_exec_only == 1       &&
2950              limit_in_pages == 0       &&
2951              seg_not_present == 1      &&
    /* Build the descriptor words, kernel-style. */
2959     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
2960         (ldt_info.limit & 0x0ffff);
2961     entry_2 = (ldt_info.base_addr & 0xff000000) |
2962         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
2963         (ldt_info.limit & 0xf0000) |
2964         ((read_exec_only ^ 1) << 9) |
2966         ((seg_not_present ^ 1) << 15) |
2968         (limit_in_pages << 23) |
2973     /* Install the new entry ... */
2975     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
2976     lp[0] = tswap32(entry_1);
2977     lp[1] = tswap32(entry_2);
/* Emulate get_thread_area(2): read the guest-selected GDT TLS entry,
 * decode the two descriptor words back into base/limit/flags, and
 * write them into the guest's target_modify_ldt_ldt_s at 'ptr'.
 * Exact inverse of the encoding in do_set_thread_area() above.
 * NOTE(review): elided view -- the "{" after declarations and the
 * final "return 0;" are not visible here. */
2981 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
2983     struct target_modify_ldt_ldt_s *target_ldt_info;
2984     uint64_t *gdt_table = g2h(env->gdt.base);
2985     uint32_t base_addr, limit, flags;
2986     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
2987     int seg_not_present, useable, lm;
2988     uint32_t *lp, entry_1, entry_2;
2990     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
2991     if (!target_ldt_info)
2992         return -TARGET_EFAULT;
2993     idx = tswap32(target_ldt_info->entry_number);
2994     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
2995         idx > TARGET_GDT_ENTRY_TLS_MAX) {
2996         unlock_user_struct(target_ldt_info, ptr, 1);
2997         return -TARGET_EINVAL;
2999     lp = (uint32_t *)(gdt_table + idx);
3000     entry_1 = tswap32(lp[0]);
3001     entry_2 = tswap32(lp[1]);
    /* Decode descriptor bits; the ^1 undoes the "present/writable"
     * inversion applied when the entry was encoded. */
3003     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3004     contents = (entry_2 >> 10) & 3;
3005     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3006     seg_32bit = (entry_2 >> 22) & 1;
3007     limit_in_pages = (entry_2 >> 23) & 1;
3008     useable = (entry_2 >> 20) & 1;
3012     lm = (entry_2 >> 21) & 1;
3014     flags = (seg_32bit << 0) | (contents << 1) |
3015         (read_exec_only << 3) | (limit_in_pages << 4) |
3016         (seg_not_present << 5) | (useable << 6) | (lm << 7);
3017     limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3018     base_addr = (entry_1 >> 16) |
3019                 (entry_2 & 0xff000000) |
3020                 ((entry_2 & 0xff) << 16);
3021     target_ldt_info->base_addr = tswapl(base_addr);
3022     target_ldt_info->limit = tswap32(limit);
3023     target_ldt_info->flags = tswap32(flags);
3024     unlock_user_struct(target_ldt_info, ptr, 1);
3027 #endif /* TARGET_I386 && TARGET_ABI32 */
3029 #ifndef TARGET_ABI32
/* x86-64 arch_prctl(2): set or get the FS/GS segment base.  SET loads
 * a null selector into the segment and stores 'addr' as its base; GET
 * copies the current base out to guest memory at 'addr'.
 * NOTE(review): elided view -- the idx = R_GS/R_FS assignments, the
 * break statements and the final return are not visible here. */
3030 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3037     case TARGET_ARCH_SET_GS:
3038     case TARGET_ARCH_SET_FS:
3039         if (code == TARGET_ARCH_SET_GS)
3043         cpu_x86_load_seg(env, idx, 0);
3044         env->segs[idx].base = addr;
3046     case TARGET_ARCH_GET_GS:
3047     case TARGET_ARCH_GET_FS:
3048         if (code == TARGET_ARCH_GET_GS)
3052         val = env->segs[idx].base;
3053         if (put_user(val, addr, abi_ulong))
3054             return -TARGET_EFAULT;
3057         ret = -TARGET_EINVAL;
3064 #endif /* defined(TARGET_I386) */
3066 #if defined(USE_NPTL)
3068 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
/* Serializes thread creation: the parent holds this across the whole
 * clone setup so the child waits until setup is complete. */
3070 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake structure passed from do_fork() to the new thread.
 * NOTE(review): elided view -- the struct's typedef header and its
 * env/thread/tid fields are not visible here. */
3073     pthread_mutex_t mutex;
3074     pthread_cond_t cond;
3077     abi_ulong child_tidptr;
3078     abi_ulong parent_tidptr;
/* NPTL thread trampoline: publish the child tid, restore the signal
 * mask, signal readiness to the parent, then wait for the parent to
 * finish TLS setup before entering the guest CPU loop (loop entry is
 * elided from this view). */
3082 static void *clone_func(void *arg)
3084     new_thread_info *info = arg;
3089     info->tid = gettid();
3090     if (info->child_tidptr)
3091         put_user_u32(info->tid, info->child_tidptr);
3092     if (info->parent_tidptr)
3093         put_user_u32(info->tid, info->parent_tidptr);
3094     /* Enable signals. */
3095     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3096     /* Signal to the parent that we're ready. */
3097     pthread_mutex_lock(&info->mutex);
3098     pthread_cond_broadcast(&info->cond);
3099     pthread_mutex_unlock(&info->mutex);
3100     /* Wait until the parent has finshed initializing the tls state. */
3101     pthread_mutex_lock(&clone_lock);
3102     pthread_mutex_unlock(&clone_lock);
3108 /* this stack is the equivalent of the kernel stack associated with a
3110 #define NEW_STACK_SIZE 8192
/* Non-NPTL trampoline used with raw clone(2); body is elided here. */
3112 static int clone_func(void *arg)
3114     CPUState *env = arg;
3121 /* do_fork() Must return host values and target errnos (unlike most
3122    do_*() functions). */
/* Emulate fork/vfork/clone.  CLONE_VM requests become host threads
 * (pthreads under USE_NPTL, raw clone() otherwise); everything else
 * is emulated with fork().  vfork is deliberately downgraded to fork.
 * NOTE(review): elided view -- several lines are missing throughout
 * (variable declarations, the fork() call and parent-side return,
 * and various closing braces). */
3123 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3124                    abi_ulong parent_tidptr, target_ulong newtls,
3125                    abi_ulong child_tidptr)
3131 #if defined(USE_NPTL)
3132     unsigned int nptl_flags;
3136     /* Emulate vfork() with fork() */
3137     if (flags & CLONE_VFORK)
3138         flags &= ~(CLONE_VFORK | CLONE_VM);
3140     if (flags & CLONE_VM) {
3141 #if defined(USE_NPTL)
3142         new_thread_info info;
3143         pthread_attr_t attr;
    /* TaskState carries the per-thread state plus the new stack. */
3145         ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
3146         init_task_state(ts);
3147         new_stack = ts->stack;
3148         /* we create a new CPU instance. */
3149         new_env = cpu_copy(env);
3150         /* Init regs that differ from the parent.  */
3151         cpu_clone_regs(new_env, newsp);
3152         new_env->opaque = ts;
3153 #if defined(USE_NPTL)
    /* Split off the NPTL-specific flag bits; they are handled here
     * rather than passed to the host. */
3155         flags &= ~CLONE_NPTL_FLAGS2;
3157         /* TODO: Implement CLONE_CHILD_CLEARTID.  */
3158         if (nptl_flags & CLONE_SETTLS)
3159             cpu_set_tls (new_env, newtls);
3161         /* Grab a mutex so that thread setup appears atomic.  */
3162         pthread_mutex_lock(&clone_lock);
3164         memset(&info, 0, sizeof(info));
3165         pthread_mutex_init(&info.mutex, NULL);
3166         pthread_mutex_lock(&info.mutex);
3167         pthread_cond_init(&info.cond, NULL);
3169         if (nptl_flags & CLONE_CHILD_SETTID)
3170             info.child_tidptr = child_tidptr;
3171         if (nptl_flags & CLONE_PARENT_SETTID)
3172             info.parent_tidptr = parent_tidptr;
    /* NOTE(review): pthread_attr_init/setstack return values are
     * assigned to 'ret' but never checked before pthread_create. */
3174         ret = pthread_attr_init(&attr);
3175         ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
3176         /* It is not safe to deliver signals until the child has finished
3177            initializing, so temporarily block all signals.  */
3178         sigfillset(&sigmask);
3179         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3181         ret = pthread_create(&info.thread, &attr, clone_func, &info);
3183         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3184         pthread_attr_destroy(&attr);
3186             /* Wait for the child to initialize.  */
3187             pthread_cond_wait(&info.cond, &info.mutex);
3189             if (flags & CLONE_PARENT_SETTID)
3190                 put_user_u32(ret, parent_tidptr);
3194         pthread_mutex_unlock(&info.mutex);
3195         pthread_cond_destroy(&info.cond);
3196         pthread_mutex_destroy(&info.mutex);
3197         pthread_mutex_unlock(&clone_lock);
3199         if (flags & CLONE_NPTL_FLAGS2)
3201         /* This is probably going to die very quickly, but do it anyway.  */
3203         ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3205         ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3209         /* if no CLONE_VM, we consider it is a fork */
3210         if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3215             /* Child Process.  */
3216             cpu_clone_regs(env, newsp);
3218 #if defined(USE_NPTL)
3219             /* There is a race condition here.  The parent process could
3220                theoretically read the TID in the child process before the child
3221                tid is set.  This would require using either ptrace
3222                (not implemented) or having *_tidptr to point at a shared memory
3223                mapping.  We can't repeat the spinlock hack used above because
3224                the child process gets its own copy of the lock.  */
3225             if (flags & CLONE_CHILD_SETTID)
3226                 put_user_u32(gettid(), child_tidptr);
3227             if (flags & CLONE_PARENT_SETTID)
3228                 put_user_u32(gettid(), parent_tidptr);
3229             ts = (TaskState *)env->opaque;
3230             if (flags & CLONE_SETTLS)
3231                 cpu_set_tls (env, newtls);
3232             /* TODO: Implement CLONE_CHILD_CLEARTID.  */
/* Emulate fcntl(2).  Lock commands convert struct flock/flock64
 * between target and host layouts; flag commands translate via
 * fcntl_flags_tbl; everything else is passed through.
 * NOTE(review): elided view -- the switch header, several case labels
 * (F_GETFL/F_SETFL, F_GETLK64's write-back label, default), break
 * statements and the final return are not visible here.
 * NOTE(review): the "tswap16(...) >> 1" on l_type and the "cmd >> 1"
 * passed to host fcntl() in the *LK64 paths look wrong (a plain
 * tswap16(l_type) and the translated host command would be expected);
 * likewise l_pid is read with tswap16 here but written back with
 * tswapl.  Verify against the target F_* / flock64 definitions before
 * relying on the 64-bit lock paths. */
3241 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
3244     struct target_flock *target_fl;
3245     struct flock64 fl64;
3246     struct target_flock64 *target_fl64;
3250     case TARGET_F_GETLK:
3251         if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3252             return -TARGET_EFAULT;
3253         fl.l_type = tswap16(target_fl->l_type);
3254         fl.l_whence = tswap16(target_fl->l_whence);
3255         fl.l_start = tswapl(target_fl->l_start);
3256         fl.l_len = tswapl(target_fl->l_len);
3257         fl.l_pid = tswapl(target_fl->l_pid);
3258         unlock_user_struct(target_fl, arg, 0);
3259         ret = get_errno(fcntl(fd, cmd, &fl));
    /* On success, copy the (possibly updated) lock back to the guest. */
3261             if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
3262                 return -TARGET_EFAULT;
3263             target_fl->l_type = tswap16(fl.l_type);
3264             target_fl->l_whence = tswap16(fl.l_whence);
3265             target_fl->l_start = tswapl(fl.l_start);
3266             target_fl->l_len = tswapl(fl.l_len);
3267             target_fl->l_pid = tswapl(fl.l_pid);
3268             unlock_user_struct(target_fl, arg, 1);
3272     case TARGET_F_SETLK:
3273     case TARGET_F_SETLKW:
3274         if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3275             return -TARGET_EFAULT;
3276         fl.l_type = tswap16(target_fl->l_type);
3277         fl.l_whence = tswap16(target_fl->l_whence);
3278         fl.l_start = tswapl(target_fl->l_start);
3279         fl.l_len = tswapl(target_fl->l_len);
3280         fl.l_pid = tswapl(target_fl->l_pid);
3281         unlock_user_struct(target_fl, arg, 0);
3282         ret = get_errno(fcntl(fd, cmd, &fl));
3285     case TARGET_F_GETLK64:
3286         if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3287             return -TARGET_EFAULT;
3288         fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3289         fl64.l_whence = tswap16(target_fl64->l_whence);
3290         fl64.l_start = tswapl(target_fl64->l_start);
3291         fl64.l_len = tswapl(target_fl64->l_len);
3292         fl64.l_pid = tswap16(target_fl64->l_pid);
3293         unlock_user_struct(target_fl64, arg, 0);
3294         ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
3296             if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
3297                 return -TARGET_EFAULT;
3298             target_fl64->l_type = tswap16(fl64.l_type) >> 1;
3299             target_fl64->l_whence = tswap16(fl64.l_whence);
3300             target_fl64->l_start = tswapl(fl64.l_start);
3301             target_fl64->l_len = tswapl(fl64.l_len);
3302             target_fl64->l_pid = tswapl(fl64.l_pid);
3303             unlock_user_struct(target_fl64, arg, 1);
3306     case TARGET_F_SETLK64:
3307     case TARGET_F_SETLKW64:
3308         if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3309             return -TARGET_EFAULT;
3310         fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3311         fl64.l_whence = tswap16(target_fl64->l_whence);
3312         fl64.l_start = tswapl(target_fl64->l_start);
3313         fl64.l_len = tswapl(target_fl64->l_len);
3314         fl64.l_pid = tswap16(target_fl64->l_pid);
3315         unlock_user_struct(target_fl64, arg, 0);
3316         ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
    /* F_GETFL result is translated back into target flag bits. */
3320         ret = get_errno(fcntl(fd, cmd, arg));
3322             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
3327         ret = get_errno(fcntl(fd, cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
    /* Default: pass the command straight through to the host. */
3331         ret = get_errno(fcntl(fd, cmd, arg));
/* Helpers for the 16-bit uid/gid syscall variants (USE_UID16): the
 * low2high* direction preserves the 16-bit -1 "no change" sentinel.
 * NOTE(review): elided view -- the function bodies (the clamping in
 * high2low* and the returns in low2high*) are not visible here. */
3339 static inline int high2lowuid(int uid)
3347 static inline int high2lowgid(int gid)
3355 static inline int low2highuid(int uid)
3357     if ((int16_t)uid == -1)
3363 static inline int low2highgid(int gid)
3365     if ((int16_t)gid == -1)
3371 #endif /* USE_UID16 */
/* One-time initialization of the syscall emulation layer:
 *  - register every structure converter from syscall_types.h with the
 *    thunk layer (STRUCT / STRUCT_SPECIAL expansion);
 *  - patch ioctl numbers whose size field is the all-ones placeholder
 *    with the real (target) struct size;
 *  - build the target_to_host errno table as the inverse of
 *    host_to_target_errno_table;
 *  - on same-arch builds, sanity-check that target and host ioctl
 *    numbers agree.
 * NOTE(review): elided view -- the "{" line, the declarations of
 * ie/i/size, the loop increments/closers and some braces are not
 * visible here. */
3373 void syscall_init(void)
3376     const argtype *arg_type;
3380 #define STRUCT(name, list...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3381 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3382 #include "syscall_types.h"
3384 #undef STRUCT_SPECIAL
3386     /* we patch the ioctl size if necessary. We rely on the fact that
3387        no ioctl has all the bits at '1' in the size field */
3389     while (ie->target_cmd != 0) {
3390         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
3391             TARGET_IOC_SIZEMASK) {
3392             arg_type = ie->arg_type;
3393             if (arg_type[0] != TYPE_PTR) {
3394                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
3399             size = thunk_type_size(arg_type, 0);
3400             ie->target_cmd = (ie->target_cmd &
3401                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
3402                 (size << TARGET_IOC_SIZESHIFT);
3405     /* Build target_to_host_errno_table[] table from
3406      * host_to_target_errno_table[]. */
3407     for (i=0; i < ERRNO_TABLE_SIZE; i++)
3408         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
3410         /* automatic consistency check if same arch */
3411 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3412     (defined(__x86_64__) && defined(TARGET_X86_64))
3413         if (unlikely(ie->target_cmd != ie->host_cmd)) {
3414             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3415                     ie->name, ie->target_cmd, ie->host_cmd);
3422 #if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit value from the two 32-bit registers a 32-bit
 * guest ABI splits it into; register order depends on guest
 * endianness. */
3423 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
3425 #ifdef TARGET_WORDS_BIGENDIAN
3426     return ((uint64_t)word0 << 32) | word1;
3428     return ((uint64_t)word1 << 32) | word0;
3431 #else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the value already arrives whole in word0 (body elided
 * in this view; word1 is unused there). */
3432 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
3436 #endif /* TARGET_ABI_BITS != 32 */
3438 #ifdef TARGET_NR_truncate64
/* truncate64/ftruncate64 wrappers: combine the split 64-bit length
 * via target_offset64().  The visible CPUARMState check handles the
 * ARM EABI case -- presumably shifting the register arguments to
 * account for 64-bit argument alignment; the shifted lines are elided
 * from this view, so confirm against the full source. */
3439 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
3445     if (((CPUARMState *)cpu_env)->eabi)
3451     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
3455 #ifdef TARGET_NR_ftruncate64
3456 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
3462     if (((CPUARMState *)cpu_env)->eabi)
3468     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Copy a struct timespec from guest memory (byte-swapping each field)
 * into *host_ts.  Returns -TARGET_EFAULT if the guest struct cannot
 * be locked; the success return is elided from this view. */
3472 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
3473                                                abi_ulong target_addr)
3475     struct target_timespec *target_ts;
3477     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
3478         return -TARGET_EFAULT;
3479     host_ts->tv_sec = tswapl(target_ts->tv_sec);
3480     host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
3481     unlock_user_struct(target_ts, target_addr, 0);
/* Inverse direction: write *host_ts into guest memory at target_addr
 * with byte-swapped fields.  Same EFAULT convention as above. */
3485 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
3486                                                struct timespec *host_ts)
3488     struct target_timespec *target_ts;
3490     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
3491         return -TARGET_EFAULT;
3492     target_ts->tv_sec = tswapl(host_ts->tv_sec);
3493     target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
3494     unlock_user_struct(target_ts, target_addr, 1);
3498 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Copy a host struct stat into the guest's 64-bit stat layout at
 * target_addr.  ARM EABI guests get the target_eabi_stat64 layout;
 * all others get target_stat (64-bit ABIs) or target_stat64.
 * __put_user handles per-field byte swapping.
 * TARGET_STAT64_HAS_BROKEN_ST_INO duplicates the inode into the
 * guest's legacy __st_ino field.
 * NOTE(review): elided view -- the ARM #ifdef/else scaffolding,
 * early return of the EABI branch, #endif lines and final return are
 * not visible here. */
3499 static inline abi_long host_to_target_stat64(void *cpu_env,
3500                                              abi_ulong target_addr,
3501                                              struct stat *host_st)
3504     if (((CPUARMState *)cpu_env)->eabi) {
3505         struct target_eabi_stat64 *target_st;
3507         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3508             return -TARGET_EFAULT;
3509         memset(target_st, 0, sizeof(struct target_eabi_stat64));
3510         __put_user(host_st->st_dev, &target_st->st_dev);
3511         __put_user(host_st->st_ino, &target_st->st_ino);
3512 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3513         __put_user(host_st->st_ino, &target_st->__st_ino);
3515         __put_user(host_st->st_mode, &target_st->st_mode);
3516         __put_user(host_st->st_nlink, &target_st->st_nlink);
3517         __put_user(host_st->st_uid, &target_st->st_uid);
3518         __put_user(host_st->st_gid, &target_st->st_gid);
3519         __put_user(host_st->st_rdev, &target_st->st_rdev);
3520         __put_user(host_st->st_size, &target_st->st_size);
3521         __put_user(host_st->st_blksize, &target_st->st_blksize);
3522         __put_user(host_st->st_blocks, &target_st->st_blocks);
3523         __put_user(host_st->st_atime, &target_st->target_st_atime);
3524         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3525         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3526         unlock_user_struct(target_st, target_addr, 1);
3530 #if TARGET_LONG_BITS == 64
3531         struct target_stat *target_st;
3533         struct target_stat64 *target_st;
3536         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3537             return -TARGET_EFAULT;
3538         memset(target_st, 0, sizeof(*target_st));
3539         __put_user(host_st->st_dev, &target_st->st_dev);
3540         __put_user(host_st->st_ino, &target_st->st_ino);
3541 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3542         __put_user(host_st->st_ino, &target_st->__st_ino);
3544         __put_user(host_st->st_mode, &target_st->st_mode);
3545         __put_user(host_st->st_nlink, &target_st->st_nlink);
3546         __put_user(host_st->st_uid, &target_st->st_uid);
3547         __put_user(host_st->st_gid, &target_st->st_gid);
3548         __put_user(host_st->st_rdev, &target_st->st_rdev);
3549         /* XXX: better use of kernel struct */
3550         __put_user(host_st->st_size, &target_st->st_size);
3551         __put_user(host_st->st_blksize, &target_st->st_blksize);
3552         __put_user(host_st->st_blocks, &target_st->st_blocks);
3553         __put_user(host_st->st_atime, &target_st->target_st_atime);
3554         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3555         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3556         unlock_user_struct(target_st, target_addr, 1);
3563 #if defined(USE_NPTL)
3564 /* ??? Using host futex calls even when target atomic operations
3565    are not really atomic probably breaks things.  However implementing
3566    futexes locally would make futexes shared between multiple processes
3567    tricky.  However they're probably useless because guest atomic
3568    operations won't work either.  */
/* Forward a guest futex(2) to the host on the guest address's host
 * mapping (g2h).  FUTEX_WAIT converts the optional timeout and
 * byte-swaps the expected value; REQUEUE/CMP_REQUEUE pass the second
 * address through.  Unknown ops yield -TARGET_ENOSYS.
 * NOTE(review): elided view -- the switch header, the FUTEX_WAIT
 * case label, the pts NULL-vs-&ts selection and some case labels are
 * not visible here. */
3569 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
3570                     target_ulong uaddr2, int val3)
3572     struct timespec ts, *pts;
3574     /* ??? We assume FUTEX_* constants are the same on both host
3580             target_to_host_timespec(pts, timeout);
3584         return get_errno(sys_futex(g2h(uaddr), FUTEX_WAIT, tswap32(val),
3587         return get_errno(sys_futex(g2h(uaddr), FUTEX_WAKE, val, NULL, NULL, 0));
3589         return get_errno(sys_futex(g2h(uaddr), FUTEX_FD, val, NULL, NULL, 0));
3591         return get_errno(sys_futex(g2h(uaddr), FUTEX_REQUEUE, val,
3592                          NULL, g2h(uaddr2), 0));
3593     case FUTEX_CMP_REQUEUE:
3594         return get_errno(sys_futex(g2h(uaddr), FUTEX_CMP_REQUEUE, val,
3595                          NULL, g2h(uaddr2), tswap32(val3)));
3597         return -TARGET_ENOSYS;
/* Return the kernel version the guest should see, packed one byte
 * per component (tmp = (tmp << 8) + n), computed once and cached in
 * the static.  The version string comes from the user-supplied
 * qemu_uname_release override when set, otherwise from sys_uname().
 * NOTE(review): elided view -- the cache-hit early return, the
 * buf.release fallback assignment, the per-component separator
 * handling and the final return are not visible here. */
3602 int get_osversion(void)
3604     static int osversion;
3605     struct new_utsname buf;
3610     if (qemu_uname_release && *qemu_uname_release) {
3611         s = qemu_uname_release;
3613         if (sys_uname(&buf))
3618     for (i = 0; i < 3; i++) {
3620         while (*s >= '0' && *s <= '9') {
3625         tmp = (tmp << 8) + n;
3633 /* do_syscall() should always have a single exit point at the end so
3634 that actions, such as logging of syscall results, can be performed.
3635 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
3636 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
3637 abi_long arg2, abi_long arg3, abi_long arg4,
3638 abi_long arg5, abi_long arg6)
3646 gemu_log("syscall %d", num);
3649 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
3652 case TARGET_NR_exit:
3656 gdb_exit(cpu_env, arg1);
3657 /* XXX: should free thread stack and CPU env */
3659 ret = 0; /* avoid warning */
3661 case TARGET_NR_read:
3662 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
3664 ret = get_errno(read(arg1, p, arg3));
3665 unlock_user(p, arg2, ret);
3667 case TARGET_NR_write:
3668 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
3670 ret = get_errno(write(arg1, p, arg3));
3671 unlock_user(p, arg2, 0);
3673 case TARGET_NR_open:
3674 if (!(p = lock_user_string(arg1)))
3676 ret = get_errno(open(path(p),
3677 target_to_host_bitmask(arg2, fcntl_flags_tbl),
3679 unlock_user(p, arg1, 0);
3681 #if defined(TARGET_NR_openat) && defined(__NR_openat)
3682 case TARGET_NR_openat:
3683 if (!(p = lock_user_string(arg2)))
3685 ret = get_errno(sys_openat(arg1,
3687 target_to_host_bitmask(arg3, fcntl_flags_tbl),
3689 unlock_user(p, arg2, 0);
3692 case TARGET_NR_close:
3693 ret = get_errno(close(arg1));
3698 case TARGET_NR_fork:
3699 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
3701 #ifdef TARGET_NR_waitpid
3702 case TARGET_NR_waitpid:
3705 ret = get_errno(waitpid(arg1, &status, arg3));
3706 if (!is_error(ret) && arg2
3707 && put_user_s32(status, arg2))
3712 #ifdef TARGET_NR_waitid
3713 case TARGET_NR_waitid:
3717 ret = get_errno(waitid(arg1, arg2, &info, arg4));
3718 if (!is_error(ret) && arg3 && info.si_pid != 0) {
3719 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
3721 host_to_target_siginfo(p, &info);
3722 unlock_user(p, arg3, sizeof(target_siginfo_t));
3727 #ifdef TARGET_NR_creat /* not on alpha */
3728 case TARGET_NR_creat:
3729 if (!(p = lock_user_string(arg1)))
3731 ret = get_errno(creat(p, arg2));
3732 unlock_user(p, arg1, 0);
3735 case TARGET_NR_link:
3738 p = lock_user_string(arg1);
3739 p2 = lock_user_string(arg2);
3741 ret = -TARGET_EFAULT;
3743 ret = get_errno(link(p, p2));
3744 unlock_user(p2, arg2, 0);
3745 unlock_user(p, arg1, 0);
3748 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
3749 case TARGET_NR_linkat:
3754 p = lock_user_string(arg2);
3755 p2 = lock_user_string(arg4);
3757 ret = -TARGET_EFAULT;
3759 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
3760 unlock_user(p, arg2, 0);
3761 unlock_user(p2, arg4, 0);
3765 case TARGET_NR_unlink:
3766 if (!(p = lock_user_string(arg1)))
3768 ret = get_errno(unlink(p));
3769 unlock_user(p, arg1, 0);
3771 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
3772 case TARGET_NR_unlinkat:
3773 if (!(p = lock_user_string(arg2)))
3775 ret = get_errno(sys_unlinkat(arg1, p, arg3));
3776 unlock_user(p, arg2, 0);
3779 case TARGET_NR_execve:
3781 char **argp, **envp;
3784 abi_ulong guest_argp;
3785 abi_ulong guest_envp;
3791 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
3792 if (get_user_ual(addr, gp))
3800 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
3801 if (get_user_ual(addr, gp))
3808 argp = alloca((argc + 1) * sizeof(void *));
3809 envp = alloca((envc + 1) * sizeof(void *));
3811 for (gp = guest_argp, q = argp; gp;
3812 gp += sizeof(abi_ulong), q++) {
3813 if (get_user_ual(addr, gp))
3817 if (!(*q = lock_user_string(addr)))
3822 for (gp = guest_envp, q = envp; gp;
3823 gp += sizeof(abi_ulong), q++) {
3824 if (get_user_ual(addr, gp))
3828 if (!(*q = lock_user_string(addr)))
3833 if (!(p = lock_user_string(arg1)))
3835 ret = get_errno(execve(p, argp, envp));
3836 unlock_user(p, arg1, 0);
3841 ret = -TARGET_EFAULT;
3844 for (gp = guest_argp, q = argp; *q;
3845 gp += sizeof(abi_ulong), q++) {
3846 if (get_user_ual(addr, gp)
3849 unlock_user(*q, addr, 0);
3851 for (gp = guest_envp, q = envp; *q;
3852 gp += sizeof(abi_ulong), q++) {
3853 if (get_user_ual(addr, gp)
3856 unlock_user(*q, addr, 0);
3860 case TARGET_NR_chdir:
3861 if (!(p = lock_user_string(arg1)))
3863 ret = get_errno(chdir(p));
3864 unlock_user(p, arg1, 0);
3866 #ifdef TARGET_NR_time
3867 case TARGET_NR_time:
3870 ret = get_errno(time(&host_time));
3873 && put_user_sal(host_time, arg1))
3878 case TARGET_NR_mknod:
3879 if (!(p = lock_user_string(arg1)))
3881 ret = get_errno(mknod(p, arg2, arg3));
3882 unlock_user(p, arg1, 0);
3884 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
3885 case TARGET_NR_mknodat:
3886 if (!(p = lock_user_string(arg2)))
3888 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
3889 unlock_user(p, arg2, 0);
3892 case TARGET_NR_chmod:
3893 if (!(p = lock_user_string(arg1)))
3895 ret = get_errno(chmod(p, arg2));
3896 unlock_user(p, arg1, 0);
3898 #ifdef TARGET_NR_break
3899 case TARGET_NR_break:
3902 #ifdef TARGET_NR_oldstat
3903 case TARGET_NR_oldstat:
3906 case TARGET_NR_lseek:
3907 ret = get_errno(lseek(arg1, arg2, arg3));
3909 #ifdef TARGET_NR_getxpid
3910 case TARGET_NR_getxpid:
3912 case TARGET_NR_getpid:
3914 ret = get_errno(getpid());
3916 case TARGET_NR_mount:
3918 /* need to look at the data field */
3920 p = lock_user_string(arg1);
3921 p2 = lock_user_string(arg2);
3922 p3 = lock_user_string(arg3);
3923 if (!p || !p2 || !p3)
3924 ret = -TARGET_EFAULT;
3926 /* FIXME - arg5 should be locked, but it isn't clear how to
3927 * do that since it's not guaranteed to be a NULL-terminated
3930 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
3931 unlock_user(p, arg1, 0);
3932 unlock_user(p2, arg2, 0);
3933 unlock_user(p3, arg3, 0);
3936 #ifdef TARGET_NR_umount
3937 case TARGET_NR_umount:
3938 if (!(p = lock_user_string(arg1)))
3940 ret = get_errno(umount(p));
3941 unlock_user(p, arg1, 0);
3944 #ifdef TARGET_NR_stime /* not on alpha */
3945 case TARGET_NR_stime:
3948 if (get_user_sal(host_time, arg1))
3950 ret = get_errno(stime(&host_time));
3954 case TARGET_NR_ptrace:
3956 #ifdef TARGET_NR_alarm /* not on alpha */
3957 case TARGET_NR_alarm:
3961 #ifdef TARGET_NR_oldfstat
3962 case TARGET_NR_oldfstat:
3965 #ifdef TARGET_NR_pause /* not on alpha */
3966 case TARGET_NR_pause:
3967 ret = get_errno(pause());
3970 #ifdef TARGET_NR_utime
3971 case TARGET_NR_utime:
3973 struct utimbuf tbuf, *host_tbuf;
3974 struct target_utimbuf *target_tbuf;
3976 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
3978 tbuf.actime = tswapl(target_tbuf->actime);
3979 tbuf.modtime = tswapl(target_tbuf->modtime);
3980 unlock_user_struct(target_tbuf, arg2, 0);
3985 if (!(p = lock_user_string(arg1)))
3987 ret = get_errno(utime(p, host_tbuf));
3988 unlock_user(p, arg1, 0);
3992 case TARGET_NR_utimes:
3994 struct timeval *tvp, tv[2];
3996 if (copy_from_user_timeval(&tv[0], arg2)
3997 || copy_from_user_timeval(&tv[1],
3998 arg2 + sizeof(struct target_timeval)))
4004 if (!(p = lock_user_string(arg1)))
4006 ret = get_errno(utimes(p, tvp));
4007 unlock_user(p, arg1, 0);
4010 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4011 case TARGET_NR_futimesat:
4013 struct timeval *tvp, tv[2];
4015 if (copy_from_user_timeval(&tv[0], arg3)
4016 || copy_from_user_timeval(&tv[1],
4017 arg3 + sizeof(struct target_timeval)))
4023 if (!(p = lock_user_string(arg2)))
4025 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4026 unlock_user(p, arg2, 0);
4030 #ifdef TARGET_NR_stty
4031 case TARGET_NR_stty:
4034 #ifdef TARGET_NR_gtty
4035 case TARGET_NR_gtty:
4038 case TARGET_NR_access:
4039 if (!(p = lock_user_string(arg1)))
4041 ret = get_errno(access(p, arg2));
4042 unlock_user(p, arg1, 0);
4044 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4045 case TARGET_NR_faccessat:
4046 if (!(p = lock_user_string(arg2)))
4048 ret = get_errno(sys_faccessat(arg1, p, arg3, arg4));
4049 unlock_user(p, arg2, 0);
4052 #ifdef TARGET_NR_nice /* not on alpha */
4053 case TARGET_NR_nice:
4054 ret = get_errno(nice(arg1));
4057 #ifdef TARGET_NR_ftime
4058 case TARGET_NR_ftime:
4061 case TARGET_NR_sync:
4065 case TARGET_NR_kill:
4066 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
4068 case TARGET_NR_rename:
4071 p = lock_user_string(arg1);
4072 p2 = lock_user_string(arg2);
4074 ret = -TARGET_EFAULT;
4076 ret = get_errno(rename(p, p2));
4077 unlock_user(p2, arg2, 0);
4078 unlock_user(p, arg1, 0);
4081 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4082 case TARGET_NR_renameat:
4085 p = lock_user_string(arg2);
4086 p2 = lock_user_string(arg4);
4088 ret = -TARGET_EFAULT;
4090 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
4091 unlock_user(p2, arg4, 0);
4092 unlock_user(p, arg2, 0);
4096 case TARGET_NR_mkdir:
4097 if (!(p = lock_user_string(arg1)))
4099 ret = get_errno(mkdir(p, arg2));
4100 unlock_user(p, arg1, 0);
4102 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4103 case TARGET_NR_mkdirat:
4104 if (!(p = lock_user_string(arg2)))
4106 ret = get_errno(sys_mkdirat(arg1, p, arg3));
4107 unlock_user(p, arg2, 0);
4110 case TARGET_NR_rmdir:
4111 if (!(p = lock_user_string(arg1)))
4113 ret = get_errno(rmdir(p));
4114 unlock_user(p, arg1, 0);
4117 ret = get_errno(dup(arg1));
4119 case TARGET_NR_pipe:
4122 ret = get_errno(pipe(host_pipe));
4123 if (!is_error(ret)) {
4124 #if defined(TARGET_MIPS)
4125 CPUMIPSState *env = (CPUMIPSState*)cpu_env;
4126 env->active_tc.gpr[3] = host_pipe[1];
4128 #elif defined(TARGET_SH4)
4129 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
4132 if (put_user_s32(host_pipe[0], arg1)
4133 || put_user_s32(host_pipe[1], arg1 + sizeof(host_pipe[0])))
4139 case TARGET_NR_times:
4141 struct target_tms *tmsp;
4143 ret = get_errno(times(&tms));
4145 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
4148 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
4149 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
4150 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
4151 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
4154 ret = host_to_target_clock_t(ret);
4157 #ifdef TARGET_NR_prof
4158 case TARGET_NR_prof:
4161 #ifdef TARGET_NR_signal
4162 case TARGET_NR_signal:
4165 case TARGET_NR_acct:
4166 if (!(p = lock_user_string(arg1)))
4168 ret = get_errno(acct(path(p)));
4169 unlock_user(p, arg1, 0);
4171 #ifdef TARGET_NR_umount2 /* not on alpha */
4172 case TARGET_NR_umount2:
4173 if (!(p = lock_user_string(arg1)))
4175 ret = get_errno(umount2(p, arg2));
4176 unlock_user(p, arg1, 0);
4179 #ifdef TARGET_NR_lock
4180 case TARGET_NR_lock:
4183 case TARGET_NR_ioctl:
4184 ret = do_ioctl(arg1, arg2, arg3);
4186 case TARGET_NR_fcntl:
4187 ret = do_fcntl(arg1, arg2, arg3);
4189 #ifdef TARGET_NR_mpx
4193 case TARGET_NR_setpgid:
4194 ret = get_errno(setpgid(arg1, arg2));
4196 #ifdef TARGET_NR_ulimit
4197 case TARGET_NR_ulimit:
4200 #ifdef TARGET_NR_oldolduname
4201 case TARGET_NR_oldolduname:
4204 case TARGET_NR_umask:
4205 ret = get_errno(umask(arg1));
4207 case TARGET_NR_chroot:
4208 if (!(p = lock_user_string(arg1)))
4210 ret = get_errno(chroot(p));
4211 unlock_user(p, arg1, 0);
4213 case TARGET_NR_ustat:
4215 case TARGET_NR_dup2:
4216 ret = get_errno(dup2(arg1, arg2));
4218 #ifdef TARGET_NR_getppid /* not on alpha */
4219 case TARGET_NR_getppid:
4220 ret = get_errno(getppid());
4223 case TARGET_NR_getpgrp:
4224 ret = get_errno(getpgrp());
4226 case TARGET_NR_setsid:
4227 ret = get_errno(setsid());
4229 #ifdef TARGET_NR_sigaction
4230 case TARGET_NR_sigaction:
4232 #if !defined(TARGET_MIPS)
4233 struct target_old_sigaction *old_act;
4234 struct target_sigaction act, oact, *pact;
4236 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4238 act._sa_handler = old_act->_sa_handler;
4239 target_siginitset(&act.sa_mask, old_act->sa_mask);
4240 act.sa_flags = old_act->sa_flags;
4241 act.sa_restorer = old_act->sa_restorer;
4242 unlock_user_struct(old_act, arg2, 0);
4247 ret = get_errno(do_sigaction(arg1, pact, &oact));
4248 if (!is_error(ret) && arg3) {
4249 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4251 old_act->_sa_handler = oact._sa_handler;
4252 old_act->sa_mask = oact.sa_mask.sig[0];
4253 old_act->sa_flags = oact.sa_flags;
4254 old_act->sa_restorer = oact.sa_restorer;
4255 unlock_user_struct(old_act, arg3, 1);
4258 struct target_sigaction act, oact, *pact, *old_act;
4261 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4263 act._sa_handler = old_act->_sa_handler;
4264 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
4265 act.sa_flags = old_act->sa_flags;
4266 unlock_user_struct(old_act, arg2, 0);
4272 ret = get_errno(do_sigaction(arg1, pact, &oact));
4274 if (!is_error(ret) && arg3) {
4275 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4277 old_act->_sa_handler = oact._sa_handler;
4278 old_act->sa_flags = oact.sa_flags;
4279 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
4280 old_act->sa_mask.sig[1] = 0;
4281 old_act->sa_mask.sig[2] = 0;
4282 old_act->sa_mask.sig[3] = 0;
4283 unlock_user_struct(old_act, arg3, 1);
4289 case TARGET_NR_rt_sigaction:
4291 struct target_sigaction *act;
4292 struct target_sigaction *oact;
4295 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
4300 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
4301 ret = -TARGET_EFAULT;
4302 goto rt_sigaction_fail;
4306 ret = get_errno(do_sigaction(arg1, act, oact));
4309 unlock_user_struct(act, arg2, 0);
4311 unlock_user_struct(oact, arg3, 1);
4314 #ifdef TARGET_NR_sgetmask /* not on alpha */
4315 case TARGET_NR_sgetmask:
4318 abi_ulong target_set;
4319 sigprocmask(0, NULL, &cur_set);
4320 host_to_target_old_sigset(&target_set, &cur_set);
4325 #ifdef TARGET_NR_ssetmask /* not on alpha */
4326 case TARGET_NR_ssetmask:
4328 sigset_t set, oset, cur_set;
4329 abi_ulong target_set = arg1;
4330 sigprocmask(0, NULL, &cur_set);
4331 target_to_host_old_sigset(&set, &target_set);
4332 sigorset(&set, &set, &cur_set);
4333 sigprocmask(SIG_SETMASK, &set, &oset);
4334 host_to_target_old_sigset(&target_set, &oset);
4339 #ifdef TARGET_NR_sigprocmask
4340 case TARGET_NR_sigprocmask:
4343 sigset_t set, oldset, *set_ptr;
4347 case TARGET_SIG_BLOCK:
4350 case TARGET_SIG_UNBLOCK:
4353 case TARGET_SIG_SETMASK:
4357 ret = -TARGET_EINVAL;
4360 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4362 target_to_host_old_sigset(&set, p);
4363 unlock_user(p, arg2, 0);
4369 ret = get_errno(sigprocmask(arg1, set_ptr, &oldset));
4370 if (!is_error(ret) && arg3) {
4371 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4373 host_to_target_old_sigset(p, &oldset);
4374 unlock_user(p, arg3, sizeof(target_sigset_t));
4379 case TARGET_NR_rt_sigprocmask:
4382 sigset_t set, oldset, *set_ptr;
4386 case TARGET_SIG_BLOCK:
4389 case TARGET_SIG_UNBLOCK:
4392 case TARGET_SIG_SETMASK:
4396 ret = -TARGET_EINVAL;
4399 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4401 target_to_host_sigset(&set, p);
4402 unlock_user(p, arg2, 0);
4408 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
4409 if (!is_error(ret) && arg3) {
4410 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4412 host_to_target_sigset(p, &oldset);
4413 unlock_user(p, arg3, sizeof(target_sigset_t));
4417 #ifdef TARGET_NR_sigpending
4418 case TARGET_NR_sigpending:
4421 ret = get_errno(sigpending(&set));
4422 if (!is_error(ret)) {
4423 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4425 host_to_target_old_sigset(p, &set);
4426 unlock_user(p, arg1, sizeof(target_sigset_t));
4431 case TARGET_NR_rt_sigpending:
4434 ret = get_errno(sigpending(&set));
4435 if (!is_error(ret)) {
4436 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4438 host_to_target_sigset(p, &set);
4439 unlock_user(p, arg1, sizeof(target_sigset_t));
4443 #ifdef TARGET_NR_sigsuspend
4444 case TARGET_NR_sigsuspend:
4447 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4449 target_to_host_old_sigset(&set, p);
4450 unlock_user(p, arg1, 0);
4451 ret = get_errno(sigsuspend(&set));
4455 case TARGET_NR_rt_sigsuspend:
4458 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4460 target_to_host_sigset(&set, p);
4461 unlock_user(p, arg1, 0);
4462 ret = get_errno(sigsuspend(&set));
4465 case TARGET_NR_rt_sigtimedwait:
4468 struct timespec uts, *puts;
4471 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4473 target_to_host_sigset(&set, p);
4474 unlock_user(p, arg1, 0);
4477 target_to_host_timespec(puts, arg3);
4481 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
4482 if (!is_error(ret) && arg2) {
4483 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
4485 host_to_target_siginfo(p, &uinfo);
4486 unlock_user(p, arg2, sizeof(target_siginfo_t));
4490 case TARGET_NR_rt_sigqueueinfo:
4493 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
4495 target_to_host_siginfo(&uinfo, p);
4496 unlock_user(p, arg1, 0);
4497 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
4500 #ifdef TARGET_NR_sigreturn
4501 case TARGET_NR_sigreturn:
4502 /* NOTE: ret is eax, so no transcoding needs to be done */
4503 ret = do_sigreturn(cpu_env);
4506 case TARGET_NR_rt_sigreturn:
4507 /* NOTE: ret is eax, so no transcoding needs to be done */
4508 ret = do_rt_sigreturn(cpu_env);
4510 case TARGET_NR_sethostname:
4511 if (!(p = lock_user_string(arg1)))
4513 ret = get_errno(sethostname(p, arg2));
4514 unlock_user(p, arg1, 0);
4516 case TARGET_NR_setrlimit:
4518 /* XXX: convert resource ? */
4519 int resource = arg1;
4520 struct target_rlimit *target_rlim;
4522 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
4524 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4525 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4526 unlock_user_struct(target_rlim, arg2, 0);
4527 ret = get_errno(setrlimit(resource, &rlim));
4530 case TARGET_NR_getrlimit:
4532 /* XXX: convert resource ? */
4533 int resource = arg1;
4534 struct target_rlimit *target_rlim;
4537 ret = get_errno(getrlimit(resource, &rlim));
4538 if (!is_error(ret)) {
4539 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
4541 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4542 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4543 unlock_user_struct(target_rlim, arg2, 1);
4547 case TARGET_NR_getrusage:
4549 struct rusage rusage;
4550 ret = get_errno(getrusage(arg1, &rusage));
4551 if (!is_error(ret)) {
4552 host_to_target_rusage(arg2, &rusage);
4556 case TARGET_NR_gettimeofday:
4559 ret = get_errno(gettimeofday(&tv, NULL));
4560 if (!is_error(ret)) {
4561 if (copy_to_user_timeval(arg1, &tv))
4566 case TARGET_NR_settimeofday:
4569 if (copy_from_user_timeval(&tv, arg1))
4571 ret = get_errno(settimeofday(&tv, NULL));
4574 #ifdef TARGET_NR_select
4575 case TARGET_NR_select:
4577 struct target_sel_arg_struct *sel;
4578 abi_ulong inp, outp, exp, tvp;
4581 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
4583 nsel = tswapl(sel->n);
4584 inp = tswapl(sel->inp);
4585 outp = tswapl(sel->outp);
4586 exp = tswapl(sel->exp);
4587 tvp = tswapl(sel->tvp);
4588 unlock_user_struct(sel, arg1, 0);
4589 ret = do_select(nsel, inp, outp, exp, tvp);
4593 case TARGET_NR_symlink:
4596 p = lock_user_string(arg1);
4597 p2 = lock_user_string(arg2);
4599 ret = -TARGET_EFAULT;
4601 ret = get_errno(symlink(p, p2));
4602 unlock_user(p2, arg2, 0);
4603 unlock_user(p, arg1, 0);
4606 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
4607 case TARGET_NR_symlinkat:
4610 p = lock_user_string(arg1);
4611 p2 = lock_user_string(arg3);
4613 ret = -TARGET_EFAULT;
4615 ret = get_errno(sys_symlinkat(p, arg2, p2));
4616 unlock_user(p2, arg3, 0);
4617 unlock_user(p, arg1, 0);
4621 #ifdef TARGET_NR_oldlstat
4622 case TARGET_NR_oldlstat:
4625 case TARGET_NR_readlink:
4628 p = lock_user_string(arg1);
4629 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
4631 ret = -TARGET_EFAULT;
4633 ret = get_errno(readlink(path(p), p2, arg3));
4634 unlock_user(p2, arg2, ret);
4635 unlock_user(p, arg1, 0);
4638 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
4639 case TARGET_NR_readlinkat:
4642 p = lock_user_string(arg2);
4643 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
4645 ret = -TARGET_EFAULT;
4647 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
4648 unlock_user(p2, arg3, ret);
4649 unlock_user(p, arg2, 0);
4653 #ifdef TARGET_NR_uselib
4654 case TARGET_NR_uselib:
4657 #ifdef TARGET_NR_swapon
4658 case TARGET_NR_swapon:
4659 if (!(p = lock_user_string(arg1)))
4661 ret = get_errno(swapon(p, arg2));
4662 unlock_user(p, arg1, 0);
4665 case TARGET_NR_reboot:
4667 #ifdef TARGET_NR_readdir
4668 case TARGET_NR_readdir:
4671 #ifdef TARGET_NR_mmap
4672 case TARGET_NR_mmap:
4673 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS)
4676 abi_ulong v1, v2, v3, v4, v5, v6;
4677 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
4685 unlock_user(v, arg1, 0);
4686 ret = get_errno(target_mmap(v1, v2, v3,
4687 target_to_host_bitmask(v4, mmap_flags_tbl),
4691 ret = get_errno(target_mmap(arg1, arg2, arg3,
4692 target_to_host_bitmask(arg4, mmap_flags_tbl),
4698 #ifdef TARGET_NR_mmap2
4699 case TARGET_NR_mmap2:
4701 #define MMAP_SHIFT 12
4703 ret = get_errno(target_mmap(arg1, arg2, arg3,
4704 target_to_host_bitmask(arg4, mmap_flags_tbl),
4706 arg6 << MMAP_SHIFT));
4709 case TARGET_NR_munmap:
4710 ret = get_errno(target_munmap(arg1, arg2));
4712 case TARGET_NR_mprotect:
4713 ret = get_errno(target_mprotect(arg1, arg2, arg3));
4715 #ifdef TARGET_NR_mremap
4716 case TARGET_NR_mremap:
4717 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
4720 /* ??? msync/mlock/munlock are broken for softmmu. */
4721 #ifdef TARGET_NR_msync
4722 case TARGET_NR_msync:
4723 ret = get_errno(msync(g2h(arg1), arg2, arg3));
4726 #ifdef TARGET_NR_mlock
4727 case TARGET_NR_mlock:
4728 ret = get_errno(mlock(g2h(arg1), arg2));
4731 #ifdef TARGET_NR_munlock
4732 case TARGET_NR_munlock:
4733 ret = get_errno(munlock(g2h(arg1), arg2));
4736 #ifdef TARGET_NR_mlockall
4737 case TARGET_NR_mlockall:
4738 ret = get_errno(mlockall(arg1));
4741 #ifdef TARGET_NR_munlockall
4742 case TARGET_NR_munlockall:
4743 ret = get_errno(munlockall());
4746 case TARGET_NR_truncate:
4747 if (!(p = lock_user_string(arg1)))
4749 ret = get_errno(truncate(p, arg2));
4750 unlock_user(p, arg1, 0);
4752 case TARGET_NR_ftruncate:
4753 ret = get_errno(ftruncate(arg1, arg2));
4755 case TARGET_NR_fchmod:
4756 ret = get_errno(fchmod(arg1, arg2));
4758 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
4759 case TARGET_NR_fchmodat:
4760 if (!(p = lock_user_string(arg2)))
4762 ret = get_errno(sys_fchmodat(arg1, p, arg3, arg4));
4763 unlock_user(p, arg2, 0);
4766 case TARGET_NR_getpriority:
4767 /* libc does special remapping of the return value of
4768 * sys_getpriority() so it's just easiest to call
4769 * sys_getpriority() directly rather than through libc. */
4770 ret = sys_getpriority(arg1, arg2);
4772 case TARGET_NR_setpriority:
4773 ret = get_errno(setpriority(arg1, arg2, arg3));
4775 #ifdef TARGET_NR_profil
4776 case TARGET_NR_profil:
4779 case TARGET_NR_statfs:
4780 if (!(p = lock_user_string(arg1)))
4782 ret = get_errno(statfs(path(p), &stfs));
4783 unlock_user(p, arg1, 0);
4785 if (!is_error(ret)) {
4786 struct target_statfs *target_stfs;
4788 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
4790 __put_user(stfs.f_type, &target_stfs->f_type);
4791 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
4792 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
4793 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
4794 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
4795 __put_user(stfs.f_files, &target_stfs->f_files);
4796 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
4797 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
4798 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
4799 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
4800 unlock_user_struct(target_stfs, arg2, 1);
4803 case TARGET_NR_fstatfs:
4804 ret = get_errno(fstatfs(arg1, &stfs));
4805 goto convert_statfs;
4806 #ifdef TARGET_NR_statfs64
4807 case TARGET_NR_statfs64:
4808 if (!(p = lock_user_string(arg1)))
4810 ret = get_errno(statfs(path(p), &stfs));
4811 unlock_user(p, arg1, 0);
4813 if (!is_error(ret)) {
4814 struct target_statfs64 *target_stfs;
4816 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
4818 __put_user(stfs.f_type, &target_stfs->f_type);
4819 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
4820 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
4821 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
4822 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
4823 __put_user(stfs.f_files, &target_stfs->f_files);
4824 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
4825 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
4826 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
4827 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
4828 unlock_user_struct(target_stfs, arg3, 1);
4831 case TARGET_NR_fstatfs64:
4832 ret = get_errno(fstatfs(arg1, &stfs));
4833 goto convert_statfs64;
4835 #ifdef TARGET_NR_ioperm
4836 case TARGET_NR_ioperm:
4839 #ifdef TARGET_NR_socketcall
4840 case TARGET_NR_socketcall:
4841 ret = do_socketcall(arg1, arg2);
4844 #ifdef TARGET_NR_accept
4845 case TARGET_NR_accept:
4846 ret = do_accept(arg1, arg2, arg3);
4849 #ifdef TARGET_NR_bind
4850 case TARGET_NR_bind:
4851 ret = do_bind(arg1, arg2, arg3);
4854 #ifdef TARGET_NR_connect
4855 case TARGET_NR_connect:
4856 ret = do_connect(arg1, arg2, arg3);
4859 #ifdef TARGET_NR_getpeername
4860 case TARGET_NR_getpeername:
4861 ret = do_getpeername(arg1, arg2, arg3);
4864 #ifdef TARGET_NR_getsockname
4865 case TARGET_NR_getsockname:
4866 ret = do_getsockname(arg1, arg2, arg3);
4869 #ifdef TARGET_NR_getsockopt
4870 case TARGET_NR_getsockopt:
4871 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
4874 #ifdef TARGET_NR_listen
4875 case TARGET_NR_listen:
4876 ret = get_errno(listen(arg1, arg2));
4879 #ifdef TARGET_NR_recv
4880 case TARGET_NR_recv:
4881 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
4884 #ifdef TARGET_NR_recvfrom
4885 case TARGET_NR_recvfrom:
4886 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
4889 #ifdef TARGET_NR_recvmsg
4890 case TARGET_NR_recvmsg:
4891 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
4894 #ifdef TARGET_NR_send
4895 case TARGET_NR_send:
4896 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
4899 #ifdef TARGET_NR_sendmsg
4900 case TARGET_NR_sendmsg:
4901 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
4904 #ifdef TARGET_NR_sendto
4905 case TARGET_NR_sendto:
4906 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
4909 #ifdef TARGET_NR_shutdown
4910 case TARGET_NR_shutdown:
4911 ret = get_errno(shutdown(arg1, arg2));
4914 #ifdef TARGET_NR_socket
4915 case TARGET_NR_socket:
4916 ret = do_socket(arg1, arg2, arg3);
4919 #ifdef TARGET_NR_socketpair
4920 case TARGET_NR_socketpair:
4921 ret = do_socketpair(arg1, arg2, arg3, arg4);
4924 #ifdef TARGET_NR_setsockopt
4925 case TARGET_NR_setsockopt:
4926 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
4930 case TARGET_NR_syslog:
4931 if (!(p = lock_user_string(arg2)))
4933 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
4934 unlock_user(p, arg2, 0);
4937 case TARGET_NR_setitimer:
4939 struct itimerval value, ovalue, *pvalue;
4943 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
4944 || copy_from_user_timeval(&pvalue->it_value,
4945 arg2 + sizeof(struct target_timeval)))
4950 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
4951 if (!is_error(ret) && arg3) {
4952 if (copy_to_user_timeval(arg3,
4953 &ovalue.it_interval)
4954 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
4960 case TARGET_NR_getitimer:
4962 struct itimerval value;
4964 ret = get_errno(getitimer(arg1, &value));
4965 if (!is_error(ret) && arg2) {
4966 if (copy_to_user_timeval(arg2,
4968 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
4974 case TARGET_NR_stat:
4975 if (!(p = lock_user_string(arg1)))
4977 ret = get_errno(stat(path(p), &st));
4978 unlock_user(p, arg1, 0);
4980 case TARGET_NR_lstat:
4981 if (!(p = lock_user_string(arg1)))
4983 ret = get_errno(lstat(path(p), &st));
4984 unlock_user(p, arg1, 0);
4986 case TARGET_NR_fstat:
4988 ret = get_errno(fstat(arg1, &st));
4990 if (!is_error(ret)) {
4991 struct target_stat *target_st;
4993 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
4995 __put_user(st.st_dev, &target_st->st_dev);
4996 __put_user(st.st_ino, &target_st->st_ino);
4997 __put_user(st.st_mode, &target_st->st_mode);
4998 __put_user(st.st_uid, &target_st->st_uid);
4999 __put_user(st.st_gid, &target_st->st_gid);
5000 __put_user(st.st_nlink, &target_st->st_nlink);
5001 __put_user(st.st_rdev, &target_st->st_rdev);
5002 __put_user(st.st_size, &target_st->st_size);
5003 __put_user(st.st_blksize, &target_st->st_blksize);
5004 __put_user(st.st_blocks, &target_st->st_blocks);
5005 __put_user(st.st_atime, &target_st->target_st_atime);
5006 __put_user(st.st_mtime, &target_st->target_st_mtime);
5007 __put_user(st.st_ctime, &target_st->target_st_ctime);
5008 unlock_user_struct(target_st, arg2, 1);
5012 #ifdef TARGET_NR_olduname
5013 case TARGET_NR_olduname:
5016 #ifdef TARGET_NR_iopl
5017 case TARGET_NR_iopl:
5020 case TARGET_NR_vhangup:
5021 ret = get_errno(vhangup());
5023 #ifdef TARGET_NR_idle
5024 case TARGET_NR_idle:
5027 #ifdef TARGET_NR_syscall
5028 case TARGET_NR_syscall:
5029 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
5032 case TARGET_NR_wait4:
5035 abi_long status_ptr = arg2;
5036 struct rusage rusage, *rusage_ptr;
5037 abi_ulong target_rusage = arg4;
5039 rusage_ptr = &rusage;
5042 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
5043 if (!is_error(ret)) {
5045 if (put_user_s32(status, status_ptr))
5049 host_to_target_rusage(target_rusage, &rusage);
5053 #ifdef TARGET_NR_swapoff
5054 case TARGET_NR_swapoff:
5055 if (!(p = lock_user_string(arg1)))
5057 ret = get_errno(swapoff(p));
5058 unlock_user(p, arg1, 0);
5061 case TARGET_NR_sysinfo:
5063 struct target_sysinfo *target_value;
5064 struct sysinfo value;
5065 ret = get_errno(sysinfo(&value));
5066 if (!is_error(ret) && arg1)
5068 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
5070 __put_user(value.uptime, &target_value->uptime);
5071 __put_user(value.loads[0], &target_value->loads[0]);
5072 __put_user(value.loads[1], &target_value->loads[1]);
5073 __put_user(value.loads[2], &target_value->loads[2]);
5074 __put_user(value.totalram, &target_value->totalram);
5075 __put_user(value.freeram, &target_value->freeram);
5076 __put_user(value.sharedram, &target_value->sharedram);
5077 __put_user(value.bufferram, &target_value->bufferram);
5078 __put_user(value.totalswap, &target_value->totalswap);
5079 __put_user(value.freeswap, &target_value->freeswap);
5080 __put_user(value.procs, &target_value->procs);
5081 __put_user(value.totalhigh, &target_value->totalhigh);
5082 __put_user(value.freehigh, &target_value->freehigh);
5083 __put_user(value.mem_unit, &target_value->mem_unit);
5084 unlock_user_struct(target_value, arg1, 1);
5088 #ifdef TARGET_NR_ipc
5090 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
5093 #ifdef TARGET_NR_semget
5094 case TARGET_NR_semget:
5095 ret = get_errno(semget(arg1, arg2, arg3));
5098 #ifdef TARGET_NR_semop
5099 case TARGET_NR_semop:
5100 ret = get_errno(do_semop(arg1, arg2, arg3));
5103 #ifdef TARGET_NR_semctl
5104 case TARGET_NR_semctl:
5105 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
5108 #ifdef TARGET_NR_msgctl
5109 case TARGET_NR_msgctl:
5110 ret = do_msgctl(arg1, arg2, arg3);
5113 #ifdef TARGET_NR_msgget
5114 case TARGET_NR_msgget:
5115 ret = get_errno(msgget(arg1, arg2));
5118 #ifdef TARGET_NR_msgrcv
5119 case TARGET_NR_msgrcv:
5120 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
5123 #ifdef TARGET_NR_msgsnd
5124 case TARGET_NR_msgsnd:
5125 ret = do_msgsnd(arg1, arg2, arg3, arg4);
5128 case TARGET_NR_fsync:
5129 ret = get_errno(fsync(arg1));
5131 case TARGET_NR_clone:
5132 #if defined(TARGET_SH4)
5133 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
5134 #elif defined(TARGET_CRIS)
5135 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
5137 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
5140 #ifdef __NR_exit_group
5141 /* new thread calls */
5142 case TARGET_NR_exit_group:
5146 gdb_exit(cpu_env, arg1);
5147 ret = get_errno(exit_group(arg1));
5150 case TARGET_NR_setdomainname:
5151 if (!(p = lock_user_string(arg1)))
5153 ret = get_errno(setdomainname(p, arg2));
5154 unlock_user(p, arg1, 0);
5156 case TARGET_NR_uname:
5157 /* no need to transcode because we use the linux syscall */
5159 struct new_utsname * buf;
5161 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
5163 ret = get_errno(sys_uname(buf));
5164 if (!is_error(ret)) {
5165 /* Overwrite the native machine name with whatever is being
5167 strcpy (buf->machine, UNAME_MACHINE);
5168 /* Allow the user to override the reported release. */
5169 if (qemu_uname_release && *qemu_uname_release)
5170 strcpy (buf->release, qemu_uname_release);
5172 unlock_user_struct(buf, arg1, 1);
5176 case TARGET_NR_modify_ldt:
5177 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
5179 #if !defined(TARGET_X86_64)
5180 case TARGET_NR_vm86old:
5182 case TARGET_NR_vm86:
5183 ret = do_vm86(cpu_env, arg1, arg2);
5187 case TARGET_NR_adjtimex:
5189 #ifdef TARGET_NR_create_module
5190 case TARGET_NR_create_module:
5192 case TARGET_NR_init_module:
5193 case TARGET_NR_delete_module:
5194 #ifdef TARGET_NR_get_kernel_syms
5195 case TARGET_NR_get_kernel_syms:
5198 case TARGET_NR_quotactl:
5200 case TARGET_NR_getpgid:
5201 ret = get_errno(getpgid(arg1));
5203 case TARGET_NR_fchdir:
5204 ret = get_errno(fchdir(arg1));
5206 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5207 case TARGET_NR_bdflush:
5210 #ifdef TARGET_NR_sysfs
5211 case TARGET_NR_sysfs:
5214 case TARGET_NR_personality:
5215 ret = get_errno(personality(arg1));
5217 #ifdef TARGET_NR_afs_syscall
5218 case TARGET_NR_afs_syscall:
5221 #ifdef TARGET_NR__llseek /* Not on alpha */
5222 case TARGET_NR__llseek:
5224 #if defined (__x86_64__)
5225 ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5));
5226 if (put_user_s64(ret, arg4))
5230 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
5231 if (put_user_s64(res, arg4))
5237 case TARGET_NR_getdents:
5238 #if TARGET_ABI_BITS != 32
5240 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5242 struct target_dirent *target_dirp;
5243 struct linux_dirent *dirp;
5244 abi_long count = arg3;
5246 dirp = malloc(count);
5248 ret = -TARGET_ENOMEM;
5252 ret = get_errno(sys_getdents(arg1, dirp, count));
5253 if (!is_error(ret)) {
5254 struct linux_dirent *de;
5255 struct target_dirent *tde;
5257 int reclen, treclen;
5258 int count1, tnamelen;
5262 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5266 reclen = de->d_reclen;
5267 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
5268 tde->d_reclen = tswap16(treclen);
5269 tde->d_ino = tswapl(de->d_ino);
5270 tde->d_off = tswapl(de->d_off);
5271 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
5274 /* XXX: may not be correct */
5275 pstrcpy(tde->d_name, tnamelen, de->d_name);
5276 de = (struct linux_dirent *)((char *)de + reclen);
5278 tde = (struct target_dirent *)((char *)tde + treclen);
5282 unlock_user(target_dirp, arg2, ret);
5288 struct linux_dirent *dirp;
5289 abi_long count = arg3;
5291 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5293 ret = get_errno(sys_getdents(arg1, dirp, count));
5294 if (!is_error(ret)) {
5295 struct linux_dirent *de;
5300 reclen = de->d_reclen;
5303 de->d_reclen = tswap16(reclen);
5304 tswapls(&de->d_ino);
5305 tswapls(&de->d_off);
5306 de = (struct linux_dirent *)((char *)de + reclen);
5310 unlock_user(dirp, arg2, ret);
5314 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5315 case TARGET_NR_getdents64:
5317 struct linux_dirent64 *dirp;
5318 abi_long count = arg3;
5319 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5321 ret = get_errno(sys_getdents64(arg1, dirp, count));
5322 if (!is_error(ret)) {
5323 struct linux_dirent64 *de;
5328 reclen = de->d_reclen;
5331 de->d_reclen = tswap16(reclen);
5332 tswap64s((uint64_t *)&de->d_ino);
5333 tswap64s((uint64_t *)&de->d_off);
5334 de = (struct linux_dirent64 *)((char *)de + reclen);
5338 unlock_user(dirp, arg2, ret);
5341 #endif /* TARGET_NR_getdents64 */
5342 #ifdef TARGET_NR__newselect
5343 case TARGET_NR__newselect:
5344 ret = do_select(arg1, arg2, arg3, arg4, arg5);
5347 #ifdef TARGET_NR_poll
5348 case TARGET_NR_poll:
5350 struct target_pollfd *target_pfd;
5351 unsigned int nfds = arg2;
5356 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
5359 pfd = alloca(sizeof(struct pollfd) * nfds);
5360 for(i = 0; i < nfds; i++) {
5361 pfd[i].fd = tswap32(target_pfd[i].fd);
5362 pfd[i].events = tswap16(target_pfd[i].events);
5364 ret = get_errno(poll(pfd, nfds, timeout));
5365 if (!is_error(ret)) {
5366 for(i = 0; i < nfds; i++) {
5367 target_pfd[i].revents = tswap16(pfd[i].revents);
5369 ret += nfds * (sizeof(struct target_pollfd)
5370 - sizeof(struct pollfd));
5372 unlock_user(target_pfd, arg1, ret);
5376 case TARGET_NR_flock:
5377 /* NOTE: the flock constant seems to be the same for every
5379 ret = get_errno(flock(arg1, arg2));
5381 case TARGET_NR_readv:
5386 vec = alloca(count * sizeof(struct iovec));
5387 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
5389 ret = get_errno(readv(arg1, vec, count));
5390 unlock_iovec(vec, arg2, count, 1);
5393 case TARGET_NR_writev:
5398 vec = alloca(count * sizeof(struct iovec));
5399 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
5401 ret = get_errno(writev(arg1, vec, count));
5402 unlock_iovec(vec, arg2, count, 0);
5405 case TARGET_NR_getsid:
5406 ret = get_errno(getsid(arg1));
5408 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5409 case TARGET_NR_fdatasync:
5410 ret = get_errno(fdatasync(arg1));
5413 case TARGET_NR__sysctl:
5414 /* We don't implement this, but ENOTDIR is always a safe
5416 ret = -TARGET_ENOTDIR;
5418 case TARGET_NR_sched_setparam:
5420 struct sched_param *target_schp;
5421 struct sched_param schp;
5423 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
5425 schp.sched_priority = tswap32(target_schp->sched_priority);
5426 unlock_user_struct(target_schp, arg2, 0);
5427 ret = get_errno(sched_setparam(arg1, &schp));
5430 case TARGET_NR_sched_getparam:
5432 struct sched_param *target_schp;
5433 struct sched_param schp;
5434 ret = get_errno(sched_getparam(arg1, &schp));
5435 if (!is_error(ret)) {
5436 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
5438 target_schp->sched_priority = tswap32(schp.sched_priority);
5439 unlock_user_struct(target_schp, arg2, 1);
5443 case TARGET_NR_sched_setscheduler:
5445 struct sched_param *target_schp;
5446 struct sched_param schp;
5447 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
5449 schp.sched_priority = tswap32(target_schp->sched_priority);
5450 unlock_user_struct(target_schp, arg3, 0);
5451 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
5454 case TARGET_NR_sched_getscheduler:
5455 ret = get_errno(sched_getscheduler(arg1));
5457 case TARGET_NR_sched_yield:
5458 ret = get_errno(sched_yield());
5460 case TARGET_NR_sched_get_priority_max:
5461 ret = get_errno(sched_get_priority_max(arg1));
5463 case TARGET_NR_sched_get_priority_min:
5464 ret = get_errno(sched_get_priority_min(arg1));
5466 case TARGET_NR_sched_rr_get_interval:
5469 ret = get_errno(sched_rr_get_interval(arg1, &ts));
5470 if (!is_error(ret)) {
5471 host_to_target_timespec(arg2, &ts);
5475 case TARGET_NR_nanosleep:
5477 struct timespec req, rem;
5478 target_to_host_timespec(&req, arg1);
5479 ret = get_errno(nanosleep(&req, &rem));
5480 if (is_error(ret) && arg2) {
5481 host_to_target_timespec(arg2, &rem);
5485 #ifdef TARGET_NR_query_module
5486 case TARGET_NR_query_module:
5489 #ifdef TARGET_NR_nfsservctl
5490 case TARGET_NR_nfsservctl:
5493 case TARGET_NR_prctl:
5496 case PR_GET_PDEATHSIG:
5499 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
5500 if (!is_error(ret) && arg2
5501 && put_user_ual(deathsig, arg2))
5506 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
5510 #ifdef TARGET_NR_arch_prctl
5511 case TARGET_NR_arch_prctl:
5512 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
5513 ret = do_arch_prctl(cpu_env, arg1, arg2);
5519 #ifdef TARGET_NR_pread
5520 case TARGET_NR_pread:
5522 if (((CPUARMState *)cpu_env)->eabi)
5525 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5527 ret = get_errno(pread(arg1, p, arg3, arg4));
5528 unlock_user(p, arg2, ret);
5530 case TARGET_NR_pwrite:
5532 if (((CPUARMState *)cpu_env)->eabi)
5535 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5537 ret = get_errno(pwrite(arg1, p, arg3, arg4));
5538 unlock_user(p, arg2, 0);
5541 #ifdef TARGET_NR_pread64
5542 case TARGET_NR_pread64:
5543 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5545 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
5546 unlock_user(p, arg2, ret);
5548 case TARGET_NR_pwrite64:
5549 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5551 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
5552 unlock_user(p, arg2, 0);
5555 case TARGET_NR_getcwd:
5556 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
5558 ret = get_errno(sys_getcwd1(p, arg2));
5559 unlock_user(p, arg1, ret);
5561 case TARGET_NR_capget:
5563 case TARGET_NR_capset:
5565 case TARGET_NR_sigaltstack:
5566 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
5567 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
5568 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
5573 case TARGET_NR_sendfile:
5575 #ifdef TARGET_NR_getpmsg
5576 case TARGET_NR_getpmsg:
5579 #ifdef TARGET_NR_putpmsg
5580 case TARGET_NR_putpmsg:
5583 #ifdef TARGET_NR_vfork
5584 case TARGET_NR_vfork:
5585 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
5589 #ifdef TARGET_NR_ugetrlimit
5590 case TARGET_NR_ugetrlimit:
5593 ret = get_errno(getrlimit(arg1, &rlim));
5594 if (!is_error(ret)) {
5595 struct target_rlimit *target_rlim;
5596 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5598 target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
5599 target_rlim->rlim_max = tswapl(rlim.rlim_max);
5600 unlock_user_struct(target_rlim, arg2, 1);
5605 #ifdef TARGET_NR_truncate64
5606 case TARGET_NR_truncate64:
5607 if (!(p = lock_user_string(arg1)))
5609 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
5610 unlock_user(p, arg1, 0);
5613 #ifdef TARGET_NR_ftruncate64
5614 case TARGET_NR_ftruncate64:
5615 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
5618 #ifdef TARGET_NR_stat64
5619 case TARGET_NR_stat64:
5620 if (!(p = lock_user_string(arg1)))
5622 ret = get_errno(stat(path(p), &st));
5623 unlock_user(p, arg1, 0);
5625 ret = host_to_target_stat64(cpu_env, arg2, &st);
5628 #ifdef TARGET_NR_lstat64
5629 case TARGET_NR_lstat64:
5630 if (!(p = lock_user_string(arg1)))
5632 ret = get_errno(lstat(path(p), &st));
5633 unlock_user(p, arg1, 0);
5635 ret = host_to_target_stat64(cpu_env, arg2, &st);
5638 #ifdef TARGET_NR_fstat64
5639 case TARGET_NR_fstat64:
5640 ret = get_errno(fstat(arg1, &st));
5642 ret = host_to_target_stat64(cpu_env, arg2, &st);
5645 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
5646 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
5647 #ifdef TARGET_NR_fstatat64
5648 case TARGET_NR_fstatat64:
5650 #ifdef TARGET_NR_newfstatat
5651 case TARGET_NR_newfstatat:
5653 if (!(p = lock_user_string(arg2)))
5655 #ifdef __NR_fstatat64
5656 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
5658 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
5661 ret = host_to_target_stat64(cpu_env, arg3, &st);
5665 case TARGET_NR_lchown:
5666 if (!(p = lock_user_string(arg1)))
5668 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
5669 unlock_user(p, arg1, 0);
5671 case TARGET_NR_getuid:
5672 ret = get_errno(high2lowuid(getuid()));
5674 case TARGET_NR_getgid:
5675 ret = get_errno(high2lowgid(getgid()));
5677 case TARGET_NR_geteuid:
5678 ret = get_errno(high2lowuid(geteuid()));
5680 case TARGET_NR_getegid:
5681 ret = get_errno(high2lowgid(getegid()));
5683 case TARGET_NR_setreuid:
5684 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
5686 case TARGET_NR_setregid:
5687 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
5689 case TARGET_NR_getgroups:
5691 int gidsetsize = arg1;
5692 uint16_t *target_grouplist;
5696 grouplist = alloca(gidsetsize * sizeof(gid_t));
5697 ret = get_errno(getgroups(gidsetsize, grouplist));
5698 if (gidsetsize == 0)
5700 if (!is_error(ret)) {
5701 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
5702 if (!target_grouplist)
5704 for(i = 0;i < ret; i++)
5705 target_grouplist[i] = tswap16(grouplist[i]);
5706 unlock_user(target_grouplist, arg2, gidsetsize * 2);
5710 case TARGET_NR_setgroups:
5712 int gidsetsize = arg1;
5713 uint16_t *target_grouplist;
5717 grouplist = alloca(gidsetsize * sizeof(gid_t));
5718 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
5719 if (!target_grouplist) {
5720 ret = -TARGET_EFAULT;
5723 for(i = 0;i < gidsetsize; i++)
5724 grouplist[i] = tswap16(target_grouplist[i]);
5725 unlock_user(target_grouplist, arg2, 0);
5726 ret = get_errno(setgroups(gidsetsize, grouplist));
5729 case TARGET_NR_fchown:
5730 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
5732 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
5733 case TARGET_NR_fchownat:
5734 if (!(p = lock_user_string(arg2)))
5736 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
5737 unlock_user(p, arg2, 0);
5740 #ifdef TARGET_NR_setresuid
5741 case TARGET_NR_setresuid:
5742 ret = get_errno(setresuid(low2highuid(arg1),
5744 low2highuid(arg3)));
5747 #ifdef TARGET_NR_getresuid
5748 case TARGET_NR_getresuid:
5750 uid_t ruid, euid, suid;
5751 ret = get_errno(getresuid(&ruid, &euid, &suid));
5752 if (!is_error(ret)) {
5753 if (put_user_u16(high2lowuid(ruid), arg1)
5754 || put_user_u16(high2lowuid(euid), arg2)
5755 || put_user_u16(high2lowuid(suid), arg3))
5761 #ifdef TARGET_NR_getresgid
5762 case TARGET_NR_setresgid:
5763 ret = get_errno(setresgid(low2highgid(arg1),
5765 low2highgid(arg3)));
5768 #ifdef TARGET_NR_getresgid
5769 case TARGET_NR_getresgid:
5771 gid_t rgid, egid, sgid;
5772 ret = get_errno(getresgid(&rgid, &egid, &sgid));
5773 if (!is_error(ret)) {
5774 if (put_user_u16(high2lowgid(rgid), arg1)
5775 || put_user_u16(high2lowgid(egid), arg2)
5776 || put_user_u16(high2lowgid(sgid), arg3))
5782 case TARGET_NR_chown:
5783 if (!(p = lock_user_string(arg1)))
5785 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
5786 unlock_user(p, arg1, 0);
5788 case TARGET_NR_setuid:
5789 ret = get_errno(setuid(low2highuid(arg1)));
5791 case TARGET_NR_setgid:
5792 ret = get_errno(setgid(low2highgid(arg1)));
5794 case TARGET_NR_setfsuid:
5795 ret = get_errno(setfsuid(arg1));
5797 case TARGET_NR_setfsgid:
5798 ret = get_errno(setfsgid(arg1));
5800 #endif /* USE_UID16 */
5802 #ifdef TARGET_NR_lchown32
5803 case TARGET_NR_lchown32:
5804 if (!(p = lock_user_string(arg1)))
5806 ret = get_errno(lchown(p, arg2, arg3));
5807 unlock_user(p, arg1, 0);
5810 #ifdef TARGET_NR_getuid32
5811 case TARGET_NR_getuid32:
5812 ret = get_errno(getuid());
5816 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
5817 /* Alpha specific */
5818 case TARGET_NR_getxuid:
5822 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
5824 ret = get_errno(getuid());
5827 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
5828 /* Alpha specific */
5829 case TARGET_NR_getxgid:
5833 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
5835 ret = get_errno(getgid());
5839 #ifdef TARGET_NR_getgid32
5840 case TARGET_NR_getgid32:
5841 ret = get_errno(getgid());
5844 #ifdef TARGET_NR_geteuid32
5845 case TARGET_NR_geteuid32:
5846 ret = get_errno(geteuid());
5849 #ifdef TARGET_NR_getegid32
5850 case TARGET_NR_getegid32:
5851 ret = get_errno(getegid());
5854 #ifdef TARGET_NR_setreuid32
5855 case TARGET_NR_setreuid32:
5856 ret = get_errno(setreuid(arg1, arg2));
5859 #ifdef TARGET_NR_setregid32
5860 case TARGET_NR_setregid32:
5861 ret = get_errno(setregid(arg1, arg2));
5864 #ifdef TARGET_NR_getgroups32
5865 case TARGET_NR_getgroups32:
5867 int gidsetsize = arg1;
5868 uint32_t *target_grouplist;
5872 grouplist = alloca(gidsetsize * sizeof(gid_t));
5873 ret = get_errno(getgroups(gidsetsize, grouplist));
5874 if (gidsetsize == 0)
5876 if (!is_error(ret)) {
5877 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
5878 if (!target_grouplist) {
5879 ret = -TARGET_EFAULT;
5882 for(i = 0;i < ret; i++)
5883 target_grouplist[i] = tswap32(grouplist[i]);
5884 unlock_user(target_grouplist, arg2, gidsetsize * 4);
5889 #ifdef TARGET_NR_setgroups32
5890 case TARGET_NR_setgroups32:
5892 int gidsetsize = arg1;
5893 uint32_t *target_grouplist;
5897 grouplist = alloca(gidsetsize * sizeof(gid_t));
5898 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
5899 if (!target_grouplist) {
5900 ret = -TARGET_EFAULT;
5903 for(i = 0;i < gidsetsize; i++)
5904 grouplist[i] = tswap32(target_grouplist[i]);
5905 unlock_user(target_grouplist, arg2, 0);
5906 ret = get_errno(setgroups(gidsetsize, grouplist));
5910 #ifdef TARGET_NR_fchown32
5911 case TARGET_NR_fchown32:
5912 ret = get_errno(fchown(arg1, arg2, arg3));
5915 #ifdef TARGET_NR_setresuid32
5916 case TARGET_NR_setresuid32:
5917 ret = get_errno(setresuid(arg1, arg2, arg3));
5920 #ifdef TARGET_NR_getresuid32
5921 case TARGET_NR_getresuid32:
5923 uid_t ruid, euid, suid;
5924 ret = get_errno(getresuid(&ruid, &euid, &suid));
5925 if (!is_error(ret)) {
5926 if (put_user_u32(ruid, arg1)
5927 || put_user_u32(euid, arg2)
5928 || put_user_u32(suid, arg3))
5934 #ifdef TARGET_NR_setresgid32
5935 case TARGET_NR_setresgid32:
5936 ret = get_errno(setresgid(arg1, arg2, arg3));
5939 #ifdef TARGET_NR_getresgid32
5940 case TARGET_NR_getresgid32:
5942 gid_t rgid, egid, sgid;
5943 ret = get_errno(getresgid(&rgid, &egid, &sgid));
5944 if (!is_error(ret)) {
5945 if (put_user_u32(rgid, arg1)
5946 || put_user_u32(egid, arg2)
5947 || put_user_u32(sgid, arg3))
5953 #ifdef TARGET_NR_chown32
5954 case TARGET_NR_chown32:
5955 if (!(p = lock_user_string(arg1)))
5957 ret = get_errno(chown(p, arg2, arg3));
5958 unlock_user(p, arg1, 0);
5961 #ifdef TARGET_NR_setuid32
5962 case TARGET_NR_setuid32:
5963 ret = get_errno(setuid(arg1));
5966 #ifdef TARGET_NR_setgid32
5967 case TARGET_NR_setgid32:
5968 ret = get_errno(setgid(arg1));
5971 #ifdef TARGET_NR_setfsuid32
5972 case TARGET_NR_setfsuid32:
5973 ret = get_errno(setfsuid(arg1));
5976 #ifdef TARGET_NR_setfsgid32
5977 case TARGET_NR_setfsgid32:
5978 ret = get_errno(setfsgid(arg1));
5982 case TARGET_NR_pivot_root:
5984 #ifdef TARGET_NR_mincore
5985 case TARGET_NR_mincore:
5988 ret = -TARGET_EFAULT;
5989 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
5991 if (!(p = lock_user_string(arg3)))
5993 ret = get_errno(mincore(a, arg2, p));
5994 unlock_user(p, arg3, ret);
5996 unlock_user(a, arg1, 0);
6000 #ifdef TARGET_NR_arm_fadvise64_64
6001 case TARGET_NR_arm_fadvise64_64:
6004 * arm_fadvise64_64 looks like fadvise64_64 but
6005 * with different argument order
6013 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
6014 #ifdef TARGET_NR_fadvise64_64
6015 case TARGET_NR_fadvise64_64:
6017 /* This is a hint, so ignoring and returning success is ok. */
6021 #ifdef TARGET_NR_madvise
6022 case TARGET_NR_madvise:
6023 /* A straight passthrough may not be safe because qemu sometimes
6024 turns private file-backed mappings into anonymous mappings.
6025 This will break MADV_DONTNEED.
6026 This is a hint, so ignoring and returning success is ok. */
6030 #if TARGET_ABI_BITS == 32
6031 case TARGET_NR_fcntl64:
6035 struct target_flock64 *target_fl;
6037 struct target_eabi_flock64 *target_efl;
6041 case TARGET_F_GETLK64:
6044 case TARGET_F_SETLK64:
6047 case TARGET_F_SETLKW64:
6056 case TARGET_F_GETLK64:
6058 if (((CPUARMState *)cpu_env)->eabi) {
6059 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6061 fl.l_type = tswap16(target_efl->l_type);
6062 fl.l_whence = tswap16(target_efl->l_whence);
6063 fl.l_start = tswap64(target_efl->l_start);
6064 fl.l_len = tswap64(target_efl->l_len);
6065 fl.l_pid = tswapl(target_efl->l_pid);
6066 unlock_user_struct(target_efl, arg3, 0);
6070 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6072 fl.l_type = tswap16(target_fl->l_type);
6073 fl.l_whence = tswap16(target_fl->l_whence);
6074 fl.l_start = tswap64(target_fl->l_start);
6075 fl.l_len = tswap64(target_fl->l_len);
6076 fl.l_pid = tswapl(target_fl->l_pid);
6077 unlock_user_struct(target_fl, arg3, 0);
6079 ret = get_errno(fcntl(arg1, cmd, &fl));
6082 if (((CPUARMState *)cpu_env)->eabi) {
6083 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
6085 target_efl->l_type = tswap16(fl.l_type);
6086 target_efl->l_whence = tswap16(fl.l_whence);
6087 target_efl->l_start = tswap64(fl.l_start);
6088 target_efl->l_len = tswap64(fl.l_len);
6089 target_efl->l_pid = tswapl(fl.l_pid);
6090 unlock_user_struct(target_efl, arg3, 1);
6094 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
6096 target_fl->l_type = tswap16(fl.l_type);
6097 target_fl->l_whence = tswap16(fl.l_whence);
6098 target_fl->l_start = tswap64(fl.l_start);
6099 target_fl->l_len = tswap64(fl.l_len);
6100 target_fl->l_pid = tswapl(fl.l_pid);
6101 unlock_user_struct(target_fl, arg3, 1);
6106 case TARGET_F_SETLK64:
6107 case TARGET_F_SETLKW64:
6109 if (((CPUARMState *)cpu_env)->eabi) {
6110 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6112 fl.l_type = tswap16(target_efl->l_type);
6113 fl.l_whence = tswap16(target_efl->l_whence);
6114 fl.l_start = tswap64(target_efl->l_start);
6115 fl.l_len = tswap64(target_efl->l_len);
6116 fl.l_pid = tswapl(target_efl->l_pid);
6117 unlock_user_struct(target_efl, arg3, 0);
6121 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6123 fl.l_type = tswap16(target_fl->l_type);
6124 fl.l_whence = tswap16(target_fl->l_whence);
6125 fl.l_start = tswap64(target_fl->l_start);
6126 fl.l_len = tswap64(target_fl->l_len);
6127 fl.l_pid = tswapl(target_fl->l_pid);
6128 unlock_user_struct(target_fl, arg3, 0);
6130 ret = get_errno(fcntl(arg1, cmd, &fl));
6133 ret = do_fcntl(arg1, cmd, arg3);
6139 #ifdef TARGET_NR_cacheflush
6140 case TARGET_NR_cacheflush:
6141 /* self-modifying code is handled automatically, so nothing needed */
6145 #ifdef TARGET_NR_security
6146 case TARGET_NR_security:
6149 #ifdef TARGET_NR_getpagesize
6150 case TARGET_NR_getpagesize:
6151 ret = TARGET_PAGE_SIZE;
6154 case TARGET_NR_gettid:
6155 ret = get_errno(gettid());
6157 #ifdef TARGET_NR_readahead
6158 case TARGET_NR_readahead:
6159 #if TARGET_ABI_BITS == 32
6161 if (((CPUARMState *)cpu_env)->eabi)
6168 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
6170 ret = get_errno(readahead(arg1, arg2, arg3));
6174 #ifdef TARGET_NR_setxattr
6175 case TARGET_NR_setxattr:
6176 case TARGET_NR_lsetxattr:
6177 case TARGET_NR_fsetxattr:
6178 case TARGET_NR_getxattr:
6179 case TARGET_NR_lgetxattr:
6180 case TARGET_NR_fgetxattr:
6181 case TARGET_NR_listxattr:
6182 case TARGET_NR_llistxattr:
6183 case TARGET_NR_flistxattr:
6184 case TARGET_NR_removexattr:
6185 case TARGET_NR_lremovexattr:
6186 case TARGET_NR_fremovexattr:
6187 goto unimplemented_nowarn;
6189 #ifdef TARGET_NR_set_thread_area
6190 case TARGET_NR_set_thread_area:
6191 #if defined(TARGET_MIPS)
6192 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
6195 #elif defined(TARGET_CRIS)
6197 ret = -TARGET_EINVAL;
6199 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
6203 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6204 ret = do_set_thread_area(cpu_env, arg1);
6207 goto unimplemented_nowarn;
6210 #ifdef TARGET_NR_get_thread_area
6211 case TARGET_NR_get_thread_area:
6212 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6213 ret = do_get_thread_area(cpu_env, arg1);
6215 goto unimplemented_nowarn;
6218 #ifdef TARGET_NR_getdomainname
6219 case TARGET_NR_getdomainname:
6220 goto unimplemented_nowarn;
6223 #ifdef TARGET_NR_clock_gettime
6224 case TARGET_NR_clock_gettime:
6227 ret = get_errno(clock_gettime(arg1, &ts));
6228 if (!is_error(ret)) {
6229 host_to_target_timespec(arg2, &ts);
6234 #ifdef TARGET_NR_clock_getres
6235 case TARGET_NR_clock_getres:
6238 ret = get_errno(clock_getres(arg1, &ts));
6239 if (!is_error(ret)) {
6240 host_to_target_timespec(arg2, &ts);
6245 #ifdef TARGET_NR_clock_nanosleep
6246 case TARGET_NR_clock_nanosleep:
6249 target_to_host_timespec(&ts, arg3);
6250 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
6252 host_to_target_timespec(arg4, &ts);
6257 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
6258 case TARGET_NR_set_tid_address:
6259 ret = get_errno(set_tid_address((int *)g2h(arg1)));
6263 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
6264 case TARGET_NR_tkill:
6265 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
6269 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
6270 case TARGET_NR_tgkill:
6271 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
6272 target_to_host_signal(arg3)));
6276 #ifdef TARGET_NR_set_robust_list
6277 case TARGET_NR_set_robust_list:
6278 goto unimplemented_nowarn;
6281 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
6282 case TARGET_NR_utimensat:
6284 struct timespec ts[2];
6285 target_to_host_timespec(ts, arg3);
6286 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
6288 ret = get_errno(sys_utimensat(arg1, NULL, ts, arg4));
6290 if (!(p = lock_user_string(arg2))) {
6291 ret = -TARGET_EFAULT;
6294 ret = get_errno(sys_utimensat(arg1, path(p), ts, arg4));
6295 unlock_user(p, arg2, 0);
6300 #if defined(USE_NPTL)
6301 case TARGET_NR_futex:
6302 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
6305 #ifdef TARGET_NR_inotify_init
6306 case TARGET_NR_inotify_init:
6307 ret = get_errno(sys_inotify_init());
6310 #ifdef TARGET_NR_inotify_add_watch
6311 case TARGET_NR_inotify_add_watch:
6312 p = lock_user_string(arg2);
6313 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
6314 unlock_user(p, arg2, 0);
6317 #ifdef TARGET_NR_inotify_rm_watch
6318 case TARGET_NR_inotify_rm_watch:
6319 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
6325 gemu_log("qemu: Unsupported syscall: %d\n", num);
6326 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
6327 unimplemented_nowarn:
6329 ret = -TARGET_ENOSYS;
6334 gemu_log(" = %ld\n", ret);
6337 print_syscall_ret(num, ret);
6340 ret = -TARGET_EFAULT;