4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
33 #include <sys/types.h>
39 #include <sys/mount.h>
40 #include <sys/prctl.h>
41 #include <sys/resource.h>
46 #include <sys/socket.h>
50 #include <sys/times.h>
53 #include <sys/statfs.h>
55 #include <sys/sysinfo.h>
56 #include <sys/utsname.h>
57 //#include <sys/user.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 #include <qemu-common.h>
65 #define termios host_termios
66 #define winsize host_winsize
67 #define termio host_termio
68 #define sgttyb host_sgttyb /* same as target */
69 #define tchars host_tchars /* same as target */
70 #define ltchars host_ltchars /* same as target */
72 #include <linux/termios.h>
73 #include <linux/unistd.h>
74 #include <linux/utsname.h>
75 #include <linux/cdrom.h>
76 #include <linux/hdreg.h>
77 #include <linux/soundcard.h>
79 #include <linux/mtio.h>
80 #include "linux_loop.h"
83 #include "qemu-common.h"
86 #include <linux/futex.h>
87 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
88 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
90 /* XXX: Hardcode the above values. */
91 #define CLONE_NPTL_FLAGS2 0
96 //#include <linux/msdos_fs.h>
97 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
98 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/*
 * Helper macros that generate a static wrapper function `name` invoking the
 * raw host syscall __NR_<name> via syscall(2), for 0..6 arguments.  The
 * wrapper returns the raw syscall result (-1 with host errno set on failure).
 */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
156 #define __NR_sys_uname __NR_uname
157 #define __NR_sys_faccessat __NR_faccessat
158 #define __NR_sys_fchmodat __NR_fchmodat
159 #define __NR_sys_fchownat __NR_fchownat
160 #define __NR_sys_fstatat64 __NR_fstatat64
161 #define __NR_sys_futimesat __NR_futimesat
162 #define __NR_sys_getcwd1 __NR_getcwd
163 #define __NR_sys_getdents __NR_getdents
164 #define __NR_sys_getdents64 __NR_getdents64
165 #define __NR_sys_getpriority __NR_getpriority
166 #define __NR_sys_linkat __NR_linkat
167 #define __NR_sys_mkdirat __NR_mkdirat
168 #define __NR_sys_mknodat __NR_mknodat
169 #define __NR_sys_newfstatat __NR_newfstatat
170 #define __NR_sys_openat __NR_openat
171 #define __NR_sys_readlinkat __NR_readlinkat
172 #define __NR_sys_renameat __NR_renameat
173 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
174 #define __NR_sys_symlinkat __NR_symlinkat
175 #define __NR_sys_syslog __NR_syslog
176 #define __NR_sys_tgkill __NR_tgkill
177 #define __NR_sys_tkill __NR_tkill
178 #define __NR_sys_unlinkat __NR_unlinkat
179 #define __NR_sys_utimensat __NR_utimensat
180 #define __NR_sys_futex __NR_futex
181 #define __NR_sys_inotify_init __NR_inotify_init
182 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
183 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
185 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
186 #define __NR__llseek __NR_lseek
190 _syscall0(int, gettid)
192 /* This is a replacement for the host gettid() and must return a host
194 static int gettid(void) {
198 #if TARGET_ABI_BITS == 32
199 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
201 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
202 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
204 _syscall2(int, sys_getpriority, int, which, int, who);
205 #if !defined (__x86_64__)
206 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
207 loff_t *, res, uint, wh);
209 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
210 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
211 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
212 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
214 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
215 _syscall2(int,sys_tkill,int,tid,int,sig)
217 #ifdef __NR_exit_group
218 _syscall1(int,exit_group,int,error_code)
220 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
221 _syscall1(int,set_tid_address,int *,tidptr)
223 #if defined(USE_NPTL)
224 #if defined(TARGET_NR_futex) && defined(__NR_futex)
225 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
226 const struct timespec *,timeout,int *,uaddr2,int,val3)
230 static bitmask_transtbl fcntl_flags_tbl[] = {
231 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
232 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
233 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
234 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
235 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
236 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
237 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
238 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
239 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
240 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
241 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
242 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
243 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
244 #if defined(O_DIRECT)
245 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
/* Copy one utsname field, forcing NUL termination: __NEW_UTS_LEN doesn't
 * include the terminating null, so the destination has one extra byte. */
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

/* Fill `buf` (the kernel's new_utsname layout) from the host's uname(2).
 * Returns 0 on success, -1 with host errno set on failure. */
static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  /* memset instead of the deprecated bzero (removed in POSIX.1-2008). */
  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);
}
#undef COPY_UTSNAME_FIELD
/* getcwd(2)-style helper: on success returns the length of the path
 * INCLUDING the terminating NUL (the kernel getcwd convention); on
 * failure returns -1 with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
295 * Host system seems to have atfile syscall stubs available. We
296 * now enable them one by one as specified by target syscall_nr.h.
299 #ifdef TARGET_NR_faccessat
300 static int sys_faccessat(int dirfd, const char *pathname, int mode)
302 return (faccessat(dirfd, pathname, mode, 0));
305 #ifdef TARGET_NR_fchmodat
306 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
308 return (fchmodat(dirfd, pathname, mode, 0));
311 #if defined(TARGET_NR_fchownat) && defined(USE_UID16)
312 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
313 gid_t group, int flags)
315 return (fchownat(dirfd, pathname, owner, group, flags));
318 #ifdef __NR_fstatat64
319 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
322 return (fstatat(dirfd, pathname, buf, flags));
325 #ifdef __NR_newfstatat
326 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
329 return (fstatat(dirfd, pathname, buf, flags));
332 #ifdef TARGET_NR_futimesat
333 static int sys_futimesat(int dirfd, const char *pathname,
334 const struct timeval times[2])
336 return (futimesat(dirfd, pathname, times));
339 #ifdef TARGET_NR_linkat
340 static int sys_linkat(int olddirfd, const char *oldpath,
341 int newdirfd, const char *newpath, int flags)
343 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
346 #ifdef TARGET_NR_mkdirat
347 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
349 return (mkdirat(dirfd, pathname, mode));
352 #ifdef TARGET_NR_mknodat
353 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
356 return (mknodat(dirfd, pathname, mode, dev));
359 #ifdef TARGET_NR_openat
360 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
363 * open(2) has extra parameter 'mode' when called with
366 if ((flags & O_CREAT) != 0) {
371 * Get the 'mode' parameter and translate it to
375 mode = va_arg(ap, mode_t);
376 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
379 return (openat(dirfd, pathname, flags, mode));
381 return (openat(dirfd, pathname, flags));
384 #ifdef TARGET_NR_readlinkat
385 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
387 return (readlinkat(dirfd, pathname, buf, bufsiz));
390 #ifdef TARGET_NR_renameat
391 static int sys_renameat(int olddirfd, const char *oldpath,
392 int newdirfd, const char *newpath)
394 return (renameat(olddirfd, oldpath, newdirfd, newpath));
397 #ifdef TARGET_NR_symlinkat
398 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
400 return (symlinkat(oldpath, newdirfd, newpath));
403 #ifdef TARGET_NR_unlinkat
404 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
406 return (unlinkat(dirfd, pathname, flags));
409 #else /* !CONFIG_ATFILE */
412 * Try direct syscalls instead
414 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
415 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
417 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
418 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
420 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
421 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
422 uid_t,owner,gid_t,group,int,flags)
424 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
425 defined(__NR_fstatat64)
426 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
427 struct stat *,buf,int,flags)
429 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
430 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
431 const struct timeval *,times)
433 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
434 defined(__NR_newfstatat)
435 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
436 struct stat *,buf,int,flags)
438 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
439 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
440 int,newdirfd,const char *,newpath,int,flags)
442 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
443 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
445 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
446 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
447 mode_t,mode,dev_t,dev)
449 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
450 defined(__NR_newfstatat)
451 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
452 struct stat *,buf,int,flags)
454 #if defined(TARGET_NR_openat) && defined(__NR_openat)
455 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
457 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
458 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
459 char *,buf,size_t,bufsize)
461 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
462 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
463 int,newdirfd,const char *,newpath)
465 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
466 _syscall3(int,sys_symlinkat,const char *,oldpath,
467 int,newdirfd,const char *,newpath)
469 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
470 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
473 #endif /* CONFIG_ATFILE */
#ifdef CONFIG_UTIMENSAT
/* Wrapper matching the kernel utimensat semantics: a NULL pathname means
 * "operate on the fd itself", which libc exposes as futimens(). */
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT  */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
/* Host libc functions that may lack prototypes in older headers. */
extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

/* Upper bound on any errno value we translate in either direction. */
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = { 0 };
534 * This list is the union of errno values overridden in asm-<arch>/errno.h
535 * minus the errnos that are not actually generic to all archs.
537 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
538 [EIDRM] = TARGET_EIDRM,
539 [ECHRNG] = TARGET_ECHRNG,
540 [EL2NSYNC] = TARGET_EL2NSYNC,
541 [EL3HLT] = TARGET_EL3HLT,
542 [EL3RST] = TARGET_EL3RST,
543 [ELNRNG] = TARGET_ELNRNG,
544 [EUNATCH] = TARGET_EUNATCH,
545 [ENOCSI] = TARGET_ENOCSI,
546 [EL2HLT] = TARGET_EL2HLT,
547 [EDEADLK] = TARGET_EDEADLK,
548 [ENOLCK] = TARGET_ENOLCK,
549 [EBADE] = TARGET_EBADE,
550 [EBADR] = TARGET_EBADR,
551 [EXFULL] = TARGET_EXFULL,
552 [ENOANO] = TARGET_ENOANO,
553 [EBADRQC] = TARGET_EBADRQC,
554 [EBADSLT] = TARGET_EBADSLT,
555 [EBFONT] = TARGET_EBFONT,
556 [ENOSTR] = TARGET_ENOSTR,
557 [ENODATA] = TARGET_ENODATA,
558 [ETIME] = TARGET_ETIME,
559 [ENOSR] = TARGET_ENOSR,
560 [ENONET] = TARGET_ENONET,
561 [ENOPKG] = TARGET_ENOPKG,
562 [EREMOTE] = TARGET_EREMOTE,
563 [ENOLINK] = TARGET_ENOLINK,
564 [EADV] = TARGET_EADV,
565 [ESRMNT] = TARGET_ESRMNT,
566 [ECOMM] = TARGET_ECOMM,
567 [EPROTO] = TARGET_EPROTO,
568 [EDOTDOT] = TARGET_EDOTDOT,
569 [EMULTIHOP] = TARGET_EMULTIHOP,
570 [EBADMSG] = TARGET_EBADMSG,
571 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
572 [EOVERFLOW] = TARGET_EOVERFLOW,
573 [ENOTUNIQ] = TARGET_ENOTUNIQ,
574 [EBADFD] = TARGET_EBADFD,
575 [EREMCHG] = TARGET_EREMCHG,
576 [ELIBACC] = TARGET_ELIBACC,
577 [ELIBBAD] = TARGET_ELIBBAD,
578 [ELIBSCN] = TARGET_ELIBSCN,
579 [ELIBMAX] = TARGET_ELIBMAX,
580 [ELIBEXEC] = TARGET_ELIBEXEC,
581 [EILSEQ] = TARGET_EILSEQ,
582 [ENOSYS] = TARGET_ENOSYS,
583 [ELOOP] = TARGET_ELOOP,
584 [ERESTART] = TARGET_ERESTART,
585 [ESTRPIPE] = TARGET_ESTRPIPE,
586 [ENOTEMPTY] = TARGET_ENOTEMPTY,
587 [EUSERS] = TARGET_EUSERS,
588 [ENOTSOCK] = TARGET_ENOTSOCK,
589 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
590 [EMSGSIZE] = TARGET_EMSGSIZE,
591 [EPROTOTYPE] = TARGET_EPROTOTYPE,
592 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
593 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
594 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
595 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
596 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
597 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
598 [EADDRINUSE] = TARGET_EADDRINUSE,
599 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
600 [ENETDOWN] = TARGET_ENETDOWN,
601 [ENETUNREACH] = TARGET_ENETUNREACH,
602 [ENETRESET] = TARGET_ENETRESET,
603 [ECONNABORTED] = TARGET_ECONNABORTED,
604 [ECONNRESET] = TARGET_ECONNRESET,
605 [ENOBUFS] = TARGET_ENOBUFS,
606 [EISCONN] = TARGET_EISCONN,
607 [ENOTCONN] = TARGET_ENOTCONN,
608 [EUCLEAN] = TARGET_EUCLEAN,
609 [ENOTNAM] = TARGET_ENOTNAM,
610 [ENAVAIL] = TARGET_ENAVAIL,
611 [EISNAM] = TARGET_EISNAM,
612 [EREMOTEIO] = TARGET_EREMOTEIO,
613 [ESHUTDOWN] = TARGET_ESHUTDOWN,
614 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
615 [ETIMEDOUT] = TARGET_ETIMEDOUT,
616 [ECONNREFUSED] = TARGET_ECONNREFUSED,
617 [EHOSTDOWN] = TARGET_EHOSTDOWN,
618 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
619 [EALREADY] = TARGET_EALREADY,
620 [EINPROGRESS] = TARGET_EINPROGRESS,
621 [ESTALE] = TARGET_ESTALE,
622 [ECANCELED] = TARGET_ECANCELED,
623 [ENOMEDIUM] = TARGET_ENOMEDIUM,
624 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
626 [ENOKEY] = TARGET_ENOKEY,
629 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
632 [EKEYREVOKED] = TARGET_EKEYREVOKED,
635 [EKEYREJECTED] = TARGET_EKEYREJECTED,
638 [EOWNERDEAD] = TARGET_EOWNERDEAD,
640 #ifdef ENOTRECOVERABLE
641 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
645 static inline int host_to_target_errno(int err)
647 if(host_to_target_errno_table[err])
648 return host_to_target_errno_table[err];
652 static inline int target_to_host_errno(int err)
654 if (target_to_host_errno_table[err])
655 return target_to_host_errno_table[err];
659 static inline abi_long get_errno(abi_long ret)
662 return -host_to_target_errno(errno);
667 static inline int is_error(abi_long ret)
669 return (abi_ulong)ret >= (abi_ulong)(-4096);
/* Return the host strerror() text for a target errno value. */
char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
677 static abi_ulong target_brk;
678 static abi_ulong target_original_brk;
680 void target_set_brk(abi_ulong new_brk)
682 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
685 /* do_brk() must return target values and target errnos. */
686 abi_long do_brk(abi_ulong new_brk)
689 abi_long mapped_addr;
694 if (new_brk < target_original_brk)
697 brk_page = HOST_PAGE_ALIGN(target_brk);
699 /* If the new brk is less than this, set it and we're done... */
700 if (new_brk < brk_page) {
701 target_brk = new_brk;
705 /* We need to allocate more memory after the brk... */
706 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
707 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
708 PROT_READ|PROT_WRITE,
709 MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
711 if (!is_error(mapped_addr))
712 target_brk = new_brk;
717 static inline abi_long copy_from_user_fdset(fd_set *fds,
718 abi_ulong target_fds_addr,
722 abi_ulong b, *target_fds;
724 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
725 if (!(target_fds = lock_user(VERIFY_READ,
727 sizeof(abi_ulong) * nw,
729 return -TARGET_EFAULT;
733 for (i = 0; i < nw; i++) {
734 /* grab the abi_ulong */
735 __get_user(b, &target_fds[i]);
736 for (j = 0; j < TARGET_ABI_BITS; j++) {
737 /* check the bit inside the abi_ulong */
744 unlock_user(target_fds, target_fds_addr, 0);
749 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
755 abi_ulong *target_fds;
757 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
758 if (!(target_fds = lock_user(VERIFY_WRITE,
760 sizeof(abi_ulong) * nw,
762 return -TARGET_EFAULT;
765 for (i = 0; i < nw; i++) {
767 for (j = 0; j < TARGET_ABI_BITS; j++) {
768 v |= ((FD_ISSET(k, fds) != 0) << j);
771 __put_user(v, &target_fds[i]);
774 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
779 #if defined(__alpha__)
785 static inline abi_long host_to_target_clock_t(long ticks)
787 #if HOST_HZ == TARGET_HZ
790 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
794 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
795 const struct rusage *rusage)
797 struct target_rusage *target_rusage;
799 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
800 return -TARGET_EFAULT;
801 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
802 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
803 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
804 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
805 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
806 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
807 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
808 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
809 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
810 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
811 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
812 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
813 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
814 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
815 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
816 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
817 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
818 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
819 unlock_user_struct(target_rusage, target_addr, 1);
824 static inline abi_long copy_from_user_timeval(struct timeval *tv,
825 abi_ulong target_tv_addr)
827 struct target_timeval *target_tv;
829 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
830 return -TARGET_EFAULT;
832 __get_user(tv->tv_sec, &target_tv->tv_sec);
833 __get_user(tv->tv_usec, &target_tv->tv_usec);
835 unlock_user_struct(target_tv, target_tv_addr, 0);
840 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
841 const struct timeval *tv)
843 struct target_timeval *target_tv;
845 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
846 return -TARGET_EFAULT;
848 __put_user(tv->tv_sec, &target_tv->tv_sec);
849 __put_user(tv->tv_usec, &target_tv->tv_usec);
851 unlock_user_struct(target_tv, target_tv_addr, 1);
856 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
857 abi_ulong target_mq_attr_addr)
859 struct target_mq_attr *target_mq_attr;
861 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
862 target_mq_attr_addr, 1))
863 return -TARGET_EFAULT;
865 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
866 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
867 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
868 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
870 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
875 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
876 const struct mq_attr *attr)
878 struct target_mq_attr *target_mq_attr;
880 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
881 target_mq_attr_addr, 0))
882 return -TARGET_EFAULT;
884 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
885 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
886 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
887 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
889 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
894 /* do_select() must return target values and target errnos. */
895 static abi_long do_select(int n,
896 abi_ulong rfd_addr, abi_ulong wfd_addr,
897 abi_ulong efd_addr, abi_ulong target_tv_addr)
899 fd_set rfds, wfds, efds;
900 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
901 struct timeval tv, *tv_ptr;
905 if (copy_from_user_fdset(&rfds, rfd_addr, n))
906 return -TARGET_EFAULT;
912 if (copy_from_user_fdset(&wfds, wfd_addr, n))
913 return -TARGET_EFAULT;
919 if (copy_from_user_fdset(&efds, efd_addr, n))
920 return -TARGET_EFAULT;
926 if (target_tv_addr) {
927 if (copy_from_user_timeval(&tv, target_tv_addr))
928 return -TARGET_EFAULT;
934 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
936 if (!is_error(ret)) {
937 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
938 return -TARGET_EFAULT;
939 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
940 return -TARGET_EFAULT;
941 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
942 return -TARGET_EFAULT;
944 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
945 return -TARGET_EFAULT;
951 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
952 abi_ulong target_addr,
955 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
956 sa_family_t sa_family;
957 struct target_sockaddr *target_saddr;
959 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
961 return -TARGET_EFAULT;
963 sa_family = tswap16(target_saddr->sa_family);
965 /* Oops. The caller might send a incomplete sun_path; sun_path
966 * must be terminated by \0 (see the manual page), but
967 * unfortunately it is quite common to specify sockaddr_un
968 * length as "strlen(x->sun_path)" while it should be
969 * "strlen(...) + 1". We'll fix that here if needed.
970 * Linux kernel has a similar feature.
973 if (sa_family == AF_UNIX) {
974 if (len < unix_maxlen && len > 0) {
975 char *cp = (char*)target_saddr;
977 if ( cp[len-1] && !cp[len] )
980 if (len > unix_maxlen)
984 memcpy(addr, target_saddr, len);
985 addr->sa_family = sa_family;
986 unlock_user(target_saddr, target_addr, 0);
991 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
992 struct sockaddr *addr,
995 struct target_sockaddr *target_saddr;
997 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
999 return -TARGET_EFAULT;
1000 memcpy(target_saddr, addr, len);
1001 target_saddr->sa_family = tswap16(addr->sa_family);
1002 unlock_user(target_saddr, target_addr, len);
1007 /* ??? Should this also swap msgh->name? */
1008 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1009 struct target_msghdr *target_msgh)
1011 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1012 abi_long msg_controllen;
1013 abi_ulong target_cmsg_addr;
1014 struct target_cmsghdr *target_cmsg;
1015 socklen_t space = 0;
1017 msg_controllen = tswapl(target_msgh->msg_controllen);
1018 if (msg_controllen < sizeof (struct target_cmsghdr))
1020 target_cmsg_addr = tswapl(target_msgh->msg_control);
1021 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1023 return -TARGET_EFAULT;
1025 while (cmsg && target_cmsg) {
1026 void *data = CMSG_DATA(cmsg);
1027 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1029 int len = tswapl(target_cmsg->cmsg_len)
1030 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1032 space += CMSG_SPACE(len);
1033 if (space > msgh->msg_controllen) {
1034 space -= CMSG_SPACE(len);
1035 gemu_log("Host cmsg overflow\n");
1039 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1040 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1041 cmsg->cmsg_len = CMSG_LEN(len);
1043 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1044 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1045 memcpy(data, target_data, len);
1047 int *fd = (int *)data;
1048 int *target_fd = (int *)target_data;
1049 int i, numfds = len / sizeof(int);
1051 for (i = 0; i < numfds; i++)
1052 fd[i] = tswap32(target_fd[i]);
1055 cmsg = CMSG_NXTHDR(msgh, cmsg);
1056 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1058 unlock_user(target_cmsg, target_cmsg_addr, 0);
1060 msgh->msg_controllen = space;
1064 /* ??? Should this also swap msgh->name? */
1065 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1066 struct msghdr *msgh)
1068 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1069 abi_long msg_controllen;
1070 abi_ulong target_cmsg_addr;
1071 struct target_cmsghdr *target_cmsg;
1072 socklen_t space = 0;
1074 msg_controllen = tswapl(target_msgh->msg_controllen);
1075 if (msg_controllen < sizeof (struct target_cmsghdr))
1077 target_cmsg_addr = tswapl(target_msgh->msg_control);
1078 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1080 return -TARGET_EFAULT;
1082 while (cmsg && target_cmsg) {
1083 void *data = CMSG_DATA(cmsg);
1084 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1086 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1088 space += TARGET_CMSG_SPACE(len);
1089 if (space > msg_controllen) {
1090 space -= TARGET_CMSG_SPACE(len);
1091 gemu_log("Target cmsg overflow\n");
1095 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1096 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1097 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1099 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1100 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1101 memcpy(target_data, data, len);
1103 int *fd = (int *)data;
1104 int *target_fd = (int *)target_data;
1105 int i, numfds = len / sizeof(int);
1107 for (i = 0; i < numfds; i++)
1108 target_fd[i] = tswap32(fd[i]);
1111 cmsg = CMSG_NXTHDR(msgh, cmsg);
1112 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1114 unlock_user(target_cmsg, target_cmsg_addr, space);
1116 target_msgh->msg_controllen = tswapl(space);
1120 /* do_setsockopt() Must return target values and target errnos. */
/* Translates a guest setsockopt(2): reads the option value out of guest
   memory (get_user_*), maps TARGET_SO_* names to host SO_* names for
   SOL_SOCKET, and forwards to the host setsockopt().
   NOTE(review): this excerpt is elided — switch headers, `break`s and
   closing braces between the numbered lines are missing from this view. */
1121 static abi_long do_setsockopt(int sockfd, int level, int optname,
1122 abi_ulong optval_addr, socklen_t optlen)
1129 /* TCP options all take an 'int' value. */
1130 if (optlen < sizeof(uint32_t))
1131 return -TARGET_EINVAL;
1133 if (get_user_u32(val, optval_addr))
1134 return -TARGET_EFAULT;
/* TCP level: forward with host-sized int regardless of guest optlen. */
1135 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* IP level: these options accept either a full int or a single byte
   from the guest, so both widths are read below. */
1142 case IP_ROUTER_ALERT:
1146 case IP_MTU_DISCOVER:
1152 case IP_MULTICAST_TTL:
1153 case IP_MULTICAST_LOOP:
1155 if (optlen >= sizeof(uint32_t)) {
1156 if (get_user_u32(val, optval_addr))
1157 return -TARGET_EFAULT;
1158 } else if (optlen >= 1) {
1159 if (get_user_u8(val, optval_addr))
1160 return -TARGET_EFAULT;
1162 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1168 case TARGET_SOL_SOCKET:
1170 /* Options with 'int' argument. */
/* Map each target-level option name onto the host's constant; the
   numeric values differ between target ABIs and the host. */
1171 case TARGET_SO_DEBUG:
1174 case TARGET_SO_REUSEADDR:
1175 optname = SO_REUSEADDR;
1177 case TARGET_SO_TYPE:
1180 case TARGET_SO_ERROR:
1183 case TARGET_SO_DONTROUTE:
1184 optname = SO_DONTROUTE;
1186 case TARGET_SO_BROADCAST:
1187 optname = SO_BROADCAST;
1189 case TARGET_SO_SNDBUF:
1190 optname = SO_SNDBUF;
1192 case TARGET_SO_RCVBUF:
1193 optname = SO_RCVBUF;
1195 case TARGET_SO_KEEPALIVE:
1196 optname = SO_KEEPALIVE;
1198 case TARGET_SO_OOBINLINE:
1199 optname = SO_OOBINLINE;
1201 case TARGET_SO_NO_CHECK:
1202 optname = SO_NO_CHECK;
1204 case TARGET_SO_PRIORITY:
1205 optname = SO_PRIORITY;
1208 case TARGET_SO_BSDCOMPAT:
1209 optname = SO_BSDCOMPAT;
1212 case TARGET_SO_PASSCRED:
1213 optname = SO_PASSCRED;
1215 case TARGET_SO_TIMESTAMP:
1216 optname = SO_TIMESTAMP;
1218 case TARGET_SO_RCVLOWAT:
1219 optname = SO_RCVLOWAT;
/* NOTE(review): SO_RCVTIMEO/SO_SNDTIMEO take a struct timeval, not an
   int — treating them as 'int' options looks suspect; confirm. */
1221 case TARGET_SO_RCVTIMEO:
1222 optname = SO_RCVTIMEO;
1224 case TARGET_SO_SNDTIMEO:
1225 optname = SO_SNDTIMEO;
1231 if (optlen < sizeof(uint32_t))
1232 return -TARGET_EINVAL;
1234 if (get_user_u32(val, optval_addr))
1235 return -TARGET_EFAULT;
1236 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
/* Anything not translated above is reported and rejected. */
1240 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1241 ret = -TARGET_ENOPROTOOPT;
1246 /* do_getsockopt() Must return target values and target errnos. */
/* Translates a guest getsockopt(2): reads the guest's optlen, calls the
   host getsockopt() into a local, then writes value and length back to
   guest memory with byte swapping handled by put_user_*.
   NOTE(review): excerpt is elided — declarations, `break`s and braces
   between the numbered lines are missing from this view. */
1247 static abi_long do_getsockopt(int sockfd, int level, int optname,
1248 abi_ulong optval_addr, abi_ulong optlen)
1255 case TARGET_SOL_SOCKET:
/* Options whose result is a struct rather than an int are not
   implemented here. */
1258 case TARGET_SO_LINGER:
1259 case TARGET_SO_RCVTIMEO:
1260 case TARGET_SO_SNDTIMEO:
1261 case TARGET_SO_PEERCRED:
1262 case TARGET_SO_PEERNAME:
1263 /* These don't just return a single integer */
1270 /* TCP options all take an 'int' value. */
1272 if (get_user_u32(len, optlen))
1273 return -TARGET_EFAULT;
1275 return -TARGET_EINVAL;
1277 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv))
1284 if (put_user_u32(val, optval_addr))
1285 return -TARGET_EFAULT;
1287 if (put_user_u8(val, optval_addr))
1288 return -TARGET_EFAULT;
1290 if (put_user_u32(len, optlen))
1291 return -TARGET_EFAULT;
/* IP level options: result may be written back as one byte when the
   guest asked for less than an int (see range check below). */
1298 case IP_ROUTER_ALERT:
1302 case IP_MTU_DISCOVER:
1308 case IP_MULTICAST_TTL:
1309 case IP_MULTICAST_LOOP:
1310 if (get_user_u32(len, optlen))
1311 return -TARGET_EFAULT;
1313 return -TARGET_EINVAL;
1315 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv))
/* Narrow write-back only when the value actually fits in a byte. */
1318 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1320 if (put_user_u32(len, optlen)
1321 || put_user_u8(val, optval_addr))
1322 return -TARGET_EFAULT;
1324 if (len > sizeof(int))
1326 if (put_user_u32(len, optlen)
1327 || put_user_u32(val, optval_addr))
1328 return -TARGET_EFAULT;
1332 ret = -TARGET_ENOPROTOOPT;
1338 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1340 ret = -TARGET_EOPNOTSUPP;
1347 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1348 * other lock functions have a return code of 0 for failure.
/* Builds a host struct iovec array from a guest target_iovec array:
   locks the guest descriptor table, byte-swaps base/len of each entry,
   and locks each buffer (for read or write per `type`).
   NOTE(review): excerpt is elided — locals, NULL check after lock_user,
   loop close and final return are missing from this view. */
1350 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1351 int count, int copy)
1353 struct target_iovec *target_vec;
1357 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1359 return -TARGET_EFAULT;
1360 for(i = 0;i < count; i++) {
1361 base = tswapl(target_vec[i].iov_base);
1362 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1363 if (vec[i].iov_len != 0) {
1364 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1365 /* Don't check lock_user return value. We must call writev even
1366 if a element has invalid base address. */
1368 /* zero length pointer is ignored */
1369 vec[i].iov_base = NULL;
/* Release the descriptor table only; the per-entry buffers stay locked
   until unlock_iovec(). */
1372 unlock_user (target_vec, target_addr, 0);
/* Counterpart of lock_iovec(): re-reads the guest iovec table to recover
   each guest base address, then unlocks every buffer, copying data back
   to the guest when `copy` is set (i.e. after a read-style syscall).
   NOTE(review): excerpt is elided — locals, NULL check, loop close and
   final return are missing from this view. */
1376 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1377 int count, int copy)
1379 struct target_iovec *target_vec;
1383 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1385 return -TARGET_EFAULT;
1386 for(i = 0;i < count; i++) {
1387 if (target_vec[i].iov_base) {
1388 base = tswapl(target_vec[i].iov_base);
1389 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1392 unlock_user (target_vec, target_addr, 0);
1397 /* do_socket() Must return target values and target errnos. */
1398 static abi_long do_socket(int domain, int type, int protocol)
1400 #if defined(TARGET_MIPS)
1402 case TARGET_SOCK_DGRAM:
1405 case TARGET_SOCK_STREAM:
1408 case TARGET_SOCK_RAW:
1411 case TARGET_SOCK_RDM:
1414 case TARGET_SOCK_SEQPACKET:
1415 type = SOCK_SEQPACKET;
1417 case TARGET_SOCK_PACKET:
1422 if (domain == PF_NETLINK)
1423 return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
1424 return get_errno(socket(domain, type, protocol));
1427 /* do_bind() Must return target values and target errnos. */
/* Copies the guest sockaddr into a host stack buffer (with family/port
   translation done by target_to_host_sockaddr) and forwards to bind(2).
   NOTE(review): excerpt is elided — the second signature line (addrlen
   parameter), opening brace and locals are missing from this view. */
1428 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1434 return -TARGET_EINVAL;
/* NOTE(review): +1 differs from do_connect()'s alloca(addrlen); looks
   intentional (room for a NUL?) but confirm against the original. */
1436 addr = alloca(addrlen+1);
1438 target_to_host_sockaddr(addr, target_addr, addrlen);
1439 return get_errno(bind(sockfd, addr, addrlen));
1442 /* do_connect() Must return target values and target errnos. */
/* Copies the guest sockaddr into a host stack buffer and forwards to
   connect(2).  NOTE(review): excerpt is elided — second signature line,
   opening brace, locals and the addrlen validity check are missing. */
1443 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1449 return -TARGET_EINVAL;
1451 addr = alloca(addrlen);
1453 target_to_host_sockaddr(addr, target_addr, addrlen);
1454 return get_errno(connect(sockfd, addr, addrlen));
1457 /* do_sendrecvmsg() Must return target values and target errnos. */
/* Shared implementation of sendmsg(2)/recvmsg(2): converts the guest
   target_msghdr (name, iovec array, control data) into a host msghdr,
   performs the host call, and converts results back for recvmsg.
   `send` selects the direction.
   NOTE(review): excerpt is elided — some locals, braces and the final
   return are missing from this view. */
1458 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1459 int flags, int send)
1462 struct target_msghdr *msgp;
1466 abi_ulong target_vec;
/* Lock guest msghdr for read (send) or write (recv). */
1469 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1473 return -TARGET_EFAULT;
1474 if (msgp->msg_name) {
1475 msg.msg_namelen = tswap32(msgp->msg_namelen);
1476 msg.msg_name = alloca(msg.msg_namelen);
1477 target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1480 msg.msg_name = NULL;
1481 msg.msg_namelen = 0;
/* Control buffer is over-allocated (x2) because host cmsg layout can
   be larger than the target's — presumably; confirm against
   target_to_host_cmsg(). */
1483 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1484 msg.msg_control = alloca(msg.msg_controllen);
1485 msg.msg_flags = tswap32(msgp->msg_flags);
1487 count = tswapl(msgp->msg_iovlen);
1488 vec = alloca(count * sizeof(struct iovec));
1489 target_vec = tswapl(msgp->msg_iov);
1490 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1491 msg.msg_iovlen = count;
1495 ret = target_to_host_cmsg(&msg, msgp);
1497 ret = get_errno(sendmsg(fd, &msg, flags));
1499 ret = get_errno(recvmsg(fd, &msg, flags));
1500 if (!is_error(ret)) {
1502 ret = host_to_target_cmsg(msgp, &msg);
/* Copy data back to the guest only on the receive path (!send). */
1507 unlock_iovec(vec, target_vec, count, !send);
1508 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1512 /* do_accept() Must return target values and target errnos. */
/* accept(2) wrapper: a NULL guest address means the caller doesn't want
   the peer address; otherwise the guest's addrlen is read, the host
   accept fills a stack sockaddr, and both are copied back.
   NOTE(review): excerpt is elided — locals, the addrlen<0 check and
   the final return are missing from this view. */
1513 static abi_long do_accept(int fd, abi_ulong target_addr,
1514 abi_ulong target_addrlen_addr)
1520 if (target_addr == 0)
1521 return get_errno(accept(fd, NULL, NULL));
1523 if (get_user_u32(addrlen, target_addrlen_addr))
1524 return -TARGET_EFAULT;
1527 return -TARGET_EINVAL;
1529 addr = alloca(addrlen);
1531 ret = get_errno(accept(fd, addr, &addrlen));
1532 if (!is_error(ret)) {
1533 host_to_target_sockaddr(target_addr, addr, addrlen);
1534 if (put_user_u32(addrlen, target_addrlen_addr))
1535 ret = -TARGET_EFAULT;
1540 /* do_getpeername() Must return target values and target errnos. */
/* getpeername(2) wrapper: reads the guest addrlen, fills a host stack
   sockaddr, converts it back to target layout and writes the updated
   length.  NOTE(review): excerpt is elided — locals, the addrlen<0
   check and the final return are missing from this view. */
1541 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1542 abi_ulong target_addrlen_addr)
1548 if (get_user_u32(addrlen, target_addrlen_addr))
1549 return -TARGET_EFAULT;
1552 return -TARGET_EINVAL;
1554 addr = alloca(addrlen);
1556 ret = get_errno(getpeername(fd, addr, &addrlen));
1557 if (!is_error(ret)) {
1558 host_to_target_sockaddr(target_addr, addr, addrlen);
1559 if (put_user_u32(addrlen, target_addrlen_addr))
1560 ret = -TARGET_EFAULT;
1565 /* do_getsockname() Must return target values and target errnos. */
1566 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1567 abi_ulong target_addrlen_addr)
1573 if (target_addr == 0)
1574 return get_errno(accept(fd, NULL, NULL));
1576 if (get_user_u32(addrlen, target_addrlen_addr))
1577 return -TARGET_EFAULT;
1580 return -TARGET_EINVAL;
1582 addr = alloca(addrlen);
1584 ret = get_errno(getsockname(fd, addr, &addrlen));
1585 if (!is_error(ret)) {
1586 host_to_target_sockaddr(target_addr, addr, addrlen);
1587 if (put_user_u32(addrlen, target_addrlen_addr))
1588 ret = -TARGET_EFAULT;
1593 /* do_socketpair() Must return target values and target errnos. */
/* socketpair(2) wrapper: performs the host call into a local int[2]
   and stores the two descriptors consecutively into guest memory.
   NOTE(review): excerpt is elided — opening brace, locals (`tab`,
   `ret`) and the final return are missing from this view. */
1594 static abi_long do_socketpair(int domain, int type, int protocol,
1595 abi_ulong target_tab_addr)
1600 ret = get_errno(socketpair(domain, type, protocol, tab));
1601 if (!is_error(ret)) {
1602 if (put_user_s32(tab[0], target_tab_addr)
1603 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1604 ret = -TARGET_EFAULT;
1609 /* do_sendto() Must return target values and target errnos. */
/* Shared implementation of send(2)/sendto(2): locks the guest payload
   for reading; when a destination address is supplied it is converted
   and sendto() is used, otherwise plain send().
   NOTE(review): excerpt is elided — locals, the addrlen<0 check and
   the final return are missing from this view. */
1610 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1611 abi_ulong target_addr, socklen_t addrlen)
1618 return -TARGET_EINVAL;
1620 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1622 return -TARGET_EFAULT;
1624 addr = alloca(addrlen);
1625 target_to_host_sockaddr(addr, target_addr, addrlen);
1626 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1628 ret = get_errno(send(fd, host_msg, len, flags));
/* Payload was only read; nothing to copy back (length 0). */
1630 unlock_user(host_msg, msg, 0);
1634 /* do_recvfrom() Must return target values and target errnos. */
/* Shared implementation of recv(2)/recvfrom(2): locks the guest buffer
   for writing; when a source-address buffer is supplied, reads its
   length, receives with recvfrom() and copies address + length back.
   NOTE(review): excerpt is elided — locals, `goto fail` paths and the
   final return are missing from this view. */
1635 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1636 abi_ulong target_addr,
1637 abi_ulong target_addrlen)
1644 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1646 return -TARGET_EFAULT;
1648 if (get_user_u32(addrlen, target_addrlen)) {
1649 ret = -TARGET_EFAULT;
1653 ret = -TARGET_EINVAL;
1656 addr = alloca(addrlen);
1657 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1659 addr = NULL; /* To keep compiler quiet. */
1660 ret = get_errno(recv(fd, host_msg, len, flags));
1662 if (!is_error(ret)) {
1664 host_to_target_sockaddr(target_addr, addr, addrlen);
1665 if (put_user_u32(addrlen, target_addrlen)) {
1666 ret = -TARGET_EFAULT;
/* Success: copy `len` received bytes back to the guest buffer. */
1670 unlock_user(host_msg, msg, len);
/* Failure: release without copying anything back. */
1673 unlock_user(host_msg, msg, 0);
1678 #ifdef TARGET_NR_socketcall
1679 /* do_socketcall() Must return target values and target errnos. */
/* Demultiplexer for the socketcall(2) super-syscall used by several
   32-bit ABIs: `num` selects the operation and `vptr` points at a
   guest array of abi_ulong arguments, unpacked per-operation below and
   forwarded to the corresponding do_* helper.
   NOTE(review): excerpt is elided — the switch header, `break`s,
   closing braces and the final return are missing from this view. */
1680 static abi_long do_socketcall(int num, abi_ulong vptr)
/* n = size of one guest argument slot in the vptr array. */
1683 const int n = sizeof(abi_ulong);
1688 int domain, type, protocol;
1690 if (get_user_s32(domain, vptr)
1691 || get_user_s32(type, vptr + n)
1692 || get_user_s32(protocol, vptr + 2 * n))
1693 return -TARGET_EFAULT;
1695 ret = do_socket(domain, type, protocol);
1701 abi_ulong target_addr;
1704 if (get_user_s32(sockfd, vptr)
1705 || get_user_ual(target_addr, vptr + n)
1706 || get_user_u32(addrlen, vptr + 2 * n))
1707 return -TARGET_EFAULT;
1709 ret = do_bind(sockfd, target_addr, addrlen);
1712 case SOCKOP_connect:
1715 abi_ulong target_addr;
1718 if (get_user_s32(sockfd, vptr)
1719 || get_user_ual(target_addr, vptr + n)
1720 || get_user_u32(addrlen, vptr + 2 * n))
1721 return -TARGET_EFAULT;
1723 ret = do_connect(sockfd, target_addr, addrlen);
1728 int sockfd, backlog;
1730 if (get_user_s32(sockfd, vptr)
1731 || get_user_s32(backlog, vptr + n))
1732 return -TARGET_EFAULT;
/* listen() needs no argument translation; call the host directly. */
1734 ret = get_errno(listen(sockfd, backlog));
1740 abi_ulong target_addr, target_addrlen;
1742 if (get_user_s32(sockfd, vptr)
1743 || get_user_ual(target_addr, vptr + n)
1744 || get_user_u32(target_addrlen, vptr + 2 * n))
1745 return -TARGET_EFAULT;
1747 ret = do_accept(sockfd, target_addr, target_addrlen);
1750 case SOCKOP_getsockname:
1753 abi_ulong target_addr, target_addrlen;
1755 if (get_user_s32(sockfd, vptr)
1756 || get_user_ual(target_addr, vptr + n)
1757 || get_user_u32(target_addrlen, vptr + 2 * n))
1758 return -TARGET_EFAULT;
1760 ret = do_getsockname(sockfd, target_addr, target_addrlen);
1763 case SOCKOP_getpeername:
1766 abi_ulong target_addr, target_addrlen;
1768 if (get_user_s32(sockfd, vptr)
1769 || get_user_ual(target_addr, vptr + n)
1770 || get_user_u32(target_addrlen, vptr + 2 * n))
1771 return -TARGET_EFAULT;
1773 ret = do_getpeername(sockfd, target_addr, target_addrlen);
1776 case SOCKOP_socketpair:
1778 int domain, type, protocol;
1781 if (get_user_s32(domain, vptr)
1782 || get_user_s32(type, vptr + n)
1783 || get_user_s32(protocol, vptr + 2 * n)
1784 || get_user_ual(tab, vptr + 3 * n))
1785 return -TARGET_EFAULT;
1787 ret = do_socketpair(domain, type, protocol, tab);
/* send/recv without an address reuse do_sendto/do_recvfrom with a
   zero address, which selects the plain send()/recv() path. */
1797 if (get_user_s32(sockfd, vptr)
1798 || get_user_ual(msg, vptr + n)
1799 || get_user_ual(len, vptr + 2 * n)
1800 || get_user_s32(flags, vptr + 3 * n))
1801 return -TARGET_EFAULT;
1803 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
1813 if (get_user_s32(sockfd, vptr)
1814 || get_user_ual(msg, vptr + n)
1815 || get_user_ual(len, vptr + 2 * n)
1816 || get_user_s32(flags, vptr + 3 * n))
1817 return -TARGET_EFAULT;
1819 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
1831 if (get_user_s32(sockfd, vptr)
1832 || get_user_ual(msg, vptr + n)
1833 || get_user_ual(len, vptr + 2 * n)
1834 || get_user_s32(flags, vptr + 3 * n)
1835 || get_user_ual(addr, vptr + 4 * n)
1836 || get_user_u32(addrlen, vptr + 5 * n))
1837 return -TARGET_EFAULT;
1839 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
1842 case SOCKOP_recvfrom:
1851 if (get_user_s32(sockfd, vptr)
1852 || get_user_ual(msg, vptr + n)
1853 || get_user_ual(len, vptr + 2 * n)
1854 || get_user_s32(flags, vptr + 3 * n)
1855 || get_user_ual(addr, vptr + 4 * n)
1856 || get_user_u32(addrlen, vptr + 5 * n))
1857 return -TARGET_EFAULT;
1859 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
1862 case SOCKOP_shutdown:
1866 if (get_user_s32(sockfd, vptr)
1867 || get_user_s32(how, vptr + n))
1868 return -TARGET_EFAULT;
1870 ret = get_errno(shutdown(sockfd, how));
1873 case SOCKOP_sendmsg:
1874 case SOCKOP_recvmsg:
1877 abi_ulong target_msg;
1880 if (get_user_s32(fd, vptr)
1881 || get_user_ual(target_msg, vptr + n)
1882 || get_user_s32(flags, vptr + 2 * n))
1883 return -TARGET_EFAULT;
1885 ret = do_sendrecvmsg(fd, target_msg, flags,
1886 (num == SOCKOP_sendmsg));
1889 case SOCKOP_setsockopt:
1897 if (get_user_s32(sockfd, vptr)
1898 || get_user_s32(level, vptr + n)
1899 || get_user_s32(optname, vptr + 2 * n)
1900 || get_user_ual(optval, vptr + 3 * n)
1901 || get_user_u32(optlen, vptr + 4 * n))
1902 return -TARGET_EFAULT;
1904 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
1907 case SOCKOP_getsockopt:
1915 if (get_user_s32(sockfd, vptr)
1916 || get_user_s32(level, vptr + n)
1917 || get_user_s32(optname, vptr + 2 * n)
1918 || get_user_ual(optval, vptr + 3 * n)
1919 || get_user_u32(optlen, vptr + 4 * n))
1920 return -TARGET_EFAULT;
1922 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
1926 gemu_log("Unsupported socketcall: %d\n", num);
1927 ret = -TARGET_ENOSYS;
1934 #define N_SHM_REGIONS 32
/* Fixed-size table tracking guest shmat() mappings (start address and
   size) so do_shmdt() can clear page flags on detach.  A slot with
   start == 0 is free.  NOTE(review): field declarations are elided
   from this excerpt. */
1936 static struct shm_region {
1939 } shm_regions[N_SHM_REGIONS];
/* Guest-layout mirror of the kernel's struct ipc_perm, using abi_ulong
   for target-width fields.  NOTE(review): the key/uid/gid/cuid/cgid
   members referenced by the converters below are elided from this
   excerpt. */
1941 struct target_ipc_perm
1948 unsigned short int mode;
1949 unsigned short int __pad1;
1950 unsigned short int __seq;
1951 unsigned short int __pad2;
1952 abi_ulong __unused1;
1953 abi_ulong __unused2;
/* Guest-layout mirror of struct semid_ds; the paired __unusedN slots
   pad 32-bit targets' time fields to the kernel's layout. */
1956 struct target_semid_ds
1958 struct target_ipc_perm sem_perm;
1959 abi_ulong sem_otime;
1960 abi_ulong __unused1;
1961 abi_ulong sem_ctime;
1962 abi_ulong __unused2;
1963 abi_ulong sem_nsems;
1964 abi_ulong __unused3;
1965 abi_ulong __unused4;
/* Reads the ipc_perm embedded in the guest semid_ds at target_addr and
   fills the host struct ipc_perm, byte-swapping each field.
   Returns 0 on success, -TARGET_EFAULT if the guest page is bad.
   NOTE(review): excerpt is elided — the final `return 0;` is missing
   from this view. */
1968 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
1969 abi_ulong target_addr)
1971 struct target_ipc_perm *target_ip;
1972 struct target_semid_ds *target_sd;
1974 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
1975 return -TARGET_EFAULT;
1976 target_ip=&(target_sd->sem_perm);
1977 host_ip->__key = tswapl(target_ip->__key);
1978 host_ip->uid = tswapl(target_ip->uid);
1979 host_ip->gid = tswapl(target_ip->gid);
1980 host_ip->cuid = tswapl(target_ip->cuid);
1981 host_ip->cgid = tswapl(target_ip->cgid);
1982 host_ip->mode = tswapl(target_ip->mode);
1983 unlock_user_struct(target_sd, target_addr, 0);
/* Writes the host ipc_perm back into the guest semid_ds at target_addr,
   byte-swapping each field.  Returns 0 on success, -TARGET_EFAULT if
   the guest page is bad.  NOTE(review): excerpt is elided — the final
   `return 0;` is missing from this view. */
1987 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
1988 struct ipc_perm *host_ip)
1990 struct target_ipc_perm *target_ip;
1991 struct target_semid_ds *target_sd;
1993 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
1994 return -TARGET_EFAULT;
1995 target_ip = &(target_sd->sem_perm);
1996 target_ip->__key = tswapl(host_ip->__key);
1997 target_ip->uid = tswapl(host_ip->uid);
1998 target_ip->gid = tswapl(host_ip->gid);
1999 target_ip->cuid = tswapl(host_ip->cuid);
2000 target_ip->cgid = tswapl(host_ip->cgid);
2001 target_ip->mode = tswapl(host_ip->mode);
2002 unlock_user_struct(target_sd, target_addr, 1);
/* Converts a guest semid_ds (perm block plus nsems/otime/ctime) into
   the host struct.  NOTE(review): if target_to_host_ipc_perm() fails,
   target_sd is returned without unlock_user_struct() — possible lock
   leak; confirm lock_user semantics before changing.  The final
   `return 0;` is elided from this excerpt. */
2006 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2007 abi_ulong target_addr)
2009 struct target_semid_ds *target_sd;
2011 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2012 return -TARGET_EFAULT;
2013 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2014 return -TARGET_EFAULT;
2015 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2016 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2017 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2018 unlock_user_struct(target_sd, target_addr, 0);
2022 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2023 struct semid_ds *host_sd)
2025 struct target_semid_ds *target_sd;
2027 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2028 return -TARGET_EFAULT;
2029 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2030 return -TARGET_EFAULT;;
2031 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2032 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2033 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2034 unlock_user_struct(target_sd, target_addr, 1);
2038 struct target_seminfo {
/* Copies a host struct seminfo into the guest's target_seminfo,
   field by field via __put_user (handles byte order and field width).
   Returns 0 on success, -TARGET_EFAULT on a bad guest address.
   NOTE(review): the final `return 0;` is elided from this excerpt. */
2051 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2052 struct seminfo *host_seminfo)
2054 struct target_seminfo *target_seminfo;
2055 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2056 return -TARGET_EFAULT;
2057 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2058 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2059 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2060 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2061 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2062 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2063 __put_user(host_seminfo->semume, &target_seminfo->semume);
2064 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2065 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2066 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2067 unlock_user_struct(target_seminfo, target_addr, 1);
2073 struct semid_ds *buf;
2074 unsigned short *array;
2075 struct seminfo *__buf;
2078 union target_semun {
2085 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2086 abi_ulong target_addr)
2089 unsigned short *array;
2091 struct semid_ds semid_ds;
2094 semun.buf = &semid_ds;
2096 ret = semctl(semid, 0, IPC_STAT, semun);
2098 return get_errno(ret);
2100 nsems = semid_ds.sem_nsems;
2102 *host_array = malloc(nsems*sizeof(unsigned short));
2103 array = lock_user(VERIFY_READ, target_addr,
2104 nsems*sizeof(unsigned short), 1);
2106 return -TARGET_EFAULT;
2108 for(i=0; i<nsems; i++) {
2109 __get_user((*host_array)[i], &array[i]);
2111 unlock_user(array, target_addr, 0);
/* Writes the host semaphore-value array back into the guest's buffer:
   queries the set size with IPC_STAT, then copies nsems shorts out
   with byte swapping.  NOTE(review): excerpt is elided — locals' error
   checks, the free() of *host_array and the final return are missing
   from this view. */
2116 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2117 unsigned short **host_array)
2120 unsigned short *array;
2122 struct semid_ds semid_ds;
2125 semun.buf = &semid_ds;
2127 ret = semctl(semid, 0, IPC_STAT, semun);
2129 return get_errno(ret);
2131 nsems = semid_ds.sem_nsems;
2133 array = lock_user(VERIFY_WRITE, target_addr,
2134 nsems*sizeof(unsigned short), 0);
2136 return -TARGET_EFAULT;
2138 for(i=0; i<nsems; i++) {
2139 __put_user((*host_array)[i], &array[i]);
2142 unlock_user(array, target_addr, 1);
/* semctl(2) wrapper: dispatches on cmd, converting the semun argument
   in the appropriate direction for each command class (plain value,
   GETALL/SETALL array, semid_ds buffer, or seminfo).
   NOTE(review): excerpt is elided — the switch header, case labels for
   several commands, error propagation of `err` and the final return
   are missing from this view. */
2147 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2148 union target_semun target_su)
2151 struct semid_ds dsarg;
2152 unsigned short *array;
2153 struct seminfo seminfo;
2154 abi_long ret = -TARGET_EINVAL;
/* GETVAL/SETVAL-style: value travels inside the union itself. */
2161 arg.val = tswapl(target_su.val);
2162 ret = get_errno(semctl(semid, semnum, cmd, arg));
2163 target_su.val = tswapl(arg.val);
/* GETALL/SETALL: marshal the whole value array both ways. */
2167 err = target_to_host_semarray(semid, &array, target_su.array);
2171 ret = get_errno(semctl(semid, semnum, cmd, arg));
2172 err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_STAT/IPC_SET: marshal the semid_ds buffer both ways. */
2179 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2183 ret = get_errno(semctl(semid, semnum, cmd, arg));
2184 err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* IPC_INFO/SEM_INFO: host fills seminfo, copy out to guest. */
2190 arg.__buf = &seminfo;
2191 ret = get_errno(semctl(semid, semnum, cmd, arg));
2192 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Commands with no argument (e.g. IPC_RMID, GETPID...). */
2200 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2207 struct target_sembuf {
2208 unsigned short sem_num;
/* Copies an array of nsops guest sembuf operations into host layout,
   byte-swapping each field.  Returns 0 on success, -TARGET_EFAULT on a
   bad guest address.  NOTE(review): the NULL check after lock_user and
   the final `return 0;` are elided from this excerpt. */
2213 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2214 abi_ulong target_addr,
2217 struct target_sembuf *target_sembuf;
2220 target_sembuf = lock_user(VERIFY_READ, target_addr,
2221 nsops*sizeof(struct target_sembuf), 1);
2223 return -TARGET_EFAULT;
2225 for(i=0; i<nsops; i++) {
2226 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2227 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2228 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2231 unlock_user(target_sembuf, target_addr, 0);
2236 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2238 struct sembuf sops[nsops];
2240 if (target_to_host_sembuf(sops, ptr, nsops))
2241 return -TARGET_EFAULT;
2243 return semop(semid, sops, nsops);
/* Guest-layout mirror of struct msqid_ds; on 32-bit targets each time
   field is followed by a pad word to match the kernel's 64-bit slots. */
2246 struct target_msqid_ds
2248 struct target_ipc_perm msg_perm;
2249 abi_ulong msg_stime;
2250 #if TARGET_ABI_BITS == 32
2251 abi_ulong __unused1;
2253 abi_ulong msg_rtime;
2254 #if TARGET_ABI_BITS == 32
2255 abi_ulong __unused2;
2257 abi_ulong msg_ctime;
2258 #if TARGET_ABI_BITS == 32
2259 abi_ulong __unused3;
2261 abi_ulong __msg_cbytes;
2263 abi_ulong msg_qbytes;
2264 abi_ulong msg_lspid;
2265 abi_ulong msg_lrpid;
2266 abi_ulong __unused4;
2267 abi_ulong __unused5;
/* Converts a guest msqid_ds into host layout: converts the embedded
   ipc_perm, then byte-swaps each remaining field.
   NOTE(review): like target_to_host_semid_ds, the locked target_md is
   not released on the ipc_perm failure path — confirm; the final
   `return 0;` is elided from this excerpt. */
2270 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2271 abi_ulong target_addr)
2273 struct target_msqid_ds *target_md;
2275 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2276 return -TARGET_EFAULT;
2277 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2278 return -TARGET_EFAULT;
2279 host_md->msg_stime = tswapl(target_md->msg_stime);
2280 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2281 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2282 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2283 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2284 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2285 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2286 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2287 unlock_user_struct(target_md, target_addr, 0);
/* Writes a host msqid_ds back into guest memory: converts the embedded
   ipc_perm, then byte-swaps each field into the target struct.
   NOTE(review): target_md is not released on the ipc_perm failure path
   — confirm; the final `return 0;` is elided from this excerpt. */
2291 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2292 struct msqid_ds *host_md)
2294 struct target_msqid_ds *target_md;
2296 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2297 return -TARGET_EFAULT;
2298 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2299 return -TARGET_EFAULT;
2300 target_md->msg_stime = tswapl(host_md->msg_stime);
2301 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2302 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2303 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2304 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2305 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2306 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2307 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2308 unlock_user_struct(target_md, target_addr, 1);
2312 struct target_msginfo {
2320 unsigned short int msgseg;
/* Copies a host struct msginfo into the guest's target_msginfo via
   __put_user (handles byte order and field width).  Returns 0 on
   success, -TARGET_EFAULT on a bad guest address.
   NOTE(review): the final `return 0;` is elided from this excerpt. */
2323 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2324 struct msginfo *host_msginfo)
2326 struct target_msginfo *target_msginfo;
2327 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2328 return -TARGET_EFAULT;
2329 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2330 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2331 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2332 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2333 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2334 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2335 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2336 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2337 unlock_user_struct(target_msginfo, target_addr, 1);
/* msgctl(2) wrapper: dispatches on cmd — IPC_STAT/IPC_SET marshal the
   msqid_ds both ways, IPC_RMID takes no buffer, and the info commands
   copy out a msginfo.  NOTE(review): excerpt is elided — the switch
   header, case labels, `break`s and the final return are missing from
   this view. */
2341 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2343 struct msqid_ds dsarg;
2344 struct msginfo msginfo;
2345 abi_long ret = -TARGET_EINVAL;
2353 if (target_to_host_msqid_ds(&dsarg,ptr))
2354 return -TARGET_EFAULT;
2355 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2356 if (host_to_target_msqid_ds(ptr,&dsarg))
2357 return -TARGET_EFAULT;
2360 ret = get_errno(msgctl(msgid, cmd, NULL));
/* IPC_INFO/MSG_INFO return a msginfo through the msqid_ds pointer;
   the cast mirrors the kernel interface. */
2364 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2365 if (host_to_target_msginfo(ptr, &msginfo))
2366 return -TARGET_EFAULT;
2373 struct target_msgbuf {
2378 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2379 unsigned int msgsz, int msgflg)
2381 struct target_msgbuf *target_mb;
2382 struct msgbuf *host_mb;
2385 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2386 return -TARGET_EFAULT;
2387 host_mb = malloc(msgsz+sizeof(long));
2388 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2389 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2390 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2392 unlock_user_struct(target_mb, msgp, 0);
/* msgrcv(2) wrapper: receives into a freshly malloc'd host msgbuf, then
   copies the mtext payload and the byte-swapped mtype back into the
   guest's msgbuf.  NOTE(review): excerpt is elided — the malloc NULL
   check (if any), `end` label, free(host_mb) and the final return are
   missing from this view; malloc appears unchecked here too. */
2397 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2398 unsigned int msgsz, abi_long msgtyp,
2401 struct target_msgbuf *target_mb;
2403 struct msgbuf *host_mb;
2406 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2407 return -TARGET_EFAULT;
2409 host_mb = malloc(msgsz+sizeof(long));
2410 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
/* On success `ret` is the number of payload bytes received; lock that
   many bytes of the guest mtext for writing. */
2413 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2414 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2415 if (!target_mtext) {
2416 ret = -TARGET_EFAULT;
2419 memcpy(target_mb->mtext, host_mb->mtext, ret);
2420 unlock_user(target_mtext, target_mtext_addr, ret);
2423 target_mb->mtype = tswapl(host_mb->mtype);
2428 unlock_user_struct(target_mb, msgp, 1);
/* Guest-layout mirror of struct shmid_ds; on 32-bit targets each time
   field carries a pad word to match the kernel's layout. */
2432 struct target_shmid_ds
2434 struct target_ipc_perm shm_perm;
2435 abi_ulong shm_segsz;
2436 abi_ulong shm_atime;
2437 #if TARGET_ABI_BITS == 32
2438 abi_ulong __unused1;
2440 abi_ulong shm_dtime;
2441 #if TARGET_ABI_BITS == 32
2442 abi_ulong __unused2;
2444 abi_ulong shm_ctime;
2445 #if TARGET_ABI_BITS == 32
2446 abi_ulong __unused3;
2450 abi_ulong shm_nattch;
2451 unsigned long int __unused4;
2452 unsigned long int __unused5;
/* Converts a guest shmid_ds into host layout: converts the embedded
   ipc_perm, then transfers each field.
   NOTE(review): __put_user(target_val, &host_field) is used for the
   target-to-host direction; numerically equivalent to the idiomatic
   __get_user(host_field, &target_field) but worth normalizing —
   confirm.  The final `return 0;` is elided from this excerpt. */
2455 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2456 abi_ulong target_addr)
2458 struct target_shmid_ds *target_sd;
2460 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2461 return -TARGET_EFAULT;
2462 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2463 return -TARGET_EFAULT;
2464 __put_user(target_sd->shm_segsz, &host_sd->shm_segsz);
2465 __put_user(target_sd->shm_atime, &host_sd->shm_atime);
2466 __put_user(target_sd->shm_dtime, &host_sd->shm_dtime);
2467 __put_user(target_sd->shm_ctime, &host_sd->shm_ctime);
2468 __put_user(target_sd->shm_cpid, &host_sd->shm_cpid);
2469 __put_user(target_sd->shm_lpid, &host_sd->shm_lpid);
2470 __put_user(target_sd->shm_nattch, &host_sd->shm_nattch);
2471 unlock_user_struct(target_sd, target_addr, 0);
/* Writes a host shmid_ds back into guest memory: converts the embedded
   ipc_perm, then transfers each field via __put_user.
   NOTE(review): target_sd is not released on the ipc_perm failure path
   — confirm; the final `return 0;` is elided from this excerpt. */
2475 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2476 struct shmid_ds *host_sd)
2478 struct target_shmid_ds *target_sd;
2480 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2481 return -TARGET_EFAULT;
2482 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2483 return -TARGET_EFAULT;
2484 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2485 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2486 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2487 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2488 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2489 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2490 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2491 unlock_user_struct(target_sd, target_addr, 1);
2495 struct target_shminfo {
/* Copies a host struct shminfo into the guest's target_shminfo via
   __put_user.  Returns 0 on success, -TARGET_EFAULT on a bad guest
   address.  NOTE(review): the final `return 0;` is elided from this
   excerpt. */
2503 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2504 struct shminfo *host_shminfo)
2506 struct target_shminfo *target_shminfo;
2507 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2508 return -TARGET_EFAULT;
2509 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2510 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2511 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2512 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2513 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2514 unlock_user_struct(target_shminfo, target_addr, 1);
2518 struct target_shm_info {
2523 abi_ulong swap_attempts;
2524 abi_ulong swap_successes;
/* Copies a host struct shm_info (SHM_INFO result) into the guest's
   target_shm_info via __put_user.  Returns 0 on success,
   -TARGET_EFAULT on a bad guest address.  NOTE(review): the final
   `return 0;` is elided from this excerpt. */
2527 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2528 struct shm_info *host_shm_info)
2530 struct target_shm_info *target_shm_info;
2531 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2532 return -TARGET_EFAULT;
2533 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2534 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2535 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2536 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2537 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2538 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2539 unlock_user_struct(target_shm_info, target_addr, 1);
/* shmctl(2) wrapper: dispatches on cmd — IPC_STAT/IPC_SET marshal the
   shmid_ds both ways, IPC_INFO copies out shminfo, SHM_INFO copies out
   shm_info, and the remaining commands take no buffer.
   NOTE(review): excerpt is elided — the switch header, case labels,
   `break`s and the final return are missing from this view. */
2543 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2545 struct shmid_ds dsarg;
2546 struct shminfo shminfo;
2547 struct shm_info shm_info;
2548 abi_long ret = -TARGET_EINVAL;
2556 if (target_to_host_shmid_ds(&dsarg, buf))
2557 return -TARGET_EFAULT;
2558 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2559 if (host_to_target_shmid_ds(buf, &dsarg))
2560 return -TARGET_EFAULT;
/* IPC_INFO/SHM_INFO return their info structs through the shmid_ds
   pointer; the casts mirror the kernel interface. */
2563 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2564 if (host_to_target_shminfo(buf, &shminfo))
2565 return -TARGET_EFAULT;
2568 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2569 if (host_to_target_shm_info(buf, &shm_info))
2570 return -TARGET_EFAULT;
2575 ret = get_errno(shmctl(shmid, cmd, NULL));
/* shmat(2) wrapper: queries the segment size, attaches it either at the
   guest-requested address (translated with g2h) or at a hole found by
   mmap_find_vma(), updates the page-flag map for the new range, and
   records the mapping in shm_regions[] for later do_shmdt().
   NOTE(review): excerpt is elided — mmap_lock/unlock (if any), some
   locals, `break` in the slot loop and the final return of `raddr`
   are missing from this view. */
2582 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2586 struct shmid_ds shm_info;
2589 /* find out the length of the shared memory segment */
2590 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2591 if (is_error(ret)) {
2592 /* can't get length, bail out */
2599 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2601 abi_ulong mmap_start;
/* No guest-chosen address: pick a free guest range ourselves. */
2603 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2605 if (mmap_start == -1) {
2607 host_raddr = (void *)-1;
2609 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2612 if (host_raddr == (void *)-1) {
2614 return get_errno((long)host_raddr);
2616 raddr=h2g((unsigned long)host_raddr);
/* Mark the attached range valid/readable (+writable unless RDONLY). */
2618 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2619 PAGE_VALID | PAGE_READ |
2620 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Remember the mapping in the first free table slot. */
2622 for (i = 0; i < N_SHM_REGIONS; i++) {
2623 if (shm_regions[i].start == 0) {
2624 shm_regions[i].start = raddr;
2625 shm_regions[i].size = shm_info.shm_segsz;
2635 static inline abi_long do_shmdt(abi_ulong shmaddr)
2639 for (i = 0; i < N_SHM_REGIONS; ++i) {
2640 if (shm_regions[i].start == shmaddr) {
2641 shm_regions[i].start = 0;
2642 page_set_flags(shmaddr, shm_regions[i].size, 0);
2647 return get_errno(shmdt(g2h(shmaddr)));
2650 #ifdef TARGET_NR_ipc
2651 /* ??? This only works with linear mappings. */
2652 /* do_ipc() must return target values and target errnos. */
2653 static abi_long do_ipc(unsigned int call, int first,
2654 int second, int third,
2655 abi_long ptr, abi_long fifth)
/* Demultiplexer for the ipc(2) super-syscall: the low 16 bits of
   `call` select the operation, the high 16 bits carry the interface
   version (version 0 passes msgrcv args via an ipc_kludge struct).
   NOTE(review): excerpt is elided — the switch header, `break`s and
   the final return are missing from this view. */
2660 version = call >> 16;
2665 ret = do_semop(first, ptr, second);
2669 ret = get_errno(semget(first, second, third));
2673 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2677 ret = get_errno(msgget(first, second));
2681 ret = do_msgsnd(first, ptr, second, third);
2685 ret = do_msgctl(first, second, ptr);
/* Old-style MSGRCV: ptr points at a kludge struct bundling the
   message pointer and msgtyp. */
2692 struct target_ipc_kludge {
2697 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2698 ret = -TARGET_EFAULT;
2702 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2704 unlock_user_struct(tmp, ptr, 0);
2708 ret = do_msgrcv(first, ptr, second, fifth, third);
/* SHMAT: the attach address result is stored through `third`. */
2717 raddr = do_shmat(first, ptr, second);
2718 if (is_error(raddr))
2719 return get_errno(raddr);
2720 if (put_user_ual(raddr, third))
2721 return -TARGET_EFAULT;
2725 ret = -TARGET_EINVAL;
2730 ret = do_shmdt(ptr);
2734 /* IPC_* flag values are the same on all linux platforms */
2735 ret = get_errno(shmget(first, second, third));
2738 /* IPC_* and SHM_* command values are the same on all linux platforms */
2740 ret = do_shmctl(first, second, third);
2743 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2744 ret = -TARGET_ENOSYS;
2751 /* kernel structure types definitions */
/* First expansion of syscall_types.h: each STRUCT()/STRUCT_SPECIAL()
 * entry becomes an enumerator STRUCT_<name>. */
2754 #define STRUCT(name, list...) STRUCT_ ## name,
2755 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2757 #include "syscall_types.h"
2760 #undef STRUCT_SPECIAL
/* Second expansion: each STRUCT() entry becomes a TYPE_NULL-terminated
 * argtype descriptor array struct_<name>_def[] used by the thunk layer;
 * STRUCT_SPECIAL entries define their descriptors by hand elsewhere. */
2762 #define STRUCT(name, list...) static const argtype struct_ ## name ## _def[] = { list, TYPE_NULL };
2763 #define STRUCT_SPECIAL(name)
2764 #include "syscall_types.h"
2766 #undef STRUCT_SPECIAL
/* One table entry per supported ioctl: target/host request numbers plus
 * a thunk argtype descriptor for converting the (pointer) argument.
 * Some fields and the closing of the typedef are elided in this view. */
2768 typedef struct IOCTLEntry {
2769 unsigned int target_cmd;
2770 unsigned int host_cmd;
2773 const argtype arg_type[5];
/* Access-direction flags for the ioctl argument buffer. */
2776 #define IOC_R 0x0001
2777 #define IOC_W 0x0002
2778 #define IOC_RW (IOC_R | IOC_W)
/* Upper bound on any converted ioctl struct (buf_temp in do_ioctl). */
2780 #define MAX_STRUCT_SIZE 4096
2782 static IOCTLEntry ioctl_entries[] = {
2783 #define IOCTL(cmd, access, types...) \
2784 { TARGET_ ## cmd, cmd, #cmd, access, { types } },
2789 /* ??? Implement proper locking for ioctls. */
2790 /* do_ioctl() Must return target values and target errnos. */
/*
 * do_ioctl(): look up 'cmd' in ioctl_entries[]; for pointer arguments,
 * convert the guest struct to host layout in buf_temp (per the entry's
 * IOC_R/IOC_W access flags), issue the host ioctl, and convert results
 * back.  Several case labels/braces are elided in this extraction.
 */
2791 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
2793 const IOCTLEntry *ie;
2794 const argtype *arg_type;
2796 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* Linear search; target_cmd == 0 terminates the table. */
2802 if (ie->target_cmd == 0) {
2803 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
2804 return -TARGET_ENOSYS;
2806 if (ie->target_cmd == cmd)
2810 arg_type = ie->arg_type;
2812 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
2814 switch(arg_type[0]) {
/* No argument: pass through directly. */
2817 ret = get_errno(ioctl(fd, ie->host_cmd));
/* Scalar argument: pass the raw value. */
2822 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* Pointer argument: size comes from the thunk descriptor. */
2826 target_size = thunk_type_size(arg_type, 0);
2827 switch(ie->access) {
/* Read-only (host writes, guest reads back). */
2829 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2830 if (!is_error(ret)) {
2831 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2833 return -TARGET_EFAULT;
2834 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2835 unlock_user(argptr, arg, target_size);
/* Write-only (guest supplies data to the host). */
2839 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2841 return -TARGET_EFAULT;
2842 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2843 unlock_user(argptr, arg, 0);
2844 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* Read-write: convert in, ioctl, convert back out on success. */
2848 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2850 return -TARGET_EFAULT;
2851 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2852 unlock_user(argptr, arg, 0);
2853 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2854 if (!is_error(ret)) {
2855 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2857 return -TARGET_EFAULT;
2858 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2859 unlock_user(argptr, arg, target_size);
2865 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
2866 (long)cmd, arg_type[0]);
2867 ret = -TARGET_ENOSYS;
/* termios c_iflag target<->host bit translation table
 * (entries: target mask, target bits, host mask, host bits). */
2873 static const bitmask_transtbl iflag_tbl[] = {
2874 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
2875 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
2876 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
2877 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
2878 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
2879 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
2880 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
2881 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
2882 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
2883 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
2884 { TARGET_IXON, TARGET_IXON, IXON, IXON },
2885 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
2886 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
2887 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* termios c_oflag translation table; the *DLY entries map multi-bit
 * delay fields, so one mask appears with several value rows. */
2891 static const bitmask_transtbl oflag_tbl[] = {
2892 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
2893 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
2894 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
2895 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
2896 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
2897 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
2898 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
2899 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
2900 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
2901 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
2902 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
2903 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
2904 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
2905 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
2906 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
2907 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
2908 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
2909 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
2910 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
2911 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
2912 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
2913 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
2914 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
2915 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* termios c_cflag translation table: baud rates (CBAUD field),
 * character size (CSIZE field) and the single-bit control flags. */
2919 static const bitmask_transtbl cflag_tbl[] = {
2920 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
2921 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
2922 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
2923 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
2924 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
2925 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
2926 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
2927 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
2928 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
2929 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
2930 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
2931 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
2932 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
2933 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
2934 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
2935 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
2936 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
2937 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
2938 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
2939 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
2940 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
2941 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
2942 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
2943 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
2944 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
2945 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
2946 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
2947 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
2948 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
2949 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
2950 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* termios c_lflag target<->host bit translation table. */
2954 static const bitmask_transtbl lflag_tbl[] = {
2955 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
2956 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
2957 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
2958 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
2959 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
2960 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
2961 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
2962 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
2963 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
2964 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
2965 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
2966 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
2967 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
2968 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
2969 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/*
 * target_to_host_termios(): convert a guest struct target_termios into a
 * host struct host_termios -- flag words via the bitmask tables above
 * (with byte-swapping), then each control character individually, since
 * the V* index values may differ between target and host.
 */
2973 static void target_to_host_termios (void *dst, const void *src)
2975 struct host_termios *host = dst;
2976 const struct target_termios *target = src;
2979 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
2981 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
2983 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
2985 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
2986 host->c_line = target->c_line;
/* Clear first so host-only c_cc slots are well defined. */
2988 memset(host->c_cc, 0, sizeof(host->c_cc));
2989 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
2990 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
2991 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
2992 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
2993 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
2994 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
2995 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
2996 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
2997 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
2998 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
2999 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3000 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3001 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3002 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3003 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3004 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3005 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * host_to_target_termios(): inverse of target_to_host_termios() --
 * translate host termios flag bits back to guest encoding (byte-swapped)
 * and copy each control character to its guest c_cc slot.
 */
3008 static void host_to_target_termios (void *dst, const void *src)
3010 struct target_termios *target = dst;
3011 const struct host_termios *host = src;
3014 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3016 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3018 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3020 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3021 target->c_line = host->c_line;
/* Clear first so target-only c_cc slots are well defined. */
3023 memset(target->c_cc, 0, sizeof(target->c_cc));
3024 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3025 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3026 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3027 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3028 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3029 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3030 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3031 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3032 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3033 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3034 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3035 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3036 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3037 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3038 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3039 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3040 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor registering the termios converters above so the
 * generic ioctl machinery can marshal struct termios arguments. */
3043 static const StructEntry struct_termios_def = {
3044 .convert = { host_to_target_termios, target_to_host_termios },
3045 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3046 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* mmap() flags target<->host bit translation table. */
3049 static bitmask_transtbl mmap_flags_tbl[] = {
3050 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3051 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3052 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3053 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3054 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3055 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3056 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3057 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3061 #if defined(TARGET_I386)
3063 /* NOTE: there is really one LDT for all the threads */
3064 static uint8_t *ldt_table;
/*
 * read_ldt(): modify_ldt(READ) helper -- copy the emulated LDT out to
 * guest memory at 'ptr', clamped to 'bytecount'.  Returns -TARGET_EFAULT
 * if the guest buffer cannot be locked.  (Some lines elided.)
 */
3066 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3073 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3074 if (size > bytecount)
3076 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3078 return -TARGET_EFAULT;
3079 /* ??? Should this by byteswapped? */
3080 memcpy(p, ldt_table, size);
3081 unlock_user(p, ptr, size);
3085 /* XXX: add locking support */
/*
 * write_ldt(): modify_ldt(WRITE) helper -- validate the guest's
 * user_desc-style request, lazily allocate the emulated LDT on first
 * use, and pack the request into an 8-byte x86 descriptor installed at
 * the requested entry.  'oldmode' selects the legacy flag encoding.
 */
3086 static abi_long write_ldt(CPUX86State *env,
3087 abi_ulong ptr, unsigned long bytecount, int oldmode)
3089 struct target_modify_ldt_ldt_s ldt_info;
3090 struct target_modify_ldt_ldt_s *target_ldt_info;
3091 int seg_32bit, contents, read_exec_only, limit_in_pages;
3092 int seg_not_present, useable, lm;
3093 uint32_t *lp, entry_1, entry_2;
3095 if (bytecount != sizeof(ldt_info))
3096 return -TARGET_EINVAL;
/* Copy the request in, byte-swapping each field. */
3097 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3098 return -TARGET_EFAULT;
3099 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3100 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3101 ldt_info.limit = tswap32(target_ldt_info->limit);
3102 ldt_info.flags = tswap32(target_ldt_info->flags);
3103 unlock_user_struct(target_ldt_info, ptr, 0);
3105 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3106 return -TARGET_EINVAL;
/* Unpack the user_desc flag word into individual descriptor bits. */
3107 seg_32bit = ldt_info.flags & 1;
3108 contents = (ldt_info.flags >> 1) & 3;
3109 read_exec_only = (ldt_info.flags >> 3) & 1;
3110 limit_in_pages = (ldt_info.flags >> 4) & 1;
3111 seg_not_present = (ldt_info.flags >> 5) & 1;
3112 useable = (ldt_info.flags >> 6) & 1;
3116 lm = (ldt_info.flags >> 7) & 1;
3118 if (contents == 3) {
3120 return -TARGET_EINVAL;
3121 if (seg_not_present == 0)
3122 return -TARGET_EINVAL;
3124 /* allocate the LDT */
/* First use: map a zeroed guest-visible LDT and point the CPU at it. */
3126 env->ldt.base = target_mmap(0,
3127 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3128 PROT_READ|PROT_WRITE,
3129 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3130 if (env->ldt.base == -1)
3131 return -TARGET_ENOMEM;
3132 memset(g2h(env->ldt.base), 0,
3133 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3134 env->ldt.limit = 0xffff;
3135 ldt_table = g2h(env->ldt.base);
3138 /* NOTE: same code as Linux kernel */
3139 /* Allow LDTs to be cleared by the user. */
3140 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3143 read_exec_only == 1 &&
3145 limit_in_pages == 0 &&
3146 seg_not_present == 1 &&
/* Pack base/limit/flags into the two descriptor words. */
3154 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3155 (ldt_info.limit & 0x0ffff);
3156 entry_2 = (ldt_info.base_addr & 0xff000000) |
3157 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3158 (ldt_info.limit & 0xf0000) |
3159 ((read_exec_only ^ 1) << 9) |
3161 ((seg_not_present ^ 1) << 15) |
3163 (limit_in_pages << 23) |
3167 entry_2 |= (useable << 20);
3169 /* Install the new entry ... */
3171 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3172 lp[0] = tswap32(entry_1);
3173 lp[1] = tswap32(entry_2);
3177 /* specific and weird i386 syscalls */
/*
 * do_modify_ldt(): dispatch the modify_ldt syscall: func selects read,
 * old-mode write, or new-mode write (case labels elided in this view).
 */
3178 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3179 unsigned long bytecount)
3185 ret = read_ldt(ptr, bytecount);
3188 ret = write_ldt(env, ptr, bytecount, 1);
3191 ret = write_ldt(env, ptr, bytecount, 0);
3194 ret = -TARGET_ENOSYS;
3200 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * do_set_thread_area(): emulate set_thread_area for 32-bit x86 guests.
 * Like write_ldt() but installs the descriptor in the TLS slots of the
 * emulated GDT; entry_number == -1 requests allocation of a free slot,
 * which is written back to the guest struct.
 */
3201 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3203 uint64_t *gdt_table = g2h(env->gdt.base);
3204 struct target_modify_ldt_ldt_s ldt_info;
3205 struct target_modify_ldt_ldt_s *target_ldt_info;
3206 int seg_32bit, contents, read_exec_only, limit_in_pages;
3207 int seg_not_present, useable, lm;
3208 uint32_t *lp, entry_1, entry_2;
3211 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3212 if (!target_ldt_info)
3213 return -TARGET_EFAULT;
3214 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3215 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3216 ldt_info.limit = tswap32(target_ldt_info->limit);
3217 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 means "pick a free TLS GDT slot for me". */
3218 if (ldt_info.entry_number == -1) {
3219 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3220 if (gdt_table[i] == 0) {
3221 ldt_info.entry_number = i;
3222 target_ldt_info->entry_number = tswap32(i);
3227 unlock_user_struct(target_ldt_info, ptr, 1);
3229 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3230 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3231 return -TARGET_EINVAL;
/* Unpack the user_desc flag word, as in write_ldt(). */
3232 seg_32bit = ldt_info.flags & 1;
3233 contents = (ldt_info.flags >> 1) & 3;
3234 read_exec_only = (ldt_info.flags >> 3) & 1;
3235 limit_in_pages = (ldt_info.flags >> 4) & 1;
3236 seg_not_present = (ldt_info.flags >> 5) & 1;
3237 useable = (ldt_info.flags >> 6) & 1;
3241 lm = (ldt_info.flags >> 7) & 1;
3244 if (contents == 3) {
3245 if (seg_not_present == 0)
3246 return -TARGET_EINVAL;
3249 /* NOTE: same code as Linux kernel */
3250 /* Allow LDTs to be cleared by the user. */
3251 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3252 if ((contents == 0 &&
3253 read_exec_only == 1 &&
3255 limit_in_pages == 0 &&
3256 seg_not_present == 1 &&
/* Pack base/limit/flags into the descriptor words. */
3264 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3265 (ldt_info.limit & 0x0ffff);
3266 entry_2 = (ldt_info.base_addr & 0xff000000) |
3267 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3268 (ldt_info.limit & 0xf0000) |
3269 ((read_exec_only ^ 1) << 9) |
3271 ((seg_not_present ^ 1) << 15) |
3273 (limit_in_pages << 23) |
3278 /* Install the new entry ... */
3280 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3281 lp[0] = tswap32(entry_1);
3282 lp[1] = tswap32(entry_2);
/*
 * do_get_thread_area(): emulate get_thread_area for 32-bit x86 guests.
 * Reads the descriptor at the requested TLS GDT slot, unpacks it back
 * into user_desc fields (inverse of do_set_thread_area) and writes the
 * result to the guest struct at 'ptr'.
 */
3286 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3288 struct target_modify_ldt_ldt_s *target_ldt_info;
3289 uint64_t *gdt_table = g2h(env->gdt.base);
3290 uint32_t base_addr, limit, flags;
3291 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3292 int seg_not_present, useable, lm;
3293 uint32_t *lp, entry_1, entry_2;
3295 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3296 if (!target_ldt_info)
3297 return -TARGET_EFAULT;
3298 idx = tswap32(target_ldt_info->entry_number);
3299 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3300 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3301 unlock_user_struct(target_ldt_info, ptr, 1);
3302 return -TARGET_EINVAL;
3304 lp = (uint32_t *)(gdt_table + idx);
3305 entry_1 = tswap32(lp[0]);
3306 entry_2 = tswap32(lp[1]);
/* Decode the descriptor bits back into user_desc flag fields. */
3308 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3309 contents = (entry_2 >> 10) & 3;
3310 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3311 seg_32bit = (entry_2 >> 22) & 1;
3312 limit_in_pages = (entry_2 >> 23) & 1;
3313 useable = (entry_2 >> 20) & 1;
3317 lm = (entry_2 >> 21) & 1;
3319 flags = (seg_32bit << 0) | (contents << 1) |
3320 (read_exec_only << 3) | (limit_in_pages << 4) |
3321 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3322 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3323 base_addr = (entry_1 >> 16) |
3324 (entry_2 & 0xff000000) |
3325 ((entry_2 & 0xff) << 16);
3326 target_ldt_info->base_addr = tswapl(base_addr);
3327 target_ldt_info->limit = tswap32(limit);
3328 target_ldt_info->flags = tswap32(flags);
3329 unlock_user_struct(target_ldt_info, ptr, 1);
3332 #endif /* TARGET_I386 && TARGET_ABI32 */
3334 #ifndef TARGET_ABI32
/*
 * do_arch_prctl(): x86-64 arch_prctl emulation -- set or get the FS/GS
 * segment base for the guest CPU state.  (Some lines elided.)
 */
3335 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3342 case TARGET_ARCH_SET_GS:
3343 case TARGET_ARCH_SET_FS:
3344 if (code == TARGET_ARCH_SET_GS)
/* Load a null selector, then set the base directly. */
3348 cpu_x86_load_seg(env, idx, 0);
3349 env->segs[idx].base = addr;
3351 case TARGET_ARCH_GET_GS:
3352 case TARGET_ARCH_GET_FS:
3353 if (code == TARGET_ARCH_GET_GS)
3357 val = env->segs[idx].base;
3358 if (put_user(val, addr, abi_ulong))
3359 return -TARGET_EFAULT;
3362 ret = -TARGET_EINVAL;
3369 #endif /* defined(TARGET_I386) */
3371 #if defined(USE_NPTL)
3373 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
/* Serializes thread creation; the child also waits on it so the parent
 * can finish TLS setup before the child runs guest code. */
3375 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Parent<->child handshake state for pthread-backed clone(). */
3378 pthread_mutex_t mutex;
3379 pthread_cond_t cond;
3383 abi_ulong child_tidptr;
3384 abi_ulong parent_tidptr;
/*
 * clone_func(): entry point of a new guest thread created by do_fork().
 * Publishes the child tid, restores the signal mask, signals the parent
 * via the condvar, then waits on clone_lock until setup completes.
 */
3388 static void *clone_func(void *arg)
3390 new_thread_info *info = arg;
3396 ts = (TaskState *)thread_env->opaque;
3397 info->tid = gettid();
3399 if (info->child_tidptr)
3400 put_user_u32(info->tid, info->child_tidptr);
3401 if (info->parent_tidptr)
3402 put_user_u32(info->tid, info->parent_tidptr);
3403 /* Enable signals. */
3404 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3405 /* Signal to the parent that we're ready. */
3406 pthread_mutex_lock(&info->mutex);
3407 pthread_cond_broadcast(&info->cond);
3408 pthread_mutex_unlock(&info->mutex);
3409 /* Wait until the parent has finshed initializing the tls state. */
3410 pthread_mutex_lock(&clone_lock);
3411 pthread_mutex_unlock(&clone_lock);
3417 /* this stack is the equivalent of the kernel stack associated with a
/* Non-NPTL variant: raw clone() entry point running the new CPU. */
3419 #define NEW_STACK_SIZE 8192
3421 static int clone_func(void *arg)
3423 CPUState *env = arg;
3430 /* do_fork() Must return host values and target errnos (unlike most
3431 do_*() functions). */
/*
 * do_fork(): emulate clone()/fork()/vfork().  CLONE_VM requests become a
 * new guest thread (pthread under USE_NPTL, raw clone() otherwise);
 * everything else falls through to a plain fork.  Many lines are elided
 * in this extraction.
 */
3432 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3433 abi_ulong parent_tidptr, target_ulong newtls,
3434 abi_ulong child_tidptr)
3440 #if defined(USE_NPTL)
3441 unsigned int nptl_flags;
3445 /* Emulate vfork() with fork() */
3446 if (flags & CLONE_VFORK)
3447 flags &= ~(CLONE_VFORK | CLONE_VM);
3449 if (flags & CLONE_VM) {
3450 TaskState *parent_ts = (TaskState *)env->opaque;
3451 #if defined(USE_NPTL)
3452 new_thread_info info;
3453 pthread_attr_t attr;
/* Thread case: new TaskState + cloned CPU state sharing the VM. */
3455 ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
3456 init_task_state(ts);
3457 new_stack = ts->stack;
3458 /* we create a new CPU instance. */
3459 new_env = cpu_copy(env);
3460 /* Init regs that differ from the parent. */
3461 cpu_clone_regs(new_env, newsp);
3462 new_env->opaque = ts;
3463 ts->bprm = parent_ts->bprm;
3464 ts->info = parent_ts->info;
3465 #if defined(USE_NPTL)
/* NPTL flags are handled here, not passed to the host clone. */
3467 flags &= ~CLONE_NPTL_FLAGS2;
3469 if (nptl_flags & CLONE_CHILD_CLEARTID) {
3470 ts->child_tidptr = child_tidptr;
3473 if (nptl_flags & CLONE_SETTLS)
3474 cpu_set_tls (new_env, newtls);
3476 /* Grab a mutex so that thread setup appears atomic. */
3477 pthread_mutex_lock(&clone_lock);
3479 memset(&info, 0, sizeof(info));
3480 pthread_mutex_init(&info.mutex, NULL);
3481 pthread_mutex_lock(&info.mutex);
3482 pthread_cond_init(&info.cond, NULL);
3484 if (nptl_flags & CLONE_CHILD_SETTID)
3485 info.child_tidptr = child_tidptr;
3486 if (nptl_flags & CLONE_PARENT_SETTID)
3487 info.parent_tidptr = parent_tidptr;
3489 ret = pthread_attr_init(&attr);
3490 ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
3491 /* It is not safe to deliver signals until the child has finished
3492 initializing, so temporarily block all signals. */
3493 sigfillset(&sigmask);
3494 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3496 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3497 /* TODO: Free new CPU state if thread creation failed. */
3499 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3500 pthread_attr_destroy(&attr);
3502 /* Wait for the child to initialize. */
3503 pthread_cond_wait(&info.cond, &info.mutex);
3505 if (flags & CLONE_PARENT_SETTID)
3506 put_user_u32(ret, parent_tidptr);
3510 pthread_mutex_unlock(&info.mutex);
3511 pthread_cond_destroy(&info.cond);
3512 pthread_mutex_destroy(&info.mutex);
3513 pthread_mutex_unlock(&clone_lock);
3515 if (flags & CLONE_NPTL_FLAGS2)
3517 /* This is probably going to die very quickly, but do it anyway. */
3519 ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3521 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3525 /* if no CLONE_VM, we consider it is a fork */
3526 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3531 /* Child Process. */
3532 cpu_clone_regs(env, newsp);
3534 #if defined(USE_NPTL)
3535 /* There is a race condition here. The parent process could
3536 theoretically read the TID in the child process before the child
3537 tid is set. This would require using either ptrace
3538 (not implemented) or having *_tidptr to point at a shared memory
3539 mapping. We can't repeat the spinlock hack used above because
3540 the child process gets its own copy of the lock. */
3541 if (flags & CLONE_CHILD_SETTID)
3542 put_user_u32(gettid(), child_tidptr);
3543 if (flags & CLONE_PARENT_SETTID)
3544 put_user_u32(gettid(), parent_tidptr);
3545 ts = (TaskState *)env->opaque;
3546 if (flags & CLONE_SETTLS)
3547 cpu_set_tls (env, newtls);
3548 if (flags & CLONE_CHILD_CLEARTID)
3549 ts->child_tidptr = child_tidptr;
/*
 * do_fcntl(): emulate fcntl()/fcntl64() -- marshal struct flock /
 * flock64 between guest and host layouts for the locking commands, and
 * translate O_* flag bitmasks for F_GETFL/F_SETFL.  Case labels for
 * F_GETFL/F_SETFL and the default branch are elided in this view.
 */
3558 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
3561 struct target_flock *target_fl;
3562 struct flock64 fl64;
3563 struct target_flock64 *target_fl64;
3567 case TARGET_F_GETLK:
3568 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3569 return -TARGET_EFAULT;
3570 fl.l_type = tswap16(target_fl->l_type);
3571 fl.l_whence = tswap16(target_fl->l_whence);
3572 fl.l_start = tswapl(target_fl->l_start);
3573 fl.l_len = tswapl(target_fl->l_len);
3574 fl.l_pid = tswapl(target_fl->l_pid);
3575 unlock_user_struct(target_fl, arg, 0);
3576 ret = get_errno(fcntl(fd, F_GETLK, &fl));
/* Copy the (possibly updated) lock description back to the guest. */
3578 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
3579 return -TARGET_EFAULT;
3580 target_fl->l_type = tswap16(fl.l_type);
3581 target_fl->l_whence = tswap16(fl.l_whence);
3582 target_fl->l_start = tswapl(fl.l_start);
3583 target_fl->l_len = tswapl(fl.l_len);
3584 target_fl->l_pid = tswapl(fl.l_pid);
3585 unlock_user_struct(target_fl, arg, 1);
3589 case TARGET_F_SETLK:
3590 case TARGET_F_SETLKW:
3591 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3592 return -TARGET_EFAULT;
3593 fl.l_type = tswap16(target_fl->l_type);
3594 fl.l_whence = tswap16(target_fl->l_whence);
3595 fl.l_start = tswapl(target_fl->l_start);
3596 fl.l_len = tswapl(target_fl->l_len);
3597 fl.l_pid = tswapl(target_fl->l_pid);
3598 unlock_user_struct(target_fl, arg, 0);
3599 ret = get_errno(fcntl(fd, F_SETLK+(cmd-TARGET_F_SETLK), &fl));
3602 case TARGET_F_GETLK64:
3603 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3604 return -TARGET_EFAULT;
/* NOTE(review): the '>> 1' applied to l_type here (and below) and the
 * 16-bit swap of the 32-bit l_pid field look suspicious -- the 32-bit
 * flock path above uses neither.  TODO confirm against the target ABI's
 * flock64 definition. */
3605 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3606 fl64.l_whence = tswap16(target_fl64->l_whence);
3607 fl64.l_start = tswapl(target_fl64->l_start);
3608 fl64.l_len = tswapl(target_fl64->l_len);
3609 fl64.l_pid = tswap16(target_fl64->l_pid);
3610 unlock_user_struct(target_fl64, arg, 0);
3611 ret = get_errno(fcntl(fd, F_GETLK64, &fl64));
3613 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
3614 return -TARGET_EFAULT;
3615 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
3616 target_fl64->l_whence = tswap16(fl64.l_whence);
3617 target_fl64->l_start = tswapl(fl64.l_start);
3618 target_fl64->l_len = tswapl(fl64.l_len);
3619 target_fl64->l_pid = tswapl(fl64.l_pid);
3620 unlock_user_struct(target_fl64, arg, 1);
3623 case TARGET_F_SETLK64:
3624 case TARGET_F_SETLKW64:
3625 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3626 return -TARGET_EFAULT;
3627 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3628 fl64.l_whence = tswap16(target_fl64->l_whence);
3629 fl64.l_start = tswapl(target_fl64->l_start);
3630 fl64.l_len = tswapl(target_fl64->l_len);
3631 fl64.l_pid = tswap16(target_fl64->l_pid);
3632 unlock_user_struct(target_fl64, arg, 0);
3633 ret = get_errno(fcntl(fd, F_SETLK64+(cmd-TARGET_F_SETLK64), &fl64));
/* F_GETFL: translate host O_* flags back to the target's encoding. */
3637 ret = get_errno(fcntl(fd, cmd, arg));
3639 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
/* F_SETFL: translate target O_* flags to host encoding first. */
3644 ret = get_errno(fcntl(fd, cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
3648 ret = get_errno(fcntl(fd, cmd, arg));
/* Helpers for targets with 16-bit uid/gid syscalls: narrow 32-bit host
 * IDs to 16 bits and widen 16-bit guest IDs, preserving the -1
 * "unchanged" sentinel.  (Bodies partially elided in this view.) */
3656 static inline int high2lowuid(int uid)
3664 static inline int high2lowgid(int gid)
3672 static inline int low2highuid(int uid)
3674 if ((int16_t)uid == -1)
3680 static inline int low2highgid(int gid)
3682 if ((int16_t)gid == -1)
3688 #endif /* USE_UID16 */
/*
 * syscall_init(): one-time setup -- register all struct thunk
 * descriptors, patch ioctl size fields that were left as "all ones"
 * placeholders, and build the errno reverse-translation table.
 */
3690 void syscall_init(void)
3693 const argtype *arg_type;
/* Third expansion of syscall_types.h: register each descriptor with
 * the thunk layer. */
3697 #define STRUCT(name, list...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3698 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3699 #include "syscall_types.h"
3701 #undef STRUCT_SPECIAL
3703 /* we patch the ioctl size if necessary. We rely on the fact that
3704 no ioctl has all the bits at '1' in the size field */
3706 while (ie->target_cmd != 0) {
3707 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
3708 TARGET_IOC_SIZEMASK) {
3709 arg_type = ie->arg_type;
3710 if (arg_type[0] != TYPE_PTR) {
3711 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
/* Replace the placeholder with the real thunk-computed size. */
3716 size = thunk_type_size(arg_type, 0);
3717 ie->target_cmd = (ie->target_cmd &
3718 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
3719 (size << TARGET_IOC_SIZESHIFT);
3722 /* Build target_to_host_errno_table[] table from
3723 * host_to_target_errno_table[]. */
3724 for (i=0; i < ERRNO_TABLE_SIZE; i++)
3725 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
3727 /* automatic consistency check if same arch */
3728 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3729 (defined(__x86_64__) && defined(TARGET_X86_64))
3730 if (unlikely(ie->target_cmd != ie->host_cmd)) {
3731 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3732 ie->name, ie->target_cmd, ie->host_cmd);
3739 #if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit offset passed as two 32-bit syscall words; word
 * order depends on target endianness. */
3740 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
3742 #ifdef TARGET_WORDS_BIGENDIAN
3743 return ((uint64_t)word0 << 32) | word1;
3745 return ((uint64_t)word1 << 32) | word0;
3748 #else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in one word (body elided). */
3749 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
3753 #endif /* TARGET_ABI_BITS != 32 */
3755 #ifdef TARGET_NR_truncate64
/* truncate64: combine the split 64-bit length and call the host; the
 * ARM EABI check handles its register-pair alignment convention. */
3756 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
3762 if (((CPUARMState *)cpu_env)->eabi)
3768 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
3772 #ifdef TARGET_NR_ftruncate64
/* ftruncate64: same split-offset handling as target_truncate64(). */
3773 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
3779 if (((CPUARMState *)cpu_env)->eabi)
3785 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Copy a guest struct timespec into a host one, byte-swapping both
 * fields.  Returns 0 on success, -TARGET_EFAULT on a bad pointer. */
3789 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
3790 abi_ulong target_addr)
3792 struct target_timespec *target_ts;
3794 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
3795 return -TARGET_EFAULT;
3796 host_ts->tv_sec = tswapl(target_ts->tv_sec);
3797 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
3798 unlock_user_struct(target_ts, target_addr, 0);
/* Inverse of target_to_host_timespec(): write a host timespec to guest
 * memory.  Returns 0 on success, -TARGET_EFAULT on a bad pointer. */
3802 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
3803 struct timespec *host_ts)
3805 struct target_timespec *target_ts;
3807 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
3808 return -TARGET_EFAULT;
3809 target_ts->tv_sec = tswapl(host_ts->tv_sec);
3810 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
3811 unlock_user_struct(target_ts, target_addr, 1);
3815 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/*
 * host_to_target_stat64(): copy a host struct stat into the guest's
 * stat64 layout at 'target_addr'.  The first branch handles the ARM
 * EABI-specific layout; the second the generic target_stat/stat64.
 * __put_user handles per-field byte-swapping.
 */
3816 static inline abi_long host_to_target_stat64(void *cpu_env,
3817 abi_ulong target_addr,
3818 struct stat *host_st)
3821 if (((CPUARMState *)cpu_env)->eabi) {
3822 struct target_eabi_stat64 *target_st;
3824 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3825 return -TARGET_EFAULT;
3826 memset(target_st, 0, sizeof(struct target_eabi_stat64));
3827 __put_user(host_st->st_dev, &target_st->st_dev);
3828 __put_user(host_st->st_ino, &target_st->st_ino);
3829 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3830 __put_user(host_st->st_ino, &target_st->__st_ino);
3832 __put_user(host_st->st_mode, &target_st->st_mode);
3833 __put_user(host_st->st_nlink, &target_st->st_nlink);
3834 __put_user(host_st->st_uid, &target_st->st_uid);
3835 __put_user(host_st->st_gid, &target_st->st_gid);
3836 __put_user(host_st->st_rdev, &target_st->st_rdev);
3837 __put_user(host_st->st_size, &target_st->st_size);
3838 __put_user(host_st->st_blksize, &target_st->st_blksize);
3839 __put_user(host_st->st_blocks, &target_st->st_blocks);
3840 __put_user(host_st->st_atime, &target_st->target_st_atime);
3841 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3842 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3843 unlock_user_struct(target_st, target_addr, 1);
/* Generic path: 64-bit targets use plain target_stat. */
3847 #if TARGET_LONG_BITS == 64
3848 struct target_stat *target_st;
3850 struct target_stat64 *target_st;
3853 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3854 return -TARGET_EFAULT;
3855 memset(target_st, 0, sizeof(*target_st));
3856 __put_user(host_st->st_dev, &target_st->st_dev);
3857 __put_user(host_st->st_ino, &target_st->st_ino);
3858 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3859 __put_user(host_st->st_ino, &target_st->__st_ino);
3861 __put_user(host_st->st_mode, &target_st->st_mode);
3862 __put_user(host_st->st_nlink, &target_st->st_nlink);
3863 __put_user(host_st->st_uid, &target_st->st_uid);
3864 __put_user(host_st->st_gid, &target_st->st_gid);
3865 __put_user(host_st->st_rdev, &target_st->st_rdev);
3866 /* XXX: better use of kernel struct */
3867 __put_user(host_st->st_size, &target_st->st_size);
3868 __put_user(host_st->st_blksize, &target_st->st_blksize);
3869 __put_user(host_st->st_blocks, &target_st->st_blocks);
3870 __put_user(host_st->st_atime, &target_st->target_st_atime);
3871 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3872 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3873 unlock_user_struct(target_st, target_addr, 1);
#if defined(USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either. */
/*
 * Emulate the futex(2) syscall for the guest by forwarding to the host
 * futex via sys_futex() on the host address g2h(uaddr).
 *
 * uaddr/uaddr2: guest addresses of the futex word(s).
 * op:           FUTEX_* operation (host and target constants assumed equal).
 * val/val3:     operation-specific values; byte-swapped with tswap32()
 *               where they are compared against guest memory.
 * timeout:      guest address of a target_timespec, or 0 for none
 *               (only used by FUTEX_WAIT).
 *
 * Returns the host result via get_errno(), or -TARGET_ENOSYS for
 * unsupported operations.
 */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
    switch (op) {
    case FUTEX_WAIT:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* val is compared against guest memory, so swap it to guest order. */
        return get_errno(sys_futex(g2h(uaddr), FUTEX_WAIT, tswap32(val),
                                   pts, NULL, 0));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), FUTEX_WAKE, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), FUTEX_FD, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
        return get_errno(sys_futex(g2h(uaddr), FUTEX_REQUEUE, val,
                                   NULL, g2h(uaddr2), 0));
    case FUTEX_CMP_REQUEUE:
        return get_errno(sys_futex(g2h(uaddr), FUTEX_CMP_REQUEUE, val,
                                   NULL, g2h(uaddr2), tswap32(val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
/*
 * Translate a host wait(2)-style status word into the guest's encoding.
 *
 * For a terminating signal (WIFSIGNALED) the low 7 signal bits are
 * remapped with host_to_target_signal() and the remaining bits kept.
 * For a stop signal (WIFSTOPPED) the signal lives in bits 8..15 and is
 * remapped there.  Any other status (normal exit, etc.) is returned
 * unchanged.
 */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
3933 int get_osversion(void)
3935 static int osversion;
3936 struct new_utsname buf;
3941 if (qemu_uname_release && *qemu_uname_release) {
3942 s = qemu_uname_release;
3944 if (sys_uname(&buf))
3949 for (i = 0; i < 3; i++) {
3951 while (*s >= '0' && *s <= '9') {
3956 tmp = (tmp << 8) + n;
3964 /* do_syscall() should always have a single exit point at the end so
3965 that actions, such as logging of syscall results, can be performed.
3966 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
3967 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
3968 abi_long arg2, abi_long arg3, abi_long arg4,
3969 abi_long arg5, abi_long arg6)
3977 gemu_log("syscall %d", num);
3980 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
3983 case TARGET_NR_exit:
3985 /* In old applications this may be used to implement _exit(2).
3986 However in threaded applictions it is used for thread termination,
3987 and _exit_group is used for application termination.
3988 Do thread termination if we have more then one thread. */
3989 /* FIXME: This probably breaks if a signal arrives. We should probably
3990 be disabling signals. */
3991 if (first_cpu->next_cpu) {
3998 while (p && p != (CPUState *)cpu_env) {
3999 lastp = &p->next_cpu;
4002 /* If we didn't find the CPU for this thread then something is
4006 /* Remove the CPU from the list. */
4007 *lastp = p->next_cpu;
4009 TaskState *ts = ((CPUState *)cpu_env)->opaque;
4010 if (ts->child_tidptr) {
4011 put_user_u32(0, ts->child_tidptr);
4012 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4015 /* TODO: Free CPU state. */
4022 gdb_exit(cpu_env, arg1);
4024 ret = 0; /* avoid warning */
4026 case TARGET_NR_read:
4030 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4032 ret = get_errno(read(arg1, p, arg3));
4033 unlock_user(p, arg2, ret);
4036 case TARGET_NR_write:
4037 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4039 ret = get_errno(write(arg1, p, arg3));
4040 unlock_user(p, arg2, 0);
4042 case TARGET_NR_open:
4043 if (!(p = lock_user_string(arg1)))
4045 ret = get_errno(open(path(p),
4046 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4048 unlock_user(p, arg1, 0);
4050 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4051 case TARGET_NR_openat:
4052 if (!(p = lock_user_string(arg2)))
4054 ret = get_errno(sys_openat(arg1,
4056 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4058 unlock_user(p, arg2, 0);
4061 case TARGET_NR_close:
4062 ret = get_errno(close(arg1));
4067 case TARGET_NR_fork:
4068 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4070 #ifdef TARGET_NR_waitpid
4071 case TARGET_NR_waitpid:
4074 ret = get_errno(waitpid(arg1, &status, arg3));
4075 if (!is_error(ret) && arg2
4076 && put_user_s32(host_to_target_waitstatus(status), arg2))
4081 #ifdef TARGET_NR_waitid
4082 case TARGET_NR_waitid:
4086 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4087 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4088 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4090 host_to_target_siginfo(p, &info);
4091 unlock_user(p, arg3, sizeof(target_siginfo_t));
4096 #ifdef TARGET_NR_creat /* not on alpha */
4097 case TARGET_NR_creat:
4098 if (!(p = lock_user_string(arg1)))
4100 ret = get_errno(creat(p, arg2));
4101 unlock_user(p, arg1, 0);
4104 case TARGET_NR_link:
4107 p = lock_user_string(arg1);
4108 p2 = lock_user_string(arg2);
4110 ret = -TARGET_EFAULT;
4112 ret = get_errno(link(p, p2));
4113 unlock_user(p2, arg2, 0);
4114 unlock_user(p, arg1, 0);
4117 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4118 case TARGET_NR_linkat:
4123 p = lock_user_string(arg2);
4124 p2 = lock_user_string(arg4);
4126 ret = -TARGET_EFAULT;
4128 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4129 unlock_user(p, arg2, 0);
4130 unlock_user(p2, arg4, 0);
4134 case TARGET_NR_unlink:
4135 if (!(p = lock_user_string(arg1)))
4137 ret = get_errno(unlink(p));
4138 unlock_user(p, arg1, 0);
4140 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4141 case TARGET_NR_unlinkat:
4142 if (!(p = lock_user_string(arg2)))
4144 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4145 unlock_user(p, arg2, 0);
4148 case TARGET_NR_execve:
4150 char **argp, **envp;
4153 abi_ulong guest_argp;
4154 abi_ulong guest_envp;
4160 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4161 if (get_user_ual(addr, gp))
4169 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4170 if (get_user_ual(addr, gp))
4177 argp = alloca((argc + 1) * sizeof(void *));
4178 envp = alloca((envc + 1) * sizeof(void *));
4180 for (gp = guest_argp, q = argp; gp;
4181 gp += sizeof(abi_ulong), q++) {
4182 if (get_user_ual(addr, gp))
4186 if (!(*q = lock_user_string(addr)))
4191 for (gp = guest_envp, q = envp; gp;
4192 gp += sizeof(abi_ulong), q++) {
4193 if (get_user_ual(addr, gp))
4197 if (!(*q = lock_user_string(addr)))
4202 if (!(p = lock_user_string(arg1)))
4204 ret = get_errno(execve(p, argp, envp));
4205 unlock_user(p, arg1, 0);
4210 ret = -TARGET_EFAULT;
4213 for (gp = guest_argp, q = argp; *q;
4214 gp += sizeof(abi_ulong), q++) {
4215 if (get_user_ual(addr, gp)
4218 unlock_user(*q, addr, 0);
4220 for (gp = guest_envp, q = envp; *q;
4221 gp += sizeof(abi_ulong), q++) {
4222 if (get_user_ual(addr, gp)
4225 unlock_user(*q, addr, 0);
4229 case TARGET_NR_chdir:
4230 if (!(p = lock_user_string(arg1)))
4232 ret = get_errno(chdir(p));
4233 unlock_user(p, arg1, 0);
4235 #ifdef TARGET_NR_time
4236 case TARGET_NR_time:
4239 ret = get_errno(time(&host_time));
4242 && put_user_sal(host_time, arg1))
4247 case TARGET_NR_mknod:
4248 if (!(p = lock_user_string(arg1)))
4250 ret = get_errno(mknod(p, arg2, arg3));
4251 unlock_user(p, arg1, 0);
4253 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4254 case TARGET_NR_mknodat:
4255 if (!(p = lock_user_string(arg2)))
4257 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4258 unlock_user(p, arg2, 0);
4261 case TARGET_NR_chmod:
4262 if (!(p = lock_user_string(arg1)))
4264 ret = get_errno(chmod(p, arg2));
4265 unlock_user(p, arg1, 0);
4267 #ifdef TARGET_NR_break
4268 case TARGET_NR_break:
4271 #ifdef TARGET_NR_oldstat
4272 case TARGET_NR_oldstat:
4275 case TARGET_NR_lseek:
4276 ret = get_errno(lseek(arg1, arg2, arg3));
4278 #ifdef TARGET_NR_getxpid
4279 case TARGET_NR_getxpid:
4281 case TARGET_NR_getpid:
4283 ret = get_errno(getpid());
4285 case TARGET_NR_mount:
4287 /* need to look at the data field */
4289 p = lock_user_string(arg1);
4290 p2 = lock_user_string(arg2);
4291 p3 = lock_user_string(arg3);
4292 if (!p || !p2 || !p3)
4293 ret = -TARGET_EFAULT;
4295 /* FIXME - arg5 should be locked, but it isn't clear how to
4296 * do that since it's not guaranteed to be a NULL-terminated
4299 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4300 unlock_user(p, arg1, 0);
4301 unlock_user(p2, arg2, 0);
4302 unlock_user(p3, arg3, 0);
4305 #ifdef TARGET_NR_umount
4306 case TARGET_NR_umount:
4307 if (!(p = lock_user_string(arg1)))
4309 ret = get_errno(umount(p));
4310 unlock_user(p, arg1, 0);
4313 #ifdef TARGET_NR_stime /* not on alpha */
4314 case TARGET_NR_stime:
4317 if (get_user_sal(host_time, arg1))
4319 ret = get_errno(stime(&host_time));
4323 case TARGET_NR_ptrace:
4325 #ifdef TARGET_NR_alarm /* not on alpha */
4326 case TARGET_NR_alarm:
4330 #ifdef TARGET_NR_oldfstat
4331 case TARGET_NR_oldfstat:
4334 #ifdef TARGET_NR_pause /* not on alpha */
4335 case TARGET_NR_pause:
4336 ret = get_errno(pause());
4339 #ifdef TARGET_NR_utime
4340 case TARGET_NR_utime:
4342 struct utimbuf tbuf, *host_tbuf;
4343 struct target_utimbuf *target_tbuf;
4345 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4347 tbuf.actime = tswapl(target_tbuf->actime);
4348 tbuf.modtime = tswapl(target_tbuf->modtime);
4349 unlock_user_struct(target_tbuf, arg2, 0);
4354 if (!(p = lock_user_string(arg1)))
4356 ret = get_errno(utime(p, host_tbuf));
4357 unlock_user(p, arg1, 0);
4361 case TARGET_NR_utimes:
4363 struct timeval *tvp, tv[2];
4365 if (copy_from_user_timeval(&tv[0], arg2)
4366 || copy_from_user_timeval(&tv[1],
4367 arg2 + sizeof(struct target_timeval)))
4373 if (!(p = lock_user_string(arg1)))
4375 ret = get_errno(utimes(p, tvp));
4376 unlock_user(p, arg1, 0);
4379 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4380 case TARGET_NR_futimesat:
4382 struct timeval *tvp, tv[2];
4384 if (copy_from_user_timeval(&tv[0], arg3)
4385 || copy_from_user_timeval(&tv[1],
4386 arg3 + sizeof(struct target_timeval)))
4392 if (!(p = lock_user_string(arg2)))
4394 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4395 unlock_user(p, arg2, 0);
4399 #ifdef TARGET_NR_stty
4400 case TARGET_NR_stty:
4403 #ifdef TARGET_NR_gtty
4404 case TARGET_NR_gtty:
4407 case TARGET_NR_access:
4408 if (!(p = lock_user_string(arg1)))
4410 ret = get_errno(access(p, arg2));
4411 unlock_user(p, arg1, 0);
4413 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4414 case TARGET_NR_faccessat:
4415 if (!(p = lock_user_string(arg2)))
4417 ret = get_errno(sys_faccessat(arg1, p, arg3));
4418 unlock_user(p, arg2, 0);
4421 #ifdef TARGET_NR_nice /* not on alpha */
4422 case TARGET_NR_nice:
4423 ret = get_errno(nice(arg1));
4426 #ifdef TARGET_NR_ftime
4427 case TARGET_NR_ftime:
4430 case TARGET_NR_sync:
4434 case TARGET_NR_kill:
4435 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
4437 case TARGET_NR_rename:
4440 p = lock_user_string(arg1);
4441 p2 = lock_user_string(arg2);
4443 ret = -TARGET_EFAULT;
4445 ret = get_errno(rename(p, p2));
4446 unlock_user(p2, arg2, 0);
4447 unlock_user(p, arg1, 0);
4450 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4451 case TARGET_NR_renameat:
4454 p = lock_user_string(arg2);
4455 p2 = lock_user_string(arg4);
4457 ret = -TARGET_EFAULT;
4459 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
4460 unlock_user(p2, arg4, 0);
4461 unlock_user(p, arg2, 0);
4465 case TARGET_NR_mkdir:
4466 if (!(p = lock_user_string(arg1)))
4468 ret = get_errno(mkdir(p, arg2));
4469 unlock_user(p, arg1, 0);
4471 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4472 case TARGET_NR_mkdirat:
4473 if (!(p = lock_user_string(arg2)))
4475 ret = get_errno(sys_mkdirat(arg1, p, arg3));
4476 unlock_user(p, arg2, 0);
4479 case TARGET_NR_rmdir:
4480 if (!(p = lock_user_string(arg1)))
4482 ret = get_errno(rmdir(p));
4483 unlock_user(p, arg1, 0);
4486 ret = get_errno(dup(arg1));
4488 case TARGET_NR_pipe:
4491 ret = get_errno(pipe(host_pipe));
4492 if (!is_error(ret)) {
4493 #if defined(TARGET_MIPS)
4494 CPUMIPSState *env = (CPUMIPSState*)cpu_env;
4495 env->active_tc.gpr[3] = host_pipe[1];
4497 #elif defined(TARGET_SH4)
4498 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
4501 if (put_user_s32(host_pipe[0], arg1)
4502 || put_user_s32(host_pipe[1], arg1 + sizeof(host_pipe[0])))
4508 case TARGET_NR_times:
4510 struct target_tms *tmsp;
4512 ret = get_errno(times(&tms));
4514 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
4517 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
4518 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
4519 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
4520 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
4523 ret = host_to_target_clock_t(ret);
4526 #ifdef TARGET_NR_prof
4527 case TARGET_NR_prof:
4530 #ifdef TARGET_NR_signal
4531 case TARGET_NR_signal:
4534 case TARGET_NR_acct:
4536 ret = get_errno(acct(NULL));
4538 if (!(p = lock_user_string(arg1)))
4540 ret = get_errno(acct(path(p)));
4541 unlock_user(p, arg1, 0);
4544 #ifdef TARGET_NR_umount2 /* not on alpha */
4545 case TARGET_NR_umount2:
4546 if (!(p = lock_user_string(arg1)))
4548 ret = get_errno(umount2(p, arg2));
4549 unlock_user(p, arg1, 0);
4552 #ifdef TARGET_NR_lock
4553 case TARGET_NR_lock:
4556 case TARGET_NR_ioctl:
4557 ret = do_ioctl(arg1, arg2, arg3);
4559 case TARGET_NR_fcntl:
4560 ret = do_fcntl(arg1, arg2, arg3);
4562 #ifdef TARGET_NR_mpx
4566 case TARGET_NR_setpgid:
4567 ret = get_errno(setpgid(arg1, arg2));
4569 #ifdef TARGET_NR_ulimit
4570 case TARGET_NR_ulimit:
4573 #ifdef TARGET_NR_oldolduname
4574 case TARGET_NR_oldolduname:
4577 case TARGET_NR_umask:
4578 ret = get_errno(umask(arg1));
4580 case TARGET_NR_chroot:
4581 if (!(p = lock_user_string(arg1)))
4583 ret = get_errno(chroot(p));
4584 unlock_user(p, arg1, 0);
4586 case TARGET_NR_ustat:
4588 case TARGET_NR_dup2:
4589 ret = get_errno(dup2(arg1, arg2));
4591 #ifdef TARGET_NR_getppid /* not on alpha */
4592 case TARGET_NR_getppid:
4593 ret = get_errno(getppid());
4596 case TARGET_NR_getpgrp:
4597 ret = get_errno(getpgrp());
4599 case TARGET_NR_setsid:
4600 ret = get_errno(setsid());
4602 #ifdef TARGET_NR_sigaction
4603 case TARGET_NR_sigaction:
4605 #if !defined(TARGET_MIPS)
4606 struct target_old_sigaction *old_act;
4607 struct target_sigaction act, oact, *pact;
4609 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4611 act._sa_handler = old_act->_sa_handler;
4612 target_siginitset(&act.sa_mask, old_act->sa_mask);
4613 act.sa_flags = old_act->sa_flags;
4614 act.sa_restorer = old_act->sa_restorer;
4615 unlock_user_struct(old_act, arg2, 0);
4620 ret = get_errno(do_sigaction(arg1, pact, &oact));
4621 if (!is_error(ret) && arg3) {
4622 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4624 old_act->_sa_handler = oact._sa_handler;
4625 old_act->sa_mask = oact.sa_mask.sig[0];
4626 old_act->sa_flags = oact.sa_flags;
4627 old_act->sa_restorer = oact.sa_restorer;
4628 unlock_user_struct(old_act, arg3, 1);
4631 struct target_sigaction act, oact, *pact, *old_act;
4634 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4636 act._sa_handler = old_act->_sa_handler;
4637 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
4638 act.sa_flags = old_act->sa_flags;
4639 unlock_user_struct(old_act, arg2, 0);
4645 ret = get_errno(do_sigaction(arg1, pact, &oact));
4647 if (!is_error(ret) && arg3) {
4648 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4650 old_act->_sa_handler = oact._sa_handler;
4651 old_act->sa_flags = oact.sa_flags;
4652 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
4653 old_act->sa_mask.sig[1] = 0;
4654 old_act->sa_mask.sig[2] = 0;
4655 old_act->sa_mask.sig[3] = 0;
4656 unlock_user_struct(old_act, arg3, 1);
4662 case TARGET_NR_rt_sigaction:
4664 struct target_sigaction *act;
4665 struct target_sigaction *oact;
4668 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
4673 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
4674 ret = -TARGET_EFAULT;
4675 goto rt_sigaction_fail;
4679 ret = get_errno(do_sigaction(arg1, act, oact));
4682 unlock_user_struct(act, arg2, 0);
4684 unlock_user_struct(oact, arg3, 1);
4687 #ifdef TARGET_NR_sgetmask /* not on alpha */
4688 case TARGET_NR_sgetmask:
4691 abi_ulong target_set;
4692 sigprocmask(0, NULL, &cur_set);
4693 host_to_target_old_sigset(&target_set, &cur_set);
4698 #ifdef TARGET_NR_ssetmask /* not on alpha */
4699 case TARGET_NR_ssetmask:
4701 sigset_t set, oset, cur_set;
4702 abi_ulong target_set = arg1;
4703 sigprocmask(0, NULL, &cur_set);
4704 target_to_host_old_sigset(&set, &target_set);
4705 sigorset(&set, &set, &cur_set);
4706 sigprocmask(SIG_SETMASK, &set, &oset);
4707 host_to_target_old_sigset(&target_set, &oset);
4712 #ifdef TARGET_NR_sigprocmask
4713 case TARGET_NR_sigprocmask:
4716 sigset_t set, oldset, *set_ptr;
4720 case TARGET_SIG_BLOCK:
4723 case TARGET_SIG_UNBLOCK:
4726 case TARGET_SIG_SETMASK:
4730 ret = -TARGET_EINVAL;
4733 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4735 target_to_host_old_sigset(&set, p);
4736 unlock_user(p, arg2, 0);
4742 ret = get_errno(sigprocmask(arg1, set_ptr, &oldset));
4743 if (!is_error(ret) && arg3) {
4744 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4746 host_to_target_old_sigset(p, &oldset);
4747 unlock_user(p, arg3, sizeof(target_sigset_t));
4752 case TARGET_NR_rt_sigprocmask:
4755 sigset_t set, oldset, *set_ptr;
4759 case TARGET_SIG_BLOCK:
4762 case TARGET_SIG_UNBLOCK:
4765 case TARGET_SIG_SETMASK:
4769 ret = -TARGET_EINVAL;
4772 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4774 target_to_host_sigset(&set, p);
4775 unlock_user(p, arg2, 0);
4781 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
4782 if (!is_error(ret) && arg3) {
4783 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4785 host_to_target_sigset(p, &oldset);
4786 unlock_user(p, arg3, sizeof(target_sigset_t));
4790 #ifdef TARGET_NR_sigpending
4791 case TARGET_NR_sigpending:
4794 ret = get_errno(sigpending(&set));
4795 if (!is_error(ret)) {
4796 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4798 host_to_target_old_sigset(p, &set);
4799 unlock_user(p, arg1, sizeof(target_sigset_t));
4804 case TARGET_NR_rt_sigpending:
4807 ret = get_errno(sigpending(&set));
4808 if (!is_error(ret)) {
4809 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4811 host_to_target_sigset(p, &set);
4812 unlock_user(p, arg1, sizeof(target_sigset_t));
4816 #ifdef TARGET_NR_sigsuspend
4817 case TARGET_NR_sigsuspend:
4820 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4822 target_to_host_old_sigset(&set, p);
4823 unlock_user(p, arg1, 0);
4824 ret = get_errno(sigsuspend(&set));
4828 case TARGET_NR_rt_sigsuspend:
4831 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4833 target_to_host_sigset(&set, p);
4834 unlock_user(p, arg1, 0);
4835 ret = get_errno(sigsuspend(&set));
4838 case TARGET_NR_rt_sigtimedwait:
4841 struct timespec uts, *puts;
4844 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4846 target_to_host_sigset(&set, p);
4847 unlock_user(p, arg1, 0);
4850 target_to_host_timespec(puts, arg3);
4854 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
4855 if (!is_error(ret) && arg2) {
4856 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
4858 host_to_target_siginfo(p, &uinfo);
4859 unlock_user(p, arg2, sizeof(target_siginfo_t));
4863 case TARGET_NR_rt_sigqueueinfo:
4866 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
4868 target_to_host_siginfo(&uinfo, p);
4869 unlock_user(p, arg1, 0);
4870 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
4873 #ifdef TARGET_NR_sigreturn
4874 case TARGET_NR_sigreturn:
4875 /* NOTE: ret is eax, so not transcoding must be done */
4876 ret = do_sigreturn(cpu_env);
4879 case TARGET_NR_rt_sigreturn:
4880 /* NOTE: ret is eax, so not transcoding must be done */
4881 ret = do_rt_sigreturn(cpu_env);
4883 case TARGET_NR_sethostname:
4884 if (!(p = lock_user_string(arg1)))
4886 ret = get_errno(sethostname(p, arg2));
4887 unlock_user(p, arg1, 0);
4889 case TARGET_NR_setrlimit:
4891 /* XXX: convert resource ? */
4892 int resource = arg1;
4893 struct target_rlimit *target_rlim;
4895 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
4897 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4898 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4899 unlock_user_struct(target_rlim, arg2, 0);
4900 ret = get_errno(setrlimit(resource, &rlim));
4903 case TARGET_NR_getrlimit:
4905 /* XXX: convert resource ? */
4906 int resource = arg1;
4907 struct target_rlimit *target_rlim;
4910 ret = get_errno(getrlimit(resource, &rlim));
4911 if (!is_error(ret)) {
4912 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
4914 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4915 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4916 unlock_user_struct(target_rlim, arg2, 1);
4920 case TARGET_NR_getrusage:
4922 struct rusage rusage;
4923 ret = get_errno(getrusage(arg1, &rusage));
4924 if (!is_error(ret)) {
4925 host_to_target_rusage(arg2, &rusage);
4929 case TARGET_NR_gettimeofday:
4932 ret = get_errno(gettimeofday(&tv, NULL));
4933 if (!is_error(ret)) {
4934 if (copy_to_user_timeval(arg1, &tv))
4939 case TARGET_NR_settimeofday:
4942 if (copy_from_user_timeval(&tv, arg1))
4944 ret = get_errno(settimeofday(&tv, NULL));
4947 #ifdef TARGET_NR_select
4948 case TARGET_NR_select:
4950 struct target_sel_arg_struct *sel;
4951 abi_ulong inp, outp, exp, tvp;
4954 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
4956 nsel = tswapl(sel->n);
4957 inp = tswapl(sel->inp);
4958 outp = tswapl(sel->outp);
4959 exp = tswapl(sel->exp);
4960 tvp = tswapl(sel->tvp);
4961 unlock_user_struct(sel, arg1, 0);
4962 ret = do_select(nsel, inp, outp, exp, tvp);
4966 case TARGET_NR_symlink:
4969 p = lock_user_string(arg1);
4970 p2 = lock_user_string(arg2);
4972 ret = -TARGET_EFAULT;
4974 ret = get_errno(symlink(p, p2));
4975 unlock_user(p2, arg2, 0);
4976 unlock_user(p, arg1, 0);
4979 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
4980 case TARGET_NR_symlinkat:
4983 p = lock_user_string(arg1);
4984 p2 = lock_user_string(arg3);
4986 ret = -TARGET_EFAULT;
4988 ret = get_errno(sys_symlinkat(p, arg2, p2));
4989 unlock_user(p2, arg3, 0);
4990 unlock_user(p, arg1, 0);
4994 #ifdef TARGET_NR_oldlstat
4995 case TARGET_NR_oldlstat:
4998 case TARGET_NR_readlink:
5001 p = lock_user_string(arg1);
5002 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5004 ret = -TARGET_EFAULT;
5006 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5007 char real[PATH_MAX];
5008 temp = realpath(exec_path,real);
5009 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
5010 snprintf((char *)p2, arg3, "%s", real);
5013 ret = get_errno(readlink(path(p), p2, arg3));
5015 unlock_user(p2, arg2, ret);
5016 unlock_user(p, arg1, 0);
5019 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5020 case TARGET_NR_readlinkat:
5023 p = lock_user_string(arg2);
5024 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5026 ret = -TARGET_EFAULT;
5028 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5029 unlock_user(p2, arg3, ret);
5030 unlock_user(p, arg2, 0);
5034 #ifdef TARGET_NR_uselib
5035 case TARGET_NR_uselib:
5038 #ifdef TARGET_NR_swapon
5039 case TARGET_NR_swapon:
5040 if (!(p = lock_user_string(arg1)))
5042 ret = get_errno(swapon(p, arg2));
5043 unlock_user(p, arg1, 0);
5046 case TARGET_NR_reboot:
5048 #ifdef TARGET_NR_readdir
5049 case TARGET_NR_readdir:
5052 #ifdef TARGET_NR_mmap
5053 case TARGET_NR_mmap:
5054 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS)
5057 abi_ulong v1, v2, v3, v4, v5, v6;
5058 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5066 unlock_user(v, arg1, 0);
5067 ret = get_errno(target_mmap(v1, v2, v3,
5068 target_to_host_bitmask(v4, mmap_flags_tbl),
5072 ret = get_errno(target_mmap(arg1, arg2, arg3,
5073 target_to_host_bitmask(arg4, mmap_flags_tbl),
5079 #ifdef TARGET_NR_mmap2
5080 case TARGET_NR_mmap2:
5082 #define MMAP_SHIFT 12
5084 ret = get_errno(target_mmap(arg1, arg2, arg3,
5085 target_to_host_bitmask(arg4, mmap_flags_tbl),
5087 arg6 << MMAP_SHIFT));
5090 case TARGET_NR_munmap:
5091 ret = get_errno(target_munmap(arg1, arg2));
5093 case TARGET_NR_mprotect:
5094 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5096 #ifdef TARGET_NR_mremap
5097 case TARGET_NR_mremap:
5098 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5101 /* ??? msync/mlock/munlock are broken for softmmu. */
5102 #ifdef TARGET_NR_msync
5103 case TARGET_NR_msync:
5104 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5107 #ifdef TARGET_NR_mlock
5108 case TARGET_NR_mlock:
5109 ret = get_errno(mlock(g2h(arg1), arg2));
5112 #ifdef TARGET_NR_munlock
5113 case TARGET_NR_munlock:
5114 ret = get_errno(munlock(g2h(arg1), arg2));
5117 #ifdef TARGET_NR_mlockall
5118 case TARGET_NR_mlockall:
5119 ret = get_errno(mlockall(arg1));
5122 #ifdef TARGET_NR_munlockall
5123 case TARGET_NR_munlockall:
5124 ret = get_errno(munlockall());
5127 case TARGET_NR_truncate:
5128 if (!(p = lock_user_string(arg1)))
5130 ret = get_errno(truncate(p, arg2));
5131 unlock_user(p, arg1, 0);
5133 case TARGET_NR_ftruncate:
5134 ret = get_errno(ftruncate(arg1, arg2));
5136 case TARGET_NR_fchmod:
5137 ret = get_errno(fchmod(arg1, arg2));
5139 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5140 case TARGET_NR_fchmodat:
5141 if (!(p = lock_user_string(arg2)))
5143 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5144 unlock_user(p, arg2, 0);
5147 case TARGET_NR_getpriority:
5148 /* libc does special remapping of the return value of
5149 * sys_getpriority() so it's just easiest to call
5150 * sys_getpriority() directly rather than through libc. */
5151 ret = sys_getpriority(arg1, arg2);
5153 case TARGET_NR_setpriority:
5154 ret = get_errno(setpriority(arg1, arg2, arg3));
5156 #ifdef TARGET_NR_profil
5157 case TARGET_NR_profil:
5160 case TARGET_NR_statfs:
5161 if (!(p = lock_user_string(arg1)))
5163 ret = get_errno(statfs(path(p), &stfs));
5164 unlock_user(p, arg1, 0);
5166 if (!is_error(ret)) {
5167 struct target_statfs *target_stfs;
5169 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5171 __put_user(stfs.f_type, &target_stfs->f_type);
5172 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5173 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5174 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5175 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5176 __put_user(stfs.f_files, &target_stfs->f_files);
5177 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5178 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5179 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5180 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5181 unlock_user_struct(target_stfs, arg2, 1);
5184 case TARGET_NR_fstatfs:
5185 ret = get_errno(fstatfs(arg1, &stfs));
5186 goto convert_statfs;
5187 #ifdef TARGET_NR_statfs64
5188 case TARGET_NR_statfs64:
5189 if (!(p = lock_user_string(arg1)))
5191 ret = get_errno(statfs(path(p), &stfs));
5192 unlock_user(p, arg1, 0);
5194 if (!is_error(ret)) {
5195 struct target_statfs64 *target_stfs;
5197 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5199 __put_user(stfs.f_type, &target_stfs->f_type);
5200 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5201 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5202 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5203 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5204 __put_user(stfs.f_files, &target_stfs->f_files);
5205 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5206 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5207 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5208 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5209 unlock_user_struct(target_stfs, arg3, 1);
5212 case TARGET_NR_fstatfs64:
5213 ret = get_errno(fstatfs(arg1, &stfs));
5214 goto convert_statfs64;
5216 #ifdef TARGET_NR_ioperm
5217 case TARGET_NR_ioperm:
5220 #ifdef TARGET_NR_socketcall
5221 case TARGET_NR_socketcall:
5222 ret = do_socketcall(arg1, arg2);
5225 #ifdef TARGET_NR_accept
5226 case TARGET_NR_accept:
5227 ret = do_accept(arg1, arg2, arg3);
5230 #ifdef TARGET_NR_bind
5231 case TARGET_NR_bind:
5232 ret = do_bind(arg1, arg2, arg3);
5235 #ifdef TARGET_NR_connect
5236 case TARGET_NR_connect:
5237 ret = do_connect(arg1, arg2, arg3);
5240 #ifdef TARGET_NR_getpeername
5241 case TARGET_NR_getpeername:
5242 ret = do_getpeername(arg1, arg2, arg3);
5245 #ifdef TARGET_NR_getsockname
5246 case TARGET_NR_getsockname:
5247 ret = do_getsockname(arg1, arg2, arg3);
5250 #ifdef TARGET_NR_getsockopt
5251 case TARGET_NR_getsockopt:
5252 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
5255 #ifdef TARGET_NR_listen
5256 case TARGET_NR_listen:
5257 ret = get_errno(listen(arg1, arg2));
5260 #ifdef TARGET_NR_recv
5261 case TARGET_NR_recv:
5262 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
5265 #ifdef TARGET_NR_recvfrom
5266 case TARGET_NR_recvfrom:
5267 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
5270 #ifdef TARGET_NR_recvmsg
5271 case TARGET_NR_recvmsg:
5272 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
5275 #ifdef TARGET_NR_send
5276 case TARGET_NR_send:
5277 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
5280 #ifdef TARGET_NR_sendmsg
5281 case TARGET_NR_sendmsg:
5282 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
5285 #ifdef TARGET_NR_sendto
5286 case TARGET_NR_sendto:
5287 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
5290 #ifdef TARGET_NR_shutdown
5291 case TARGET_NR_shutdown:
5292 ret = get_errno(shutdown(arg1, arg2));
5295 #ifdef TARGET_NR_socket
5296 case TARGET_NR_socket:
5297 ret = do_socket(arg1, arg2, arg3);
5300 #ifdef TARGET_NR_socketpair
5301 case TARGET_NR_socketpair:
5302 ret = do_socketpair(arg1, arg2, arg3, arg4);
5305 #ifdef TARGET_NR_setsockopt
5306 case TARGET_NR_setsockopt:
5307 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
5311 case TARGET_NR_syslog:
5312 if (!(p = lock_user_string(arg2)))
5314 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
5315 unlock_user(p, arg2, 0);
5318 case TARGET_NR_setitimer:
5320 struct itimerval value, ovalue, *pvalue;
5324 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
5325 || copy_from_user_timeval(&pvalue->it_value,
5326 arg2 + sizeof(struct target_timeval)))
5331 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
5332 if (!is_error(ret) && arg3) {
5333 if (copy_to_user_timeval(arg3,
5334 &ovalue.it_interval)
5335 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
5341 case TARGET_NR_getitimer:
5343 struct itimerval value;
5345 ret = get_errno(getitimer(arg1, &value));
5346 if (!is_error(ret) && arg2) {
5347 if (copy_to_user_timeval(arg2,
5349 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
5355 case TARGET_NR_stat:
5356 if (!(p = lock_user_string(arg1)))
5358 ret = get_errno(stat(path(p), &st));
5359 unlock_user(p, arg1, 0);
5361 case TARGET_NR_lstat:
5362 if (!(p = lock_user_string(arg1)))
5364 ret = get_errno(lstat(path(p), &st));
5365 unlock_user(p, arg1, 0);
5367 case TARGET_NR_fstat:
5369 ret = get_errno(fstat(arg1, &st));
5371 if (!is_error(ret)) {
5372 struct target_stat *target_st;
5374 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
5376 __put_user(st.st_dev, &target_st->st_dev);
5377 __put_user(st.st_ino, &target_st->st_ino);
5378 __put_user(st.st_mode, &target_st->st_mode);
5379 __put_user(st.st_uid, &target_st->st_uid);
5380 __put_user(st.st_gid, &target_st->st_gid);
5381 __put_user(st.st_nlink, &target_st->st_nlink);
5382 __put_user(st.st_rdev, &target_st->st_rdev);
5383 __put_user(st.st_size, &target_st->st_size);
5384 __put_user(st.st_blksize, &target_st->st_blksize);
5385 __put_user(st.st_blocks, &target_st->st_blocks);
5386 __put_user(st.st_atime, &target_st->target_st_atime);
5387 __put_user(st.st_mtime, &target_st->target_st_mtime);
5388 __put_user(st.st_ctime, &target_st->target_st_ctime);
5389 unlock_user_struct(target_st, arg2, 1);
5393 #ifdef TARGET_NR_olduname
5394 case TARGET_NR_olduname:
5397 #ifdef TARGET_NR_iopl
5398 case TARGET_NR_iopl:
5401 case TARGET_NR_vhangup:
5402 ret = get_errno(vhangup());
5404 #ifdef TARGET_NR_idle
5405 case TARGET_NR_idle:
5408 #ifdef TARGET_NR_syscall
5409 case TARGET_NR_syscall:
5410 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
5413 case TARGET_NR_wait4:
5416 abi_long status_ptr = arg2;
5417 struct rusage rusage, *rusage_ptr;
5418 abi_ulong target_rusage = arg4;
5420 rusage_ptr = &rusage;
5423 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
5424 if (!is_error(ret)) {
5426 status = host_to_target_waitstatus(status);
5427 if (put_user_s32(status, status_ptr))
5431 host_to_target_rusage(target_rusage, &rusage);
5435 #ifdef TARGET_NR_swapoff
5436 case TARGET_NR_swapoff:
5437 if (!(p = lock_user_string(arg1)))
5439 ret = get_errno(swapoff(p));
5440 unlock_user(p, arg1, 0);
5443 case TARGET_NR_sysinfo:
5445 struct target_sysinfo *target_value;
5446 struct sysinfo value;
5447 ret = get_errno(sysinfo(&value));
5448 if (!is_error(ret) && arg1)
5450 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
5452 __put_user(value.uptime, &target_value->uptime);
5453 __put_user(value.loads[0], &target_value->loads[0]);
5454 __put_user(value.loads[1], &target_value->loads[1]);
5455 __put_user(value.loads[2], &target_value->loads[2]);
5456 __put_user(value.totalram, &target_value->totalram);
5457 __put_user(value.freeram, &target_value->freeram);
5458 __put_user(value.sharedram, &target_value->sharedram);
5459 __put_user(value.bufferram, &target_value->bufferram);
5460 __put_user(value.totalswap, &target_value->totalswap);
5461 __put_user(value.freeswap, &target_value->freeswap);
5462 __put_user(value.procs, &target_value->procs);
5463 __put_user(value.totalhigh, &target_value->totalhigh);
5464 __put_user(value.freehigh, &target_value->freehigh);
5465 __put_user(value.mem_unit, &target_value->mem_unit);
5466 unlock_user_struct(target_value, arg1, 1);
5470 #ifdef TARGET_NR_ipc
5472 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
5475 #ifdef TARGET_NR_semget
5476 case TARGET_NR_semget:
5477 ret = get_errno(semget(arg1, arg2, arg3));
5480 #ifdef TARGET_NR_semop
5481 case TARGET_NR_semop:
5482 ret = get_errno(do_semop(arg1, arg2, arg3));
5485 #ifdef TARGET_NR_semctl
5486 case TARGET_NR_semctl:
5487 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
5490 #ifdef TARGET_NR_msgctl
5491 case TARGET_NR_msgctl:
5492 ret = do_msgctl(arg1, arg2, arg3);
5495 #ifdef TARGET_NR_msgget
5496 case TARGET_NR_msgget:
5497 ret = get_errno(msgget(arg1, arg2));
5500 #ifdef TARGET_NR_msgrcv
5501 case TARGET_NR_msgrcv:
5502 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
5505 #ifdef TARGET_NR_msgsnd
5506 case TARGET_NR_msgsnd:
5507 ret = do_msgsnd(arg1, arg2, arg3, arg4);
5510 #ifdef TARGET_NR_shmget
5511 case TARGET_NR_shmget:
5512 ret = get_errno(shmget(arg1, arg2, arg3));
5515 #ifdef TARGET_NR_shmctl
5516 case TARGET_NR_shmctl:
5517 ret = do_shmctl(arg1, arg2, arg3);
5520 #ifdef TARGET_NR_shmat
5521 case TARGET_NR_shmat:
5522 ret = do_shmat(arg1, arg2, arg3);
5525 #ifdef TARGET_NR_shmdt
5526 case TARGET_NR_shmdt:
5527 ret = do_shmdt(arg1);
5530 case TARGET_NR_fsync:
5531 ret = get_errno(fsync(arg1));
5533 case TARGET_NR_clone:
5534 #if defined(TARGET_SH4)
5535 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
5536 #elif defined(TARGET_CRIS)
5537 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
5539 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
5542 #ifdef __NR_exit_group
5543 /* new thread calls */
5544 case TARGET_NR_exit_group:
5548 gdb_exit(cpu_env, arg1);
5549 ret = get_errno(exit_group(arg1));
5552 case TARGET_NR_setdomainname:
5553 if (!(p = lock_user_string(arg1)))
5555 ret = get_errno(setdomainname(p, arg2));
5556 unlock_user(p, arg1, 0);
5558 case TARGET_NR_uname:
5559 /* no need to transcode because we use the linux syscall */
5561 struct new_utsname * buf;
5563 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
5565 ret = get_errno(sys_uname(buf));
5566 if (!is_error(ret)) {
5567 /* Overwrite the native machine name with whatever is being
5569 strcpy (buf->machine, UNAME_MACHINE);
5570 /* Allow the user to override the reported release. */
5571 if (qemu_uname_release && *qemu_uname_release)
5572 strcpy (buf->release, qemu_uname_release);
5574 unlock_user_struct(buf, arg1, 1);
5578 case TARGET_NR_modify_ldt:
5579 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
5581 #if !defined(TARGET_X86_64)
5582 case TARGET_NR_vm86old:
5584 case TARGET_NR_vm86:
5585 ret = do_vm86(cpu_env, arg1, arg2);
5589 case TARGET_NR_adjtimex:
5591 #ifdef TARGET_NR_create_module
5592 case TARGET_NR_create_module:
5594 case TARGET_NR_init_module:
5595 case TARGET_NR_delete_module:
5596 #ifdef TARGET_NR_get_kernel_syms
5597 case TARGET_NR_get_kernel_syms:
5600 case TARGET_NR_quotactl:
5602 case TARGET_NR_getpgid:
5603 ret = get_errno(getpgid(arg1));
5605 case TARGET_NR_fchdir:
5606 ret = get_errno(fchdir(arg1));
5608 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5609 case TARGET_NR_bdflush:
5612 #ifdef TARGET_NR_sysfs
5613 case TARGET_NR_sysfs:
5616 case TARGET_NR_personality:
5617 ret = get_errno(personality(arg1));
5619 #ifdef TARGET_NR_afs_syscall
5620 case TARGET_NR_afs_syscall:
5623 #ifdef TARGET_NR__llseek /* Not on alpha */
5624 case TARGET_NR__llseek:
5626 #if defined (__x86_64__)
5627 ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5));
5628 if (put_user_s64(ret, arg4))
5632 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
5633 if (put_user_s64(res, arg4))
5639 case TARGET_NR_getdents:
5640 #if TARGET_ABI_BITS != 32
5642 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5644 struct target_dirent *target_dirp;
5645 struct linux_dirent *dirp;
5646 abi_long count = arg3;
5648 dirp = malloc(count);
5650 ret = -TARGET_ENOMEM;
5654 ret = get_errno(sys_getdents(arg1, dirp, count));
5655 if (!is_error(ret)) {
5656 struct linux_dirent *de;
5657 struct target_dirent *tde;
5659 int reclen, treclen;
5660 int count1, tnamelen;
5664 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5668 reclen = de->d_reclen;
5669 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
5670 tde->d_reclen = tswap16(treclen);
5671 tde->d_ino = tswapl(de->d_ino);
5672 tde->d_off = tswapl(de->d_off);
5673 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
5676 /* XXX: may not be correct */
5677 pstrcpy(tde->d_name, tnamelen, de->d_name);
5678 de = (struct linux_dirent *)((char *)de + reclen);
5680 tde = (struct target_dirent *)((char *)tde + treclen);
5684 unlock_user(target_dirp, arg2, ret);
5690 struct linux_dirent *dirp;
5691 abi_long count = arg3;
5693 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5695 ret = get_errno(sys_getdents(arg1, dirp, count));
5696 if (!is_error(ret)) {
5697 struct linux_dirent *de;
5702 reclen = de->d_reclen;
5705 de->d_reclen = tswap16(reclen);
5706 tswapls(&de->d_ino);
5707 tswapls(&de->d_off);
5708 de = (struct linux_dirent *)((char *)de + reclen);
5712 unlock_user(dirp, arg2, ret);
5716 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5717 case TARGET_NR_getdents64:
5719 struct linux_dirent64 *dirp;
5720 abi_long count = arg3;
5721 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5723 ret = get_errno(sys_getdents64(arg1, dirp, count));
5724 if (!is_error(ret)) {
5725 struct linux_dirent64 *de;
5730 reclen = de->d_reclen;
5733 de->d_reclen = tswap16(reclen);
5734 tswap64s((uint64_t *)&de->d_ino);
5735 tswap64s((uint64_t *)&de->d_off);
5736 de = (struct linux_dirent64 *)((char *)de + reclen);
5740 unlock_user(dirp, arg2, ret);
5743 #endif /* TARGET_NR_getdents64 */
5744 #ifdef TARGET_NR__newselect
5745 case TARGET_NR__newselect:
5746 ret = do_select(arg1, arg2, arg3, arg4, arg5);
5749 #ifdef TARGET_NR_poll
5750 case TARGET_NR_poll:
5752 struct target_pollfd *target_pfd;
5753 unsigned int nfds = arg2;
5758 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
5761 pfd = alloca(sizeof(struct pollfd) * nfds);
5762 for(i = 0; i < nfds; i++) {
5763 pfd[i].fd = tswap32(target_pfd[i].fd);
5764 pfd[i].events = tswap16(target_pfd[i].events);
5766 ret = get_errno(poll(pfd, nfds, timeout));
5767 if (!is_error(ret)) {
5768 for(i = 0; i < nfds; i++) {
5769 target_pfd[i].revents = tswap16(pfd[i].revents);
5771 ret += nfds * (sizeof(struct target_pollfd)
5772 - sizeof(struct pollfd));
5774 unlock_user(target_pfd, arg1, ret);
5778 case TARGET_NR_flock:
5779 /* NOTE: the flock constant seems to be the same for every
5781 ret = get_errno(flock(arg1, arg2));
5783 case TARGET_NR_readv:
5788 vec = alloca(count * sizeof(struct iovec));
5789 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
5791 ret = get_errno(readv(arg1, vec, count));
5792 unlock_iovec(vec, arg2, count, 1);
5795 case TARGET_NR_writev:
5800 vec = alloca(count * sizeof(struct iovec));
5801 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
5803 ret = get_errno(writev(arg1, vec, count));
5804 unlock_iovec(vec, arg2, count, 0);
5807 case TARGET_NR_getsid:
5808 ret = get_errno(getsid(arg1));
5810 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5811 case TARGET_NR_fdatasync:
5812 ret = get_errno(fdatasync(arg1));
5815 case TARGET_NR__sysctl:
5816 /* We don't implement this, but ENOTDIR is always a safe
5818 ret = -TARGET_ENOTDIR;
5820 case TARGET_NR_sched_setparam:
5822 struct sched_param *target_schp;
5823 struct sched_param schp;
5825 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
5827 schp.sched_priority = tswap32(target_schp->sched_priority);
5828 unlock_user_struct(target_schp, arg2, 0);
5829 ret = get_errno(sched_setparam(arg1, &schp));
5832 case TARGET_NR_sched_getparam:
5834 struct sched_param *target_schp;
5835 struct sched_param schp;
5836 ret = get_errno(sched_getparam(arg1, &schp));
5837 if (!is_error(ret)) {
5838 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
5840 target_schp->sched_priority = tswap32(schp.sched_priority);
5841 unlock_user_struct(target_schp, arg2, 1);
5845 case TARGET_NR_sched_setscheduler:
5847 struct sched_param *target_schp;
5848 struct sched_param schp;
5849 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
5851 schp.sched_priority = tswap32(target_schp->sched_priority);
5852 unlock_user_struct(target_schp, arg3, 0);
5853 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
5856 case TARGET_NR_sched_getscheduler:
5857 ret = get_errno(sched_getscheduler(arg1));
5859 case TARGET_NR_sched_yield:
5860 ret = get_errno(sched_yield());
5862 case TARGET_NR_sched_get_priority_max:
5863 ret = get_errno(sched_get_priority_max(arg1));
5865 case TARGET_NR_sched_get_priority_min:
5866 ret = get_errno(sched_get_priority_min(arg1));
5868 case TARGET_NR_sched_rr_get_interval:
5871 ret = get_errno(sched_rr_get_interval(arg1, &ts));
5872 if (!is_error(ret)) {
5873 host_to_target_timespec(arg2, &ts);
5877 case TARGET_NR_nanosleep:
5879 struct timespec req, rem;
5880 target_to_host_timespec(&req, arg1);
5881 ret = get_errno(nanosleep(&req, &rem));
5882 if (is_error(ret) && arg2) {
5883 host_to_target_timespec(arg2, &rem);
5887 #ifdef TARGET_NR_query_module
5888 case TARGET_NR_query_module:
5891 #ifdef TARGET_NR_nfsservctl
5892 case TARGET_NR_nfsservctl:
5895 case TARGET_NR_prctl:
5898 case PR_GET_PDEATHSIG:
5901 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
5902 if (!is_error(ret) && arg2
5903 && put_user_ual(deathsig, arg2))
5908 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
5912 #ifdef TARGET_NR_arch_prctl
5913 case TARGET_NR_arch_prctl:
5914 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
5915 ret = do_arch_prctl(cpu_env, arg1, arg2);
5921 #ifdef TARGET_NR_pread
5922 case TARGET_NR_pread:
5924 if (((CPUARMState *)cpu_env)->eabi)
5927 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5929 ret = get_errno(pread(arg1, p, arg3, arg4));
5930 unlock_user(p, arg2, ret);
5932 case TARGET_NR_pwrite:
5934 if (((CPUARMState *)cpu_env)->eabi)
5937 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5939 ret = get_errno(pwrite(arg1, p, arg3, arg4));
5940 unlock_user(p, arg2, 0);
5943 #ifdef TARGET_NR_pread64
5944 case TARGET_NR_pread64:
5945 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5947 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
5948 unlock_user(p, arg2, ret);
5950 case TARGET_NR_pwrite64:
5951 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5953 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
5954 unlock_user(p, arg2, 0);
5957 case TARGET_NR_getcwd:
5958 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
5960 ret = get_errno(sys_getcwd1(p, arg2));
5961 unlock_user(p, arg1, ret);
5963 case TARGET_NR_capget:
5965 case TARGET_NR_capset:
5967 case TARGET_NR_sigaltstack:
5968 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
5969 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
5970 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
5975 case TARGET_NR_sendfile:
5977 #ifdef TARGET_NR_getpmsg
5978 case TARGET_NR_getpmsg:
5981 #ifdef TARGET_NR_putpmsg
5982 case TARGET_NR_putpmsg:
5985 #ifdef TARGET_NR_vfork
5986 case TARGET_NR_vfork:
5987 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
5991 #ifdef TARGET_NR_ugetrlimit
5992 case TARGET_NR_ugetrlimit:
5995 ret = get_errno(getrlimit(arg1, &rlim));
5996 if (!is_error(ret)) {
5997 struct target_rlimit *target_rlim;
5998 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6000 target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
6001 target_rlim->rlim_max = tswapl(rlim.rlim_max);
6002 unlock_user_struct(target_rlim, arg2, 1);
6007 #ifdef TARGET_NR_truncate64
6008 case TARGET_NR_truncate64:
6009 if (!(p = lock_user_string(arg1)))
6011 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6012 unlock_user(p, arg1, 0);
6015 #ifdef TARGET_NR_ftruncate64
6016 case TARGET_NR_ftruncate64:
6017 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6020 #ifdef TARGET_NR_stat64
6021 case TARGET_NR_stat64:
6022 if (!(p = lock_user_string(arg1)))
6024 ret = get_errno(stat(path(p), &st));
6025 unlock_user(p, arg1, 0);
6027 ret = host_to_target_stat64(cpu_env, arg2, &st);
6030 #ifdef TARGET_NR_lstat64
6031 case TARGET_NR_lstat64:
6032 if (!(p = lock_user_string(arg1)))
6034 ret = get_errno(lstat(path(p), &st));
6035 unlock_user(p, arg1, 0);
6037 ret = host_to_target_stat64(cpu_env, arg2, &st);
6040 #ifdef TARGET_NR_fstat64
6041 case TARGET_NR_fstat64:
6042 ret = get_errno(fstat(arg1, &st));
6044 ret = host_to_target_stat64(cpu_env, arg2, &st);
6047 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6048 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6049 #ifdef TARGET_NR_fstatat64
6050 case TARGET_NR_fstatat64:
6052 #ifdef TARGET_NR_newfstatat
6053 case TARGET_NR_newfstatat:
6055 if (!(p = lock_user_string(arg2)))
6057 #ifdef __NR_fstatat64
6058 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6060 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6063 ret = host_to_target_stat64(cpu_env, arg3, &st);
6067 case TARGET_NR_lchown:
6068 if (!(p = lock_user_string(arg1)))
6070 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6071 unlock_user(p, arg1, 0);
6073 case TARGET_NR_getuid:
6074 ret = get_errno(high2lowuid(getuid()));
6076 case TARGET_NR_getgid:
6077 ret = get_errno(high2lowgid(getgid()));
6079 case TARGET_NR_geteuid:
6080 ret = get_errno(high2lowuid(geteuid()));
6082 case TARGET_NR_getegid:
6083 ret = get_errno(high2lowgid(getegid()));
6085 case TARGET_NR_setreuid:
6086 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6088 case TARGET_NR_setregid:
6089 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6091 case TARGET_NR_getgroups:
6093 int gidsetsize = arg1;
6094 uint16_t *target_grouplist;
6098 grouplist = alloca(gidsetsize * sizeof(gid_t));
6099 ret = get_errno(getgroups(gidsetsize, grouplist));
6100 if (gidsetsize == 0)
6102 if (!is_error(ret)) {
6103 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6104 if (!target_grouplist)
6106 for(i = 0;i < ret; i++)
6107 target_grouplist[i] = tswap16(grouplist[i]);
6108 unlock_user(target_grouplist, arg2, gidsetsize * 2);
6112 case TARGET_NR_setgroups:
6114 int gidsetsize = arg1;
6115 uint16_t *target_grouplist;
6119 grouplist = alloca(gidsetsize * sizeof(gid_t));
6120 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
6121 if (!target_grouplist) {
6122 ret = -TARGET_EFAULT;
6125 for(i = 0;i < gidsetsize; i++)
6126 grouplist[i] = tswap16(target_grouplist[i]);
6127 unlock_user(target_grouplist, arg2, 0);
6128 ret = get_errno(setgroups(gidsetsize, grouplist));
6131 case TARGET_NR_fchown:
6132 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
6134 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6135 case TARGET_NR_fchownat:
6136 if (!(p = lock_user_string(arg2)))
6138 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
6139 unlock_user(p, arg2, 0);
6142 #ifdef TARGET_NR_setresuid
6143 case TARGET_NR_setresuid:
6144 ret = get_errno(setresuid(low2highuid(arg1),
6146 low2highuid(arg3)));
6149 #ifdef TARGET_NR_getresuid
6150 case TARGET_NR_getresuid:
6152 uid_t ruid, euid, suid;
6153 ret = get_errno(getresuid(&ruid, &euid, &suid));
6154 if (!is_error(ret)) {
6155 if (put_user_u16(high2lowuid(ruid), arg1)
6156 || put_user_u16(high2lowuid(euid), arg2)
6157 || put_user_u16(high2lowuid(suid), arg3))
6163 #ifdef TARGET_NR_getresgid
6164 case TARGET_NR_setresgid:
6165 ret = get_errno(setresgid(low2highgid(arg1),
6167 low2highgid(arg3)));
6170 #ifdef TARGET_NR_getresgid
6171 case TARGET_NR_getresgid:
6173 gid_t rgid, egid, sgid;
6174 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6175 if (!is_error(ret)) {
6176 if (put_user_u16(high2lowgid(rgid), arg1)
6177 || put_user_u16(high2lowgid(egid), arg2)
6178 || put_user_u16(high2lowgid(sgid), arg3))
6184 case TARGET_NR_chown:
6185 if (!(p = lock_user_string(arg1)))
6187 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
6188 unlock_user(p, arg1, 0);
6190 case TARGET_NR_setuid:
6191 ret = get_errno(setuid(low2highuid(arg1)));
6193 case TARGET_NR_setgid:
6194 ret = get_errno(setgid(low2highgid(arg1)));
6196 case TARGET_NR_setfsuid:
6197 ret = get_errno(setfsuid(arg1));
6199 case TARGET_NR_setfsgid:
6200 ret = get_errno(setfsgid(arg1));
6202 #endif /* USE_UID16 */
6204 #ifdef TARGET_NR_lchown32
6205 case TARGET_NR_lchown32:
6206 if (!(p = lock_user_string(arg1)))
6208 ret = get_errno(lchown(p, arg2, arg3));
6209 unlock_user(p, arg1, 0);
6212 #ifdef TARGET_NR_getuid32
6213 case TARGET_NR_getuid32:
6214 ret = get_errno(getuid());
6218 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6219 /* Alpha specific */
6220 case TARGET_NR_getxuid:
6224 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
6226 ret = get_errno(getuid());
6229 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6230 /* Alpha specific */
6231 case TARGET_NR_getxgid:
6235 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
6237 ret = get_errno(getgid());
6241 #ifdef TARGET_NR_getgid32
6242 case TARGET_NR_getgid32:
6243 ret = get_errno(getgid());
6246 #ifdef TARGET_NR_geteuid32
6247 case TARGET_NR_geteuid32:
6248 ret = get_errno(geteuid());
6251 #ifdef TARGET_NR_getegid32
6252 case TARGET_NR_getegid32:
6253 ret = get_errno(getegid());
6256 #ifdef TARGET_NR_setreuid32
6257 case TARGET_NR_setreuid32:
6258 ret = get_errno(setreuid(arg1, arg2));
6261 #ifdef TARGET_NR_setregid32
6262 case TARGET_NR_setregid32:
6263 ret = get_errno(setregid(arg1, arg2));
6266 #ifdef TARGET_NR_getgroups32
6267 case TARGET_NR_getgroups32:
6269 int gidsetsize = arg1;
6270 uint32_t *target_grouplist;
6274 grouplist = alloca(gidsetsize * sizeof(gid_t));
6275 ret = get_errno(getgroups(gidsetsize, grouplist));
6276 if (gidsetsize == 0)
6278 if (!is_error(ret)) {
6279 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
6280 if (!target_grouplist) {
6281 ret = -TARGET_EFAULT;
6284 for(i = 0;i < ret; i++)
6285 target_grouplist[i] = tswap32(grouplist[i]);
6286 unlock_user(target_grouplist, arg2, gidsetsize * 4);
6291 #ifdef TARGET_NR_setgroups32
6292 case TARGET_NR_setgroups32:
6294 int gidsetsize = arg1;
6295 uint32_t *target_grouplist;
6299 grouplist = alloca(gidsetsize * sizeof(gid_t));
6300 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
6301 if (!target_grouplist) {
6302 ret = -TARGET_EFAULT;
6305 for(i = 0;i < gidsetsize; i++)
6306 grouplist[i] = tswap32(target_grouplist[i]);
6307 unlock_user(target_grouplist, arg2, 0);
6308 ret = get_errno(setgroups(gidsetsize, grouplist));
6312 #ifdef TARGET_NR_fchown32
6313 case TARGET_NR_fchown32:
6314 ret = get_errno(fchown(arg1, arg2, arg3));
6317 #ifdef TARGET_NR_setresuid32
6318 case TARGET_NR_setresuid32:
6319 ret = get_errno(setresuid(arg1, arg2, arg3));
6322 #ifdef TARGET_NR_getresuid32
6323 case TARGET_NR_getresuid32:
6325 uid_t ruid, euid, suid;
6326 ret = get_errno(getresuid(&ruid, &euid, &suid));
6327 if (!is_error(ret)) {
6328 if (put_user_u32(ruid, arg1)
6329 || put_user_u32(euid, arg2)
6330 || put_user_u32(suid, arg3))
6336 #ifdef TARGET_NR_setresgid32
6337 case TARGET_NR_setresgid32:
6338 ret = get_errno(setresgid(arg1, arg2, arg3));
6341 #ifdef TARGET_NR_getresgid32
6342 case TARGET_NR_getresgid32:
6344 gid_t rgid, egid, sgid;
6345 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6346 if (!is_error(ret)) {
6347 if (put_user_u32(rgid, arg1)
6348 || put_user_u32(egid, arg2)
6349 || put_user_u32(sgid, arg3))
6355 #ifdef TARGET_NR_chown32
6356 case TARGET_NR_chown32:
6357 if (!(p = lock_user_string(arg1)))
6359 ret = get_errno(chown(p, arg2, arg3));
6360 unlock_user(p, arg1, 0);
6363 #ifdef TARGET_NR_setuid32
6364 case TARGET_NR_setuid32:
6365 ret = get_errno(setuid(arg1));
6368 #ifdef TARGET_NR_setgid32
6369 case TARGET_NR_setgid32:
6370 ret = get_errno(setgid(arg1));
6373 #ifdef TARGET_NR_setfsuid32
6374 case TARGET_NR_setfsuid32:
6375 ret = get_errno(setfsuid(arg1));
6378 #ifdef TARGET_NR_setfsgid32
6379 case TARGET_NR_setfsgid32:
6380 ret = get_errno(setfsgid(arg1));
6384 case TARGET_NR_pivot_root:
6386 #ifdef TARGET_NR_mincore
6387 case TARGET_NR_mincore:
6390 ret = -TARGET_EFAULT;
6391 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
6393 if (!(p = lock_user_string(arg3)))
6395 ret = get_errno(mincore(a, arg2, p));
6396 unlock_user(p, arg3, ret);
6398 unlock_user(a, arg1, 0);
6402 #ifdef TARGET_NR_arm_fadvise64_64
6403 case TARGET_NR_arm_fadvise64_64:
6406 * arm_fadvise64_64 looks like fadvise64_64 but
6407 * with different argument order
6415 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
6416 #ifdef TARGET_NR_fadvise64_64
6417 case TARGET_NR_fadvise64_64:
6419 /* This is a hint, so ignoring and returning success is ok. */
6423 #ifdef TARGET_NR_madvise
6424 case TARGET_NR_madvise:
6425 /* A straight passthrough may not be safe because qemu sometimes
6426 turns private file-backed mappings into anonymous mappings.
6427 This will break MADV_DONTNEED.
6428 This is a hint, so ignoring and returning success is ok. */
6432 #if TARGET_ABI_BITS == 32
6433 case TARGET_NR_fcntl64:
6437 struct target_flock64 *target_fl;
6439 struct target_eabi_flock64 *target_efl;
6443 case TARGET_F_GETLK64:
6446 case TARGET_F_SETLK64:
6449 case TARGET_F_SETLKW64:
6458 case TARGET_F_GETLK64:
6460 if (((CPUARMState *)cpu_env)->eabi) {
6461 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6463 fl.l_type = tswap16(target_efl->l_type);
6464 fl.l_whence = tswap16(target_efl->l_whence);
6465 fl.l_start = tswap64(target_efl->l_start);
6466 fl.l_len = tswap64(target_efl->l_len);
6467 fl.l_pid = tswapl(target_efl->l_pid);
6468 unlock_user_struct(target_efl, arg3, 0);
6472 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6474 fl.l_type = tswap16(target_fl->l_type);
6475 fl.l_whence = tswap16(target_fl->l_whence);
6476 fl.l_start = tswap64(target_fl->l_start);
6477 fl.l_len = tswap64(target_fl->l_len);
6478 fl.l_pid = tswapl(target_fl->l_pid);
6479 unlock_user_struct(target_fl, arg3, 0);
6481 ret = get_errno(fcntl(arg1, cmd, &fl));
6484 if (((CPUARMState *)cpu_env)->eabi) {
6485 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
6487 target_efl->l_type = tswap16(fl.l_type);
6488 target_efl->l_whence = tswap16(fl.l_whence);
6489 target_efl->l_start = tswap64(fl.l_start);
6490 target_efl->l_len = tswap64(fl.l_len);
6491 target_efl->l_pid = tswapl(fl.l_pid);
6492 unlock_user_struct(target_efl, arg3, 1);
6496 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
6498 target_fl->l_type = tswap16(fl.l_type);
6499 target_fl->l_whence = tswap16(fl.l_whence);
6500 target_fl->l_start = tswap64(fl.l_start);
6501 target_fl->l_len = tswap64(fl.l_len);
6502 target_fl->l_pid = tswapl(fl.l_pid);
6503 unlock_user_struct(target_fl, arg3, 1);
6508 case TARGET_F_SETLK64:
6509 case TARGET_F_SETLKW64:
6511 if (((CPUARMState *)cpu_env)->eabi) {
6512 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6514 fl.l_type = tswap16(target_efl->l_type);
6515 fl.l_whence = tswap16(target_efl->l_whence);
6516 fl.l_start = tswap64(target_efl->l_start);
6517 fl.l_len = tswap64(target_efl->l_len);
6518 fl.l_pid = tswapl(target_efl->l_pid);
6519 unlock_user_struct(target_efl, arg3, 0);
6523 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6525 fl.l_type = tswap16(target_fl->l_type);
6526 fl.l_whence = tswap16(target_fl->l_whence);
6527 fl.l_start = tswap64(target_fl->l_start);
6528 fl.l_len = tswap64(target_fl->l_len);
6529 fl.l_pid = tswapl(target_fl->l_pid);
6530 unlock_user_struct(target_fl, arg3, 0);
6532 ret = get_errno(fcntl(arg1, cmd, &fl));
6535 ret = do_fcntl(arg1, arg2, arg3);
6541 #ifdef TARGET_NR_cacheflush
6542 case TARGET_NR_cacheflush:
6543 /* self-modifying code is handled automatically, so nothing needed */
6547 #ifdef TARGET_NR_security
6548 case TARGET_NR_security:
6551 #ifdef TARGET_NR_getpagesize
6552 case TARGET_NR_getpagesize:
6553 ret = TARGET_PAGE_SIZE;
6556 case TARGET_NR_gettid:
6557 ret = get_errno(gettid());
6559 #ifdef TARGET_NR_readahead
6560 case TARGET_NR_readahead:
6561 #if TARGET_ABI_BITS == 32
6563 if (((CPUARMState *)cpu_env)->eabi)
6570 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
6572 ret = get_errno(readahead(arg1, arg2, arg3));
6576 #ifdef TARGET_NR_setxattr
6577 case TARGET_NR_setxattr:
6578 case TARGET_NR_lsetxattr:
6579 case TARGET_NR_fsetxattr:
6580 case TARGET_NR_getxattr:
6581 case TARGET_NR_lgetxattr:
6582 case TARGET_NR_fgetxattr:
6583 case TARGET_NR_listxattr:
6584 case TARGET_NR_llistxattr:
6585 case TARGET_NR_flistxattr:
6586 case TARGET_NR_removexattr:
6587 case TARGET_NR_lremovexattr:
6588 case TARGET_NR_fremovexattr:
6589 goto unimplemented_nowarn;
6591 #ifdef TARGET_NR_set_thread_area
6592 case TARGET_NR_set_thread_area:
6593 #if defined(TARGET_MIPS)
6594 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
6597 #elif defined(TARGET_CRIS)
6599 ret = -TARGET_EINVAL;
6601 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
6605 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6606 ret = do_set_thread_area(cpu_env, arg1);
6609 goto unimplemented_nowarn;
6612 #ifdef TARGET_NR_get_thread_area
6613 case TARGET_NR_get_thread_area:
6614 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6615 ret = do_get_thread_area(cpu_env, arg1);
6617 goto unimplemented_nowarn;
6620 #ifdef TARGET_NR_getdomainname
6621 case TARGET_NR_getdomainname:
6622 goto unimplemented_nowarn;
6625 #ifdef TARGET_NR_clock_gettime
6626 case TARGET_NR_clock_gettime:
6629 ret = get_errno(clock_gettime(arg1, &ts));
6630 if (!is_error(ret)) {
6631 host_to_target_timespec(arg2, &ts);
6636 #ifdef TARGET_NR_clock_getres
6637 case TARGET_NR_clock_getres:
6640 ret = get_errno(clock_getres(arg1, &ts));
6641 if (!is_error(ret)) {
6642 host_to_target_timespec(arg2, &ts);
6647 #ifdef TARGET_NR_clock_nanosleep
6648 case TARGET_NR_clock_nanosleep:
6651 target_to_host_timespec(&ts, arg3);
6652 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
6654 host_to_target_timespec(arg4, &ts);
6659 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
6660 case TARGET_NR_set_tid_address:
6661 ret = get_errno(set_tid_address((int *)g2h(arg1)));
6665 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
6666 case TARGET_NR_tkill:
6667 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
6671 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
6672 case TARGET_NR_tgkill:
6673 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
6674 target_to_host_signal(arg3)));
6678 #ifdef TARGET_NR_set_robust_list
6679 case TARGET_NR_set_robust_list:
6680 goto unimplemented_nowarn;
6683 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
6684 case TARGET_NR_utimensat:
6686 struct timespec *tsp, ts[2];
6690 target_to_host_timespec(ts, arg3);
6691 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
6695 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
6697 if (!(p = lock_user_string(arg2))) {
6698 ret = -TARGET_EFAULT;
6701 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
6702 unlock_user(p, arg2, 0);
6707 #if defined(USE_NPTL)
6708 case TARGET_NR_futex:
6709 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
6712 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
6713 case TARGET_NR_inotify_init:
6714 ret = get_errno(sys_inotify_init());
6717 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
6718 case TARGET_NR_inotify_add_watch:
6719 p = lock_user_string(arg2);
6720 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
6721 unlock_user(p, arg2, 0);
6724 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
6725 case TARGET_NR_inotify_rm_watch:
6726 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
6730 #ifdef TARGET_NR_mq_open
6731 case TARGET_NR_mq_open:
6733 struct mq_attr posix_mq_attr;
6735 p = lock_user_string(arg1 - 1);
6737 copy_from_user_mq_attr (&posix_mq_attr, arg4);
6738 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
6739 unlock_user (p, arg1, 0);
6743 case TARGET_NR_mq_unlink:
6744 p = lock_user_string(arg1 - 1);
6745 ret = get_errno(mq_unlink(p));
6746 unlock_user (p, arg1, 0);
6749 case TARGET_NR_mq_timedsend:
6753 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6755 target_to_host_timespec(&ts, arg5);
6756 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
6757 host_to_target_timespec(arg5, &ts);
6760 ret = get_errno(mq_send(arg1, p, arg3, arg4));
6761 unlock_user (p, arg2, arg3);
6765 case TARGET_NR_mq_timedreceive:
6770 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6772 target_to_host_timespec(&ts, arg5);
6773 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
6774 host_to_target_timespec(arg5, &ts);
6777 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
6778 unlock_user (p, arg2, arg3);
6780 put_user_u32(prio, arg4);
6784 /* Not implemented for now... */
6785 /* case TARGET_NR_mq_notify: */
6788 case TARGET_NR_mq_getsetattr:
6790 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
6793 ret = mq_getattr(arg1, &posix_mq_attr_out);
6794 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
6797 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
6798 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
6807 gemu_log("qemu: Unsupported syscall: %d\n", num);
6808 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
6809 unimplemented_nowarn:
6811 ret = -TARGET_ENOSYS;
6816 gemu_log(" = %ld\n", ret);
6819 print_syscall_ret(num, ret);
6822 ret = -TARGET_EFAULT;