4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
33 #include <sys/types.h>
39 #include <sys/mount.h>
40 #include <sys/prctl.h>
41 #include <sys/resource.h>
46 #include <sys/socket.h>
50 #include <sys/times.h>
53 #include <sys/statfs.h>
55 #include <sys/sysinfo.h>
56 #include <sys/utsname.h>
57 //#include <sys/user.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 #include <qemu-common.h>
65 #define termios host_termios
66 #define winsize host_winsize
67 #define termio host_termio
68 #define sgttyb host_sgttyb /* same as target */
69 #define tchars host_tchars /* same as target */
70 #define ltchars host_ltchars /* same as target */
72 #include <linux/termios.h>
73 #include <linux/unistd.h>
74 #include <linux/utsname.h>
75 #include <linux/cdrom.h>
76 #include <linux/hdreg.h>
77 #include <linux/soundcard.h>
79 #include <linux/mtio.h>
80 #include "linux_loop.h"
83 #include "qemu-common.h"
86 #include <linux/futex.h>
/* Extra clone() flags forwarded to the host when emulating NPTL threads.
 * NOTE(review): CLONE_NPTL_FLAGS2 is defined twice below; in upstream QEMU
 * these are the two arms of an `#if defined(USE_NPTL)` / `#else` that the
 * extraction has dropped — the second definition (0) is the non-NPTL
 * fallback, not a redefinition. Confirm against the original file. */
87 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
88 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
90 /* XXX: Hardcode the above values. */
91 #define CLONE_NPTL_FLAGS2 0
/* Targets whose ABI historically used 16-bit uid_t/gid_t; for these the
 * 16-bit uid syscall variants must be emulated (USE_UID16 path). */
96 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
97 || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
98 /* 16 bit uid wrappers emulation */
/* VFAT readdir ioctl numbers, spelled out locally because including
 * <linux/msdos_fs.h> directly is problematic on some hosts. */
102 //#include <linux/msdos_fs.h>
103 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
104 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* _syscallN(type, name, ...): generate a static wrapper function `name`
 * that invokes the raw host syscall __NR_<name> via syscall(2) with N
 * arguments. Used below to call host syscalls that libc may not expose.
 * NOTE(review): the brace/body lines of each macro were lost in
 * extraction; each expands to `static type name(args) { return
 * syscall(__NR_##name, args...); }` in the original. */
115 #define _syscall0(type,name) \
116 static type name (void) \
118 return syscall(__NR_##name); \
121 #define _syscall1(type,name,type1,arg1) \
122 static type name (type1 arg1) \
124 return syscall(__NR_##name, arg1); \
127 #define _syscall2(type,name,type1,arg1,type2,arg2) \
128 static type name (type1 arg1,type2 arg2) \
130 return syscall(__NR_##name, arg1, arg2); \
133 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
134 static type name (type1 arg1,type2 arg2,type3 arg3) \
136 return syscall(__NR_##name, arg1, arg2, arg3); \
139 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
140 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
142 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
145 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
147 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
149 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
153 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
154 type5,arg5,type6,arg6) \
155 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
158 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Alias each host syscall number as __NR_sys_<name> so the _syscallN
 * macros above can generate wrappers named sys_<name> without clashing
 * with libc's own declarations of the same functions. */
162 #define __NR_sys_uname __NR_uname
163 #define __NR_sys_faccessat __NR_faccessat
164 #define __NR_sys_fchmodat __NR_fchmodat
165 #define __NR_sys_fchownat __NR_fchownat
166 #define __NR_sys_fstatat64 __NR_fstatat64
167 #define __NR_sys_futimesat __NR_futimesat
168 #define __NR_sys_getcwd1 __NR_getcwd
169 #define __NR_sys_getdents __NR_getdents
170 #define __NR_sys_getdents64 __NR_getdents64
171 #define __NR_sys_getpriority __NR_getpriority
172 #define __NR_sys_linkat __NR_linkat
173 #define __NR_sys_mkdirat __NR_mkdirat
174 #define __NR_sys_mknodat __NR_mknodat
175 #define __NR_sys_newfstatat __NR_newfstatat
176 #define __NR_sys_openat __NR_openat
177 #define __NR_sys_readlinkat __NR_readlinkat
178 #define __NR_sys_renameat __NR_renameat
179 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
180 #define __NR_sys_symlinkat __NR_symlinkat
181 #define __NR_sys_syslog __NR_syslog
182 #define __NR_sys_tgkill __NR_tgkill
183 #define __NR_sys_tkill __NR_tkill
184 #define __NR_sys_unlinkat __NR_unlinkat
185 #define __NR_sys_utimensat __NR_utimensat
186 #define __NR_sys_futex __NR_futex
187 #define __NR_sys_inotify_init __NR_inotify_init
188 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
189 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/* Host-side wrappers for raw syscalls needed by the emulation layer.
 * Each stub is generated only when both the target requests the syscall
 * (TARGET_NR_*) and the host provides it (__NR_*).
 * NOTE(review): the matching #endif lines for these conditionals were
 * lost in extraction — confirm pairing against the original file. */
/* 64-bit hosts have no _llseek; route it to plain lseek. */
191 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
192 #define __NR__llseek __NR_lseek
196 _syscall0(int, gettid)
198 /* This is a replacement for the host gettid() and must return a host
200 static int gettid(void) {
204 _syscall1(int,sys_uname,struct new_utsname *,buf)
205 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
206 _syscall4(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode,int,flags)
208 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
209 _syscall4(int,sys_fchmodat,int,dirfd,const char *,pathname,
210 mode_t,mode,int,flags)
212 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
213 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
214 uid_t,owner,gid_t,group,int,flags)
216 #if defined(TARGET_NR_fstatat64) && defined(__NR_fstatat64)
217 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
218 struct stat *,buf,int,flags)
220 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
221 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
222 const struct timeval *,times)
224 #if TARGET_ABI_BITS == 32
225 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
227 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
228 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
230 _syscall2(int, sys_getpriority, int, which, int, who);
231 #if !defined (__x86_64__)
232 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
233 loff_t *, res, uint, wh);
235 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
236 defined(__NR_newfstatat)
237 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
238 struct stat *,buf,int,flags)
240 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
241 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
242 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
243 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
245 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
246 _syscall2(int,sys_tkill,int,tid,int,sig)
248 #ifdef __NR_exit_group
249 _syscall1(int,exit_group,int,error_code)
251 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
252 _syscall1(int,set_tid_address,int *,tidptr)
254 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
255 _syscall0(int,sys_inotify_init)
257 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
258 _syscall3(int,sys_inotify_add_watch,int,fd,const char *,pathname,uint32_t,mask)
260 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
261 _syscall2(int,sys_inotify_rm_watch,int,fd,uint32_t,wd)
/* sys_futex is only needed when emulating NPTL thread primitives. */
263 #if defined(USE_NPTL)
264 #if defined(TARGET_NR_futex) && defined(__NR_futex)
265 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
266 const struct timespec *,timeout,int *,uaddr2,int,val3)
/* Bidirectional translation table for open(2)/fcntl(2) file status
 * flags: each row is { target_mask, target_bits, host_mask, host_bits }.
 * Consumed by target_to_host_bitmask()/host_to_target_bitmask().
 * NOTE(review): the closing "};" (and the #endif for O_DIRECT) were lost
 * in extraction. */
270 static bitmask_transtbl fcntl_flags_tbl[] = {
271 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
272 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
273 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
274 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
275 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
276 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
277 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
278 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
279 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
280 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
281 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
282 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
283 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
/* O_DIRECT is not available on every host libc. */
284 #if defined(O_DIRECT)
285 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
/* Fallback sys_uname(): call the host's libc uname() and copy the
 * result field-by-field into the kernel-style struct new_utsname that
 * the target expects. Each field is truncated to __NEW_UTS_LEN and
 * explicitly NUL-terminated (strncpy alone does not guarantee that).
 * NOTE(review): the return type / braces / return statements were lost
 * in extraction; presumably this definition is in an #ifdef arm
 * alternative to the _syscall1 sys_uname stub above — confirm against
 * the original file. */
291 sys_uname(struct new_utsname *buf)
293 struct utsname uts_buf;
295 if (uname(&uts_buf) < 0)
299 * Just in case these have some differences, we
300 * translate utsname to new_utsname (which is the
301 * struct linux kernel uses).
304 #define COPY_UTSNAME_FIELD(dest, src) \
306 /* __NEW_UTS_LEN doesn't include terminating null */ \
307 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
308 (dest)[__NEW_UTS_LEN] = '\0'; \
311 bzero(buf, sizeof (*buf));
312 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
313 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
314 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
315 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
316 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
318 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
322 #undef COPY_UTSNAME_FIELD
/*
 * Fallback implementation of the getcwd syscall used by the
 * TARGET_NR_getcwd emulation: fill 'buf' (of 'size' bytes) with the
 * current working directory via the host's getcwd(3).
 *
 * Returns 0 on success, -1 on failure with errno set by getcwd()
 * (e.g. ERANGE when 'size' is too small).
 *
 * NOTE(review): the original lines were truncated by extraction; this
 * is the reconstructed complete function.
 */
static int
sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return (0);
}
/* CONFIG_ATFILE branch: the host libc provides the *at() family, so each
 * target *at syscall is forwarded directly to the host function. Each
 * wrapper is compiled only if the target ABI defines the syscall.
 * NOTE(review): return types, braces and the per-wrapper #endif lines
 * were lost in extraction. */
338 * Host system seems to have atfile syscall stubs available. We
339 * now enable them one by one as specified by target syscall_nr.h.
342 #ifdef TARGET_NR_openat
344 sys_openat(int dirfd, const char *pathname, int flags, ...)
/* openat takes a variadic 'mode' argument only when O_CREAT is set;
 * fetch it with va_arg and translate target mode bits to host bits. */
347 * open(2) has extra parameter 'mode' when called with
350 if ((flags & O_CREAT) != 0) {
355 * Get the 'mode' parameter and translate it to
359 mode = va_arg(ap, mode_t);
360 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
363 return (openat(dirfd, pathname, flags, mode));
365 return (openat(dirfd, pathname, flags));
369 #ifdef TARGET_NR_mkdirat
371 sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
373 return (mkdirat(dirfd, pathname, mode));
377 #ifdef TARGET_NR_mknodat
379 sys_mknodat(int dirfd, const char *pathname, mode_t mode, dev_t dev)
381 return (mknodat(dirfd, pathname, mode, dev));
385 #ifdef TARGET_NR_fchownat
387 sys_fchownat(int dirfd, const char *pathname, uid_t owner,
388 gid_t group, int flags)
390 return (fchownat(dirfd, pathname, owner, group, flags));
394 #ifdef TARGET_NR_fstatat
396 sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
399 return (fstatat64(dirfd, pathname, buf, flags))
403 #ifdef TARGET_NR_unlinkat
405 sys_unlinkat(int dirfd, const char *pathname, int flags)
407 return (unlinkat(dirfd, pathname, flags));
411 #ifdef TARGET_NR_renameat
413 sys_renameat(int olddirfd, const char *oldpath,
414 int newdirfd, const char *newpath)
416 return (renameat(olddirfd, oldpath, newdirfd, newpath));
420 #ifdef TARGET_NR_linkat
422 sys_linkat(int olddirfd, const char *oldpath,
423 int newdirfd, const char *newpath, int flags)
425 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
429 #ifdef TARGET_NR_symlinkat
431 sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
433 return (symlinkat(oldpath, newdirfd, newpath));
437 #ifdef TARGET_NR_readlinkat
439 sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
441 return (readlinkat(dirfd, pathname, buf, bufsiz));
445 #ifdef TARGET_NR_fchmodat
447 sys_fchmodat(int dirfd, const char *pathname, mode_t mode, int flags)
449 return (fchmodat(dirfd, pathname, mode, flags));
453 #ifdef TARGET_NR_faccessat
455 sys_faccessat(int dirfd, const char *pathname, int mode, int flags)
457 return (faccessat(dirfd, pathname, mode, flags));
461 #ifdef TARGET_NR_utimensat
463 sys_utimensat(int dirfd, const char *pathname,
464 const struct timespec times[2], int flags)
466 return (utimensat(dirfd, pathname, times, flags));
/* !CONFIG_ATFILE branch: host lacks the *at() family entirely, so
 * un-define the target syscall numbers — the big syscall switch will
 * then report them as unimplemented instead of calling missing stubs. */
470 #else /* !CONFIG_ATFILE */
473 * Host system doesn't have these available so we don't try
477 #undef TARGET_NR_openat
478 #undef TARGET_NR_mkdirat
479 #undef TARGET_NR_mknodat
480 #undef TARGET_NR_fchownat
481 #undef TARGET_NR_fstatat
482 #undef TARGET_NR_unlinkat
483 #undef TARGET_NR_renameat
484 #undef TARGET_NR_linkat
485 #undef TARGET_NR_symlinkat
486 #undef TARGET_NR_readlinkat
487 #undef TARGET_NR_fchmodat
488 #undef TARGET_NR_faccessat
489 #undef TARGET_NR_utimensat
491 #endif /* CONFIG_ATFILE */
/* Host libc functions used below but not always declared by the
 * headers included above. */
494 extern int personality(int);
495 extern int flock(int, int);
496 extern int setfsuid(int);
497 extern int setfsgid(int);
498 extern int setgroups(int, gid_t *);
/* Host <-> target errno translation. Only errno values that differ
 * between architectures appear here; a zero entry means "same number on
 * both sides" (see host_to_target_errno / target_to_host_errno below,
 * which fall back to the identity mapping). The reverse table is built
 * at startup by inverting this one.
 * NOTE(review): the closing "};" of the initializer and the #ifdef
 * guards around the newer E* constants (ENOKEY, EKEY*, EOWNERDEAD) were
 * lost in extraction. */
500 #define ERRNO_TABLE_SIZE 1200
502 /* target_to_host_errno_table[] is initialized from
503 * host_to_target_errno_table[] in syscall_init(). */
504 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
508 * This list is the union of errno values overridden in asm-<arch>/errno.h
509 * minus the errnos that are not actually generic to all archs.
511 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
512 [EIDRM] = TARGET_EIDRM,
513 [ECHRNG] = TARGET_ECHRNG,
514 [EL2NSYNC] = TARGET_EL2NSYNC,
515 [EL3HLT] = TARGET_EL3HLT,
516 [EL3RST] = TARGET_EL3RST,
517 [ELNRNG] = TARGET_ELNRNG,
518 [EUNATCH] = TARGET_EUNATCH,
519 [ENOCSI] = TARGET_ENOCSI,
520 [EL2HLT] = TARGET_EL2HLT,
521 [EDEADLK] = TARGET_EDEADLK,
522 [ENOLCK] = TARGET_ENOLCK,
523 [EBADE] = TARGET_EBADE,
524 [EBADR] = TARGET_EBADR,
525 [EXFULL] = TARGET_EXFULL,
526 [ENOANO] = TARGET_ENOANO,
527 [EBADRQC] = TARGET_EBADRQC,
528 [EBADSLT] = TARGET_EBADSLT,
529 [EBFONT] = TARGET_EBFONT,
530 [ENOSTR] = TARGET_ENOSTR,
531 [ENODATA] = TARGET_ENODATA,
532 [ETIME] = TARGET_ETIME,
533 [ENOSR] = TARGET_ENOSR,
534 [ENONET] = TARGET_ENONET,
535 [ENOPKG] = TARGET_ENOPKG,
536 [EREMOTE] = TARGET_EREMOTE,
537 [ENOLINK] = TARGET_ENOLINK,
538 [EADV] = TARGET_EADV,
539 [ESRMNT] = TARGET_ESRMNT,
540 [ECOMM] = TARGET_ECOMM,
541 [EPROTO] = TARGET_EPROTO,
542 [EDOTDOT] = TARGET_EDOTDOT,
543 [EMULTIHOP] = TARGET_EMULTIHOP,
544 [EBADMSG] = TARGET_EBADMSG,
545 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
546 [EOVERFLOW] = TARGET_EOVERFLOW,
547 [ENOTUNIQ] = TARGET_ENOTUNIQ,
548 [EBADFD] = TARGET_EBADFD,
549 [EREMCHG] = TARGET_EREMCHG,
550 [ELIBACC] = TARGET_ELIBACC,
551 [ELIBBAD] = TARGET_ELIBBAD,
552 [ELIBSCN] = TARGET_ELIBSCN,
553 [ELIBMAX] = TARGET_ELIBMAX,
554 [ELIBEXEC] = TARGET_ELIBEXEC,
555 [EILSEQ] = TARGET_EILSEQ,
556 [ENOSYS] = TARGET_ENOSYS,
557 [ELOOP] = TARGET_ELOOP,
558 [ERESTART] = TARGET_ERESTART,
559 [ESTRPIPE] = TARGET_ESTRPIPE,
560 [ENOTEMPTY] = TARGET_ENOTEMPTY,
561 [EUSERS] = TARGET_EUSERS,
562 [ENOTSOCK] = TARGET_ENOTSOCK,
563 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
564 [EMSGSIZE] = TARGET_EMSGSIZE,
565 [EPROTOTYPE] = TARGET_EPROTOTYPE,
566 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
567 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
568 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
569 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
570 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
571 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
572 [EADDRINUSE] = TARGET_EADDRINUSE,
573 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
574 [ENETDOWN] = TARGET_ENETDOWN,
575 [ENETUNREACH] = TARGET_ENETUNREACH,
576 [ENETRESET] = TARGET_ENETRESET,
577 [ECONNABORTED] = TARGET_ECONNABORTED,
578 [ECONNRESET] = TARGET_ECONNRESET,
579 [ENOBUFS] = TARGET_ENOBUFS,
580 [EISCONN] = TARGET_EISCONN,
581 [ENOTCONN] = TARGET_ENOTCONN,
582 [EUCLEAN] = TARGET_EUCLEAN,
583 [ENOTNAM] = TARGET_ENOTNAM,
584 [ENAVAIL] = TARGET_ENAVAIL,
585 [EISNAM] = TARGET_EISNAM,
586 [EREMOTEIO] = TARGET_EREMOTEIO,
587 [ESHUTDOWN] = TARGET_ESHUTDOWN,
588 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
589 [ETIMEDOUT] = TARGET_ETIMEDOUT,
590 [ECONNREFUSED] = TARGET_ECONNREFUSED,
591 [EHOSTDOWN] = TARGET_EHOSTDOWN,
592 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
593 [EALREADY] = TARGET_EALREADY,
594 [EINPROGRESS] = TARGET_EINPROGRESS,
595 [ESTALE] = TARGET_ESTALE,
596 [ECANCELED] = TARGET_ECANCELED,
597 [ENOMEDIUM] = TARGET_ENOMEDIUM,
598 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
600 [ENOKEY] = TARGET_ENOKEY,
603 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
606 [EKEYREVOKED] = TARGET_EKEYREVOKED,
609 [EKEYREJECTED] = TARGET_EKEYREJECTED,
612 [EOWNERDEAD] = TARGET_EOWNERDEAD,
614 #ifdef ENOTRECOVERABLE
615 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
619 static inline int host_to_target_errno(int err)
621 if(host_to_target_errno_table[err])
622 return host_to_target_errno_table[err];
626 static inline int target_to_host_errno(int err)
628 if (target_to_host_errno_table[err])
629 return target_to_host_errno_table[err];
633 static inline abi_long get_errno(abi_long ret)
636 return -host_to_target_errno(errno);
641 static inline int is_error(abi_long ret)
643 return (abi_ulong)ret >= (abi_ulong)(-4096);
646 char *target_strerror(int err)
648 return strerror(target_to_host_errno(err));
/* Emulated program-break state: target_original_brk is the initial brk
 * set at exec time and acts as the lower bound for do_brk(). */
651 static abi_ulong target_brk;
652 static abi_ulong target_original_brk;
654 void target_set_brk(abi_ulong new_brk)
656 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
/* Emulate brk(2): shrink/no-op requests just update target_brk; growth
 * past the current page boundary maps anonymous memory at the old
 * break. NOTE(review): braces, early returns and the final return of
 * target_brk were lost in extraction. */
659 /* do_brk() must return target values and target errnos. */
660 abi_long do_brk(abi_ulong new_brk)
663 abi_long mapped_addr;
668 if (new_brk < target_original_brk)
671 brk_page = HOST_PAGE_ALIGN(target_brk);
673 /* If the new brk is less than this, set it and we're done... */
674 if (new_brk < brk_page) {
675 target_brk = new_brk;
679 /* We need to allocate more memory after the brk... */
680 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
681 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
682 PROT_READ|PROT_WRITE,
683 MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
685 if (!is_error(mapped_addr))
686 target_brk = new_brk;
/* Copy an fd_set from target memory into a host fd_set: the target
 * stores it as an array of abi_ulong words (nw = ceil(n / TARGET_ABI_BITS));
 * each bit set in a word maps to FD_SET of the corresponding fd.
 * Returns 0 or -TARGET_EFAULT. NOTE(review): several body lines (FD_ZERO,
 * the per-bit FD_SET, returns) were lost in extraction. */
691 static inline abi_long copy_from_user_fdset(fd_set *fds,
692 abi_ulong target_fds_addr,
696 abi_ulong b, *target_fds;
698 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
699 if (!(target_fds = lock_user(VERIFY_READ,
701 sizeof(abi_ulong) * nw,
703 return -TARGET_EFAULT;
707 for (i = 0; i < nw; i++) {
708 /* grab the abi_ulong */
709 __get_user(b, &target_fds[i]);
710 for (j = 0; j < TARGET_ABI_BITS; j++) {
711 /* check the bit inside the abi_ulong */
718 unlock_user(target_fds, target_fds_addr, 0);
/* Reverse direction: pack a host fd_set back into the target's
 * abi_ulong bit array and copy it out (unlock with write-back size). */
723 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
729 abi_ulong *target_fds;
731 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
732 if (!(target_fds = lock_user(VERIFY_WRITE,
734 sizeof(abi_ulong) * nw,
736 return -TARGET_EFAULT;
739 for (i = 0; i < nw; i++) {
741 for (j = 0; j < TARGET_ABI_BITS; j++) {
742 v |= ((FD_ISSET(k, fds) != 0) << j);
745 __put_user(v, &target_fds[i]);
748 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
/* Alpha hosts tick at a different HZ; the lost lines here presumably
 * defined HOST_HZ for that case — confirm against the original file. */
753 #if defined(__alpha__)
/* Convert clock ticks from host HZ to target HZ; the 64-bit
 * intermediate avoids overflow in the multiply. */
759 static inline abi_long host_to_target_clock_t(long ticks)
761 #if HOST_HZ == TARGET_HZ
764 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/* Copy a host struct rusage into target memory, byte-swapping every
 * field for the target endianness. Returns 0 or -TARGET_EFAULT. */
768 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
769 const struct rusage *rusage)
771 struct target_rusage *target_rusage;
773 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
774 return -TARGET_EFAULT;
775 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
776 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
777 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
778 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
779 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
780 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
781 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
782 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
783 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
784 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
785 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
786 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
787 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
788 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
789 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
790 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
791 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
792 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
793 unlock_user_struct(target_rusage, target_addr, 1);
/* Copy a struct timeval from target memory (with byte-swapping via
 * __get_user). Returns 0 or -TARGET_EFAULT.
 * NOTE(review): trailing "return 0;" lines of these helpers were lost
 * in extraction. */
798 static inline abi_long copy_from_user_timeval(struct timeval *tv,
799 abi_ulong target_tv_addr)
801 struct target_timeval *target_tv;
803 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
804 return -TARGET_EFAULT;
806 __get_user(tv->tv_sec, &target_tv->tv_sec);
807 __get_user(tv->tv_usec, &target_tv->tv_usec);
809 unlock_user_struct(target_tv, target_tv_addr, 0);
/* Reverse direction: write a host timeval out to target memory. */
814 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
815 const struct timeval *tv)
817 struct target_timeval *target_tv;
819 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
820 return -TARGET_EFAULT;
822 __put_user(tv->tv_sec, &target_tv->tv_sec);
823 __put_user(tv->tv_usec, &target_tv->tv_usec);
825 unlock_user_struct(target_tv, target_tv_addr, 1);
/* Same pattern for POSIX message-queue attributes (mq_getattr/mq_setattr
 * emulation): target -> host ... */
830 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
831 abi_ulong target_mq_attr_addr)
833 struct target_mq_attr *target_mq_attr;
835 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
836 target_mq_attr_addr, 1))
837 return -TARGET_EFAULT;
839 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
840 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
841 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
842 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
844 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
/* ... and host -> target. */
849 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
850 const struct mq_attr *attr)
852 struct target_mq_attr *target_mq_attr;
854 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
855 target_mq_attr_addr, 0))
856 return -TARGET_EFAULT;
858 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
859 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
860 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
861 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
863 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
/* Emulate select(2): unpack the three target fd_sets and the optional
 * timeout, call the host select(), then copy the (possibly modified)
 * sets and remaining timeout back out. A zero guest address for any set
 * or the timeout maps to a NULL host pointer (the *_ptr locals).
 * NOTE(review): the NULL-pointer branches and the final return were
 * lost in extraction. */
868 /* do_select() must return target values and target errnos. */
869 static abi_long do_select(int n,
870 abi_ulong rfd_addr, abi_ulong wfd_addr,
871 abi_ulong efd_addr, abi_ulong target_tv_addr)
873 fd_set rfds, wfds, efds;
874 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
875 struct timeval tv, *tv_ptr;
879 if (copy_from_user_fdset(&rfds, rfd_addr, n))
880 return -TARGET_EFAULT;
886 if (copy_from_user_fdset(&wfds, wfd_addr, n))
887 return -TARGET_EFAULT;
893 if (copy_from_user_fdset(&efds, efd_addr, n))
894 return -TARGET_EFAULT;
900 if (target_tv_addr) {
901 if (copy_from_user_timeval(&tv, target_tv_addr))
902 return -TARGET_EFAULT;
908 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
910 if (!is_error(ret)) {
911 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
912 return -TARGET_EFAULT;
913 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
914 return -TARGET_EFAULT;
915 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
916 return -TARGET_EFAULT;
918 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
919 return -TARGET_EFAULT;
/* Copy a sockaddr from target memory to a host buffer, swapping
 * sa_family. For AF_UNIX it additionally compensates for callers that
 * pass strlen(sun_path) without the trailing NUL (the Linux kernel
 * tolerates the same), extending len by one when the path is
 * unterminated, and clamps len to sizeof(struct sockaddr_un). */
925 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
926 abi_ulong target_addr,
929 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
930 sa_family_t sa_family;
931 struct target_sockaddr *target_saddr;
933 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
935 return -TARGET_EFAULT;
937 sa_family = tswap16(target_saddr->sa_family);
939 /* Oops. The caller might send a incomplete sun_path; sun_path
940 * must be terminated by \0 (see the manual page), but
941 * unfortunately it is quite common to specify sockaddr_un
942 * length as "strlen(x->sun_path)" while it should be
943 * "strlen(...) + 1". We'll fix that here if needed.
944 * Linux kernel has a similar feature.
947 if (sa_family == AF_UNIX) {
948 if (len < unix_maxlen) {
949 char *cp = (char*)target_saddr;
951 if ( cp[len-1] && !cp[len] )
954 if (len > unix_maxlen)
958 memcpy(addr, target_saddr, len);
959 addr->sa_family = sa_family;
960 unlock_user(target_saddr, target_addr, 0);
/* Reverse direction: copy a host sockaddr out to target memory,
 * swapping sa_family to target byte order. */
965 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
966 struct sockaddr *addr,
969 struct target_sockaddr *target_saddr;
971 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
973 return -TARGET_EFAULT;
974 memcpy(target_saddr, addr, len);
975 target_saddr->sa_family = tswap16(addr->sa_family);
976 unlock_user(target_saddr, target_addr, len);
/* Convert msghdr ancillary data (control messages) from target layout
 * to host layout, walking both chains in lockstep. Payload handling:
 * SCM_RIGHTS fd arrays are swapped element-wise; any other cmsg type is
 * logged as unsupported and copied verbatim (best effort). The running
 * 'space' total guards against overflowing the host control buffer and
 * becomes the final msg_controllen. */
981 /* ??? Should this also swap msgh->name? */
982 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
983 struct target_msghdr *target_msgh)
985 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
986 abi_long msg_controllen;
987 abi_ulong target_cmsg_addr;
988 struct target_cmsghdr *target_cmsg;
991 msg_controllen = tswapl(target_msgh->msg_controllen);
992 if (msg_controllen < sizeof (struct target_cmsghdr))
994 target_cmsg_addr = tswapl(target_msgh->msg_control);
995 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
997 return -TARGET_EFAULT;
999 while (cmsg && target_cmsg) {
1000 void *data = CMSG_DATA(cmsg);
1001 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1003 int len = tswapl(target_cmsg->cmsg_len)
1004 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1006 space += CMSG_SPACE(len);
1007 if (space > msgh->msg_controllen) {
1008 space -= CMSG_SPACE(len);
1009 gemu_log("Host cmsg overflow\n");
1013 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1014 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1015 cmsg->cmsg_len = CMSG_LEN(len);
1017 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1018 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1019 memcpy(data, target_data, len);
1021 int *fd = (int *)data;
1022 int *target_fd = (int *)target_data;
1023 int i, numfds = len / sizeof(int);
1025 for (i = 0; i < numfds; i++)
1026 fd[i] = tswap32(target_fd[i]);
1029 cmsg = CMSG_NXTHDR(msgh, cmsg);
1030 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1032 unlock_user(target_cmsg, target_cmsg_addr, 0);
1034 msgh->msg_controllen = space;
/* Mirror of the above for recvmsg: host cmsg chain -> target layout,
 * with the same SCM_RIGHTS special case, writing the consumed size
 * back into target msg_controllen. */
1038 /* ??? Should this also swap msgh->name? */
1039 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1040 struct msghdr *msgh)
1042 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1043 abi_long msg_controllen;
1044 abi_ulong target_cmsg_addr;
1045 struct target_cmsghdr *target_cmsg;
1046 socklen_t space = 0;
1048 msg_controllen = tswapl(target_msgh->msg_controllen);
1049 if (msg_controllen < sizeof (struct target_cmsghdr))
1051 target_cmsg_addr = tswapl(target_msgh->msg_control);
1052 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1054 return -TARGET_EFAULT;
1056 while (cmsg && target_cmsg) {
1057 void *data = CMSG_DATA(cmsg);
1058 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1060 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1062 space += TARGET_CMSG_SPACE(len);
1063 if (space > msg_controllen) {
1064 space -= TARGET_CMSG_SPACE(len);
1065 gemu_log("Target cmsg overflow\n");
1069 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1070 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1071 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1073 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1074 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1075 memcpy(target_data, data, len);
1077 int *fd = (int *)data;
1078 int *target_fd = (int *)target_data;
1079 int i, numfds = len / sizeof(int);
1081 for (i = 0; i < numfds; i++)
1082 target_fd[i] = tswap32(fd[i]);
1085 cmsg = CMSG_NXTHDR(msgh, cmsg);
1086 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1088 unlock_user(target_cmsg, target_cmsg_addr, space);
1090 target_msgh->msg_controllen = tswapl(space);
/* Emulate setsockopt(2). Option values are fetched from target memory
 * and converted; TCP/IP int options are read as u32, some IP options
 * also accept a single byte, and TARGET_SOL_SOCKET option names are
 * remapped to the host SO_* constants before the host call.
 * NOTE(review): the switch/case scaffolding and several branches were
 * lost in extraction. */
1094 /* do_setsockopt() Must return target values and target errnos. */
1095 static abi_long do_setsockopt(int sockfd, int level, int optname,
1096 abi_ulong optval_addr, socklen_t optlen)
1103 /* TCP options all take an 'int' value. */
1104 if (optlen < sizeof(uint32_t))
1105 return -TARGET_EINVAL;
1107 if (get_user_u32(val, optval_addr))
1108 return -TARGET_EFAULT;
1109 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1116 case IP_ROUTER_ALERT:
1120 case IP_MTU_DISCOVER:
1126 case IP_MULTICAST_TTL:
1127 case IP_MULTICAST_LOOP:
/* These IP options historically accept either an int or a single
 * byte; honour both encodings. */
1129 if (optlen >= sizeof(uint32_t)) {
1130 if (get_user_u32(val, optval_addr))
1131 return -TARGET_EFAULT;
1132 } else if (optlen >= 1) {
1133 if (get_user_u8(val, optval_addr))
1134 return -TARGET_EFAULT;
1136 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* SOL_SOCKET: translate each target option name to the host constant. */
1142 case TARGET_SOL_SOCKET:
1144 /* Options with 'int' argument. */
1145 case TARGET_SO_DEBUG:
1148 case TARGET_SO_REUSEADDR:
1149 optname = SO_REUSEADDR;
1151 case TARGET_SO_TYPE:
1154 case TARGET_SO_ERROR:
1157 case TARGET_SO_DONTROUTE:
1158 optname = SO_DONTROUTE;
1160 case TARGET_SO_BROADCAST:
1161 optname = SO_BROADCAST;
1163 case TARGET_SO_SNDBUF:
1164 optname = SO_SNDBUF;
1166 case TARGET_SO_RCVBUF:
1167 optname = SO_RCVBUF;
1169 case TARGET_SO_KEEPALIVE:
1170 optname = SO_KEEPALIVE;
1172 case TARGET_SO_OOBINLINE:
1173 optname = SO_OOBINLINE;
1175 case TARGET_SO_NO_CHECK:
1176 optname = SO_NO_CHECK;
1178 case TARGET_SO_PRIORITY:
1179 optname = SO_PRIORITY;
1182 case TARGET_SO_BSDCOMPAT:
1183 optname = SO_BSDCOMPAT;
1186 case TARGET_SO_PASSCRED:
1187 optname = SO_PASSCRED;
1189 case TARGET_SO_TIMESTAMP:
1190 optname = SO_TIMESTAMP;
1192 case TARGET_SO_RCVLOWAT:
1193 optname = SO_RCVLOWAT;
1195 case TARGET_SO_RCVTIMEO:
1196 optname = SO_RCVTIMEO;
1198 case TARGET_SO_SNDTIMEO:
1199 optname = SO_SNDTIMEO;
1205 if (optlen < sizeof(uint32_t))
1206 return -TARGET_EINVAL;
1208 if (get_user_u32(val, optval_addr))
1209 return -TARGET_EFAULT;
1210 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1214 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1215 ret = -TARGET_ENOPROTOOPT;
/* Emulate getsockopt(2). Multi-word options (linger, timeouts,
 * peercred, peername) are not yet handled; int options are fetched from
 * the host and written back as u32, or as u8 when the caller supplied a
 * short buffer. NOTE(review): the switch scaffolding and several lines
 * were lost in extraction. */
1220 /* do_getsockopt() Must return target values and target errnos. */
1221 static abi_long do_getsockopt(int sockfd, int level, int optname,
1222 abi_ulong optval_addr, abi_ulong optlen)
1229 case TARGET_SOL_SOCKET:
1232 case TARGET_SO_LINGER:
1233 case TARGET_SO_RCVTIMEO:
1234 case TARGET_SO_SNDTIMEO:
1235 case TARGET_SO_PEERCRED:
1236 case TARGET_SO_PEERNAME:
1237 /* These don't just return a single integer */
1244 /* TCP options all take an 'int' value. */
1246 if (get_user_u32(len, optlen))
1247 return -TARGET_EFAULT;
1249 return -TARGET_EINVAL;
1251 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1258 if (put_user_u32(val, optval_addr))
1259 return -TARGET_EFAULT;
1261 if (put_user_u8(val, optval_addr))
1262 return -TARGET_EFAULT;
1264 if (put_user_u32(len, optlen))
1265 return -TARGET_EFAULT;
1272 case IP_ROUTER_ALERT:
1276 case IP_MTU_DISCOVER:
1282 case IP_MULTICAST_TTL:
1283 case IP_MULTICAST_LOOP:
1284 if (get_user_u32(len, optlen))
1285 return -TARGET_EFAULT;
1287 return -TARGET_EINVAL;
1289 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv))
/* Short user buffer for a byte-range value: return it as one byte. */
1292 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1294 if (put_user_u32(len, optlen)
1295 || put_user_u8(val, optval_addr))
1296 return -TARGET_EFAULT;
1298 if (len > sizeof(int))
1300 if (put_user_u32(len, optlen)
1301 || put_user_u32(val, optval_addr))
1302 return -TARGET_EFAULT;
1306 ret = -TARGET_ENOPROTOOPT;
1312 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1314 ret = -TARGET_EOPNOTSUPP;
/* lock_iovec(): convert a target iovec array at target_addr into a host
   struct iovec array 'vec', locking each buffer into host memory.
   Returns 0 on success (note the inverted convention vs. lock_user). */
1321 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1322 * other lock functions have a return code of 0 for failure.
1324 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1325 int count, int copy)
1327 struct target_iovec *target_vec;
1331 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1333 return -TARGET_EFAULT;
1334 for(i = 0;i < count; i++) {
/* byte-swap each target iovec entry into host order */
1335 base = tswapl(target_vec[i].iov_base);
1336 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1337 if (vec[i].iov_len != 0) {
1338 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1339 /* Don't check lock_user return value. We must call writev even
1340 if a element has invalid base address. */
1342 /* zero length pointer is ignored */
1343 vec[i].iov_base = NULL;
1346 unlock_user (target_vec, target_addr, 0);
/* unlock_iovec(): release the host buffers locked by lock_iovec().  When
   'copy' is nonzero the (possibly written) data is copied back to the
   target; otherwise the lock is dropped without write-back. */
1350 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1351 int count, int copy)
1353 struct target_iovec *target_vec;
1357 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1359 return -TARGET_EFAULT;
1360 for(i = 0;i < count; i++) {
1361 if (target_vec[i].iov_base) {
1362 base = tswapl(target_vec[i].iov_base);
/* write back iov_len bytes only when the caller asked for copy-out */
1363 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1366 unlock_user (target_vec, target_addr, 0);
1371 /* do_socket() Must return target values and target errnos. */
1372 static abi_long do_socket(int domain, int type, int protocol)
1374 #if defined(TARGET_MIPS)
1376 case TARGET_SOCK_DGRAM:
1379 case TARGET_SOCK_STREAM:
1382 case TARGET_SOCK_RAW:
1385 case TARGET_SOCK_RDM:
1388 case TARGET_SOCK_SEQPACKET:
1389 type = SOCK_SEQPACKET;
1391 case TARGET_SOCK_PACKET:
1396 if (domain == PF_NETLINK)
1397 return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
1398 return get_errno(socket(domain, type, protocol));
1401 /* MAX_SOCK_ADDR from linux/net/socket.c */
1402 #define MAX_SOCK_ADDR 128
1404 /* do_bind() Must return target values and target errnos. */
1405 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1410 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1411 return -TARGET_EINVAL;
1413 addr = alloca(addrlen+1);
1415 target_to_host_sockaddr(addr, target_addr, addrlen);
1416 return get_errno(bind(sockfd, addr, addrlen));
1419 /* do_connect() Must return target values and target errnos. */
1420 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1425 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1426 return -TARGET_EINVAL;
1428 addr = alloca(addrlen);
1430 target_to_host_sockaddr(addr, target_addr, addrlen);
1431 return get_errno(connect(sockfd, addr, addrlen));
/* do_sendrecvmsg(): shared implementation of sendmsg()/recvmsg().
   Converts the target msghdr (name, iovec array, control data) to host
   layout, performs the call, and converts results back on receive.
   Must return target values and target errnos.
   NOTE(review): interior lines elided in this view; lock_iovec()'s return
   value appears unchecked here — verify against upstream. */
1434 /* do_sendrecvmsg() Must return target values and target errnos. */
1435 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1436 int flags, int send)
1439 struct target_msghdr *msgp;
1443 abi_ulong target_vec;
1446 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1450 return -TARGET_EFAULT;
1451 if (msgp->msg_name) {
1452 msg.msg_namelen = tswap32(msgp->msg_namelen);
1453 msg.msg_name = alloca(msg.msg_namelen);
1454 target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1457 msg.msg_name = NULL;
1458 msg.msg_namelen = 0;
/* control buffer is oversized (x2) to leave room for layout differences
   between target and host cmsg structures */
1460 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1461 msg.msg_control = alloca(msg.msg_controllen);
1462 msg.msg_flags = tswap32(msgp->msg_flags);
1464 count = tswapl(msgp->msg_iovlen);
1465 vec = alloca(count * sizeof(struct iovec));
1466 target_vec = tswapl(msgp->msg_iov);
1467 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1468 msg.msg_iovlen = count;
1472 ret = target_to_host_cmsg(&msg, msgp);
1474 ret = get_errno(sendmsg(fd, &msg, flags));
1476 ret = get_errno(recvmsg(fd, &msg, flags));
1477 if (!is_error(ret)) {
/* on receive, convert any ancillary data back to target layout */
1479 ret = host_to_target_cmsg(msgp, &msg);
1484 unlock_iovec(vec, target_vec, count, !send);
1485 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
/* do_accept(): emulate accept(2).  A NULL target address means the guest
   does not want the peer address, mirroring the kernel API.
   Must return target values and target errnos.
   NOTE(review): the return value of host_to_target_sockaddr() below is
   ignored — a faulting target buffer is silently dropped; verify. */
1489 /* do_accept() Must return target values and target errnos. */
1490 static abi_long do_accept(int fd, abi_ulong target_addr,
1491 abi_ulong target_addrlen_addr)
1497 if (target_addr == 0)
1498 return get_errno(accept(fd, NULL, NULL));
1500 if (get_user_u32(addrlen, target_addrlen_addr))
1501 return -TARGET_EFAULT;
1503 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1504 return -TARGET_EINVAL;
1506 addr = alloca(addrlen)
1508 ret = get_errno(accept(fd, addr, &addrlen));
1509 if (!is_error(ret)) {
1510 host_to_target_sockaddr(target_addr, addr, addrlen);
1511 if (put_user_u32(addrlen, target_addrlen_addr))
1512 ret = -TARGET_EFAULT;
/* do_getpeername(): emulate getpeername(2): read the target's addrlen,
   perform the host call into a stack buffer, then copy the (possibly
   shortened) address and length back to the target. */
1517 /* do_getpeername() Must return target values and target errnos. */
1518 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1519 abi_ulong target_addrlen_addr)
1525 if (get_user_u32(addrlen, target_addrlen_addr))
1526 return -TARGET_EFAULT;
1528 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1529 return -TARGET_EINVAL;
1531 addr = alloca(addrlen);
1533 ret = get_errno(getpeername(fd, addr, &addrlen));
1534 if (!is_error(ret)) {
1535 host_to_target_sockaddr(target_addr, addr, addrlen);
1536 if (put_user_u32(addrlen, target_addrlen_addr))
1537 ret = -TARGET_EFAULT;
1542 /* do_getsockname() Must return target values and target errnos. */
1543 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1544 abi_ulong target_addrlen_addr)
1550 if (target_addr == 0)
1551 return get_errno(accept(fd, NULL, NULL));
1553 if (get_user_u32(addrlen, target_addrlen_addr))
1554 return -TARGET_EFAULT;
1556 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1557 return -TARGET_EINVAL;
1559 addr = alloca(addrlen);
1561 ret = get_errno(getsockname(fd, addr, &addrlen));
1562 if (!is_error(ret)) {
1563 host_to_target_sockaddr(target_addr, addr, addrlen);
1564 if (put_user_u32(addrlen, target_addrlen_addr))
1565 ret = -TARGET_EFAULT;
1570 /* do_socketpair() Must return target values and target errnos. */
1571 static abi_long do_socketpair(int domain, int type, int protocol,
1572 abi_ulong target_tab_addr)
1577 ret = get_errno(socketpair(domain, type, protocol, tab));
1578 if (!is_error(ret)) {
1579 if (put_user_s32(tab[0], target_tab_addr)
1580 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1581 ret = -TARGET_EFAULT;
1586 /* do_sendto() Must return target values and target errnos. */
1587 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1588 abi_ulong target_addr, socklen_t addrlen)
1594 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1595 return -TARGET_EINVAL;
1597 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1599 return -TARGET_EFAULT;
1601 addr = alloca(addrlen);
1602 target_to_host_sockaddr(addr, target_addr, addrlen);
1603 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1605 ret = get_errno(send(fd, host_msg, len, flags));
1607 unlock_user(host_msg, msg, 0);
/* do_recvfrom(): emulate recvfrom(2) (or recv(2) when no source-address
   buffer is supplied).  Must return target values and target errnos.
   NOTE(review): interior lines (goto labels/cleanup) are elided in this
   view; the unlock_user calls at the bottom are the success/failure
   cleanup paths. */
1611 /* do_recvfrom() Must return target values and target errnos. */
1612 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1613 abi_ulong target_addr,
1614 abi_ulong target_addrlen)
1621 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1623 return -TARGET_EFAULT;
1625 if (get_user_u32(addrlen, target_addrlen)) {
1626 ret = -TARGET_EFAULT;
1629 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR) {
1630 ret = -TARGET_EINVAL;
1633 addr = alloca(addrlen);
1634 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1636 addr = NULL; /* To keep compiler quiet. */
1637 ret = get_errno(recv(fd, host_msg, len, flags));
1639 if (!is_error(ret)) {
1641 host_to_target_sockaddr(target_addr, addr, addrlen);
1642 if (put_user_u32(addrlen, target_addrlen)) {
1643 ret = -TARGET_EFAULT;
/* success: copy the received bytes back to the guest */
1647 unlock_user(host_msg, msg, len);
/* failure: release the buffer without copying anything back */
1650 unlock_user(host_msg, msg, 0);
/* do_socketcall(): demultiplex the legacy socketcall(2) syscall.  'vptr'
   points at an array of abi_ulong arguments in guest memory; each case
   fetches its arguments and forwards to the matching do_*() helper.
   Must return target values and target errnos.
   NOTE(review): case labels/braces are partially elided in this view. */
1655 #ifdef TARGET_NR_socketcall
1656 /* do_socketcall() Must return target values and target errnos. */
1657 static abi_long do_socketcall(int num, abi_ulong vptr)
/* n = size of one guest argument slot */
1660 const int n = sizeof(abi_ulong);
1665 int domain, type, protocol;
1667 if (get_user_s32(domain, vptr)
1668 || get_user_s32(type, vptr + n)
1669 || get_user_s32(protocol, vptr + 2 * n))
1670 return -TARGET_EFAULT;
1672 ret = do_socket(domain, type, protocol);
1678 abi_ulong target_addr;
1681 if (get_user_s32(sockfd, vptr)
1682 || get_user_ual(target_addr, vptr + n)
1683 || get_user_u32(addrlen, vptr + 2 * n))
1684 return -TARGET_EFAULT;
1686 ret = do_bind(sockfd, target_addr, addrlen);
1689 case SOCKOP_connect:
1692 abi_ulong target_addr;
1695 if (get_user_s32(sockfd, vptr)
1696 || get_user_ual(target_addr, vptr + n)
1697 || get_user_u32(addrlen, vptr + 2 * n))
1698 return -TARGET_EFAULT;
1700 ret = do_connect(sockfd, target_addr, addrlen);
1705 int sockfd, backlog;
1707 if (get_user_s32(sockfd, vptr)
1708 || get_user_s32(backlog, vptr + n))
1709 return -TARGET_EFAULT;
1711 ret = get_errno(listen(sockfd, backlog));
1717 abi_ulong target_addr, target_addrlen;
1719 if (get_user_s32(sockfd, vptr)
1720 || get_user_ual(target_addr, vptr + n)
1721 || get_user_u32(target_addrlen, vptr + 2 * n))
1722 return -TARGET_EFAULT;
1724 ret = do_accept(sockfd, target_addr, target_addrlen);
1727 case SOCKOP_getsockname:
1730 abi_ulong target_addr, target_addrlen;
1732 if (get_user_s32(sockfd, vptr)
1733 || get_user_ual(target_addr, vptr + n)
1734 || get_user_u32(target_addrlen, vptr + 2 * n))
1735 return -TARGET_EFAULT;
1737 ret = do_getsockname(sockfd, target_addr, target_addrlen);
1740 case SOCKOP_getpeername:
1743 abi_ulong target_addr, target_addrlen;
1745 if (get_user_s32(sockfd, vptr)
1746 || get_user_ual(target_addr, vptr + n)
1747 || get_user_u32(target_addrlen, vptr + 2 * n))
1748 return -TARGET_EFAULT;
1750 ret = do_getpeername(sockfd, target_addr, target_addrlen);
1753 case SOCKOP_socketpair:
1755 int domain, type, protocol;
1758 if (get_user_s32(domain, vptr)
1759 || get_user_s32(type, vptr + n)
1760 || get_user_s32(protocol, vptr + 2 * n)
1761 || get_user_ual(tab, vptr + 3 * n))
1762 return -TARGET_EFAULT;
1764 ret = do_socketpair(domain, type, protocol, tab);
/* send/recv: same helpers as sendto/recvfrom with a null address */
1774 if (get_user_s32(sockfd, vptr)
1775 || get_user_ual(msg, vptr + n)
1776 || get_user_ual(len, vptr + 2 * n)
1777 || get_user_s32(flags, vptr + 3 * n))
1778 return -TARGET_EFAULT;
1780 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
1790 if (get_user_s32(sockfd, vptr)
1791 || get_user_ual(msg, vptr + n)
1792 || get_user_ual(len, vptr + 2 * n)
1793 || get_user_s32(flags, vptr + 3 * n))
1794 return -TARGET_EFAULT;
1796 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
1808 if (get_user_s32(sockfd, vptr)
1809 || get_user_ual(msg, vptr + n)
1810 || get_user_ual(len, vptr + 2 * n)
1811 || get_user_s32(flags, vptr + 3 * n)
1812 || get_user_ual(addr, vptr + 4 * n)
1813 || get_user_u32(addrlen, vptr + 5 * n))
1814 return -TARGET_EFAULT;
1816 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
1819 case SOCKOP_recvfrom:
1828 if (get_user_s32(sockfd, vptr)
1829 || get_user_ual(msg, vptr + n)
1830 || get_user_ual(len, vptr + 2 * n)
1831 || get_user_s32(flags, vptr + 3 * n)
1832 || get_user_ual(addr, vptr + 4 * n)
1833 || get_user_u32(addrlen, vptr + 5 * n))
1834 return -TARGET_EFAULT;
1836 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
1839 case SOCKOP_shutdown:
1843 if (get_user_s32(sockfd, vptr)
1844 || get_user_s32(how, vptr + n))
1845 return -TARGET_EFAULT;
1847 ret = get_errno(shutdown(sockfd, how));
1850 case SOCKOP_sendmsg:
1851 case SOCKOP_recvmsg:
1854 abi_ulong target_msg;
1857 if (get_user_s32(fd, vptr)
1858 || get_user_ual(target_msg, vptr + n)
1859 || get_user_s32(flags, vptr + 2 * n))
1860 return -TARGET_EFAULT;
1862 ret = do_sendrecvmsg(fd, target_msg, flags,
1863 (num == SOCKOP_sendmsg));
1866 case SOCKOP_setsockopt:
1874 if (get_user_s32(sockfd, vptr)
1875 || get_user_s32(level, vptr + n)
1876 || get_user_s32(optname, vptr + 2 * n)
1877 || get_user_ual(optval, vptr + 3 * n)
1878 || get_user_u32(optlen, vptr + 4 * n))
1879 return -TARGET_EFAULT;
1881 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
1884 case SOCKOP_getsockopt:
1892 if (get_user_s32(sockfd, vptr)
1893 || get_user_s32(level, vptr + n)
1894 || get_user_s32(optname, vptr + 2 * n)
1895 || get_user_ual(optval, vptr + 3 * n)
1896 || get_user_u32(optlen, vptr + 4 * n))
1897 return -TARGET_EFAULT;
1899 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
1903 gemu_log("Unsupported socketcall: %d\n", num);
1904 ret = -TARGET_ENOSYS;
1911 #define N_SHM_REGIONS 32
1913 static struct shm_region {
1916 } shm_regions[N_SHM_REGIONS];
/* Target-ABI layout of struct ipc_perm (SysV IPC permissions).
   NOTE(review): the leading key/uid/gid/cuid/cgid members are elided in
   this view; 'mode' and '__seq' are 16-bit fields. */
1918 struct target_ipc_perm
1925 unsigned short int mode;
1926 unsigned short int __pad1;
1927 unsigned short int __seq;
1928 unsigned short int __pad2;
1929 abi_ulong __unused1;
1930 abi_ulong __unused2;
/* Target-ABI layout of struct semid_ds; the __unused slots are the
   kernel's reserved padding words. */
1933 struct target_semid_ds
1935 struct target_ipc_perm sem_perm;
1936 abi_ulong sem_otime;
1937 abi_ulong __unused1;
1938 abi_ulong sem_ctime;
1939 abi_ulong __unused2;
1940 abi_ulong sem_nsems;
1941 abi_ulong __unused3;
1942 abi_ulong __unused4;
1945 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
1946 abi_ulong target_addr)
1948 struct target_ipc_perm *target_ip;
1949 struct target_semid_ds *target_sd;
1951 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
1952 return -TARGET_EFAULT;
1953 target_ip=&(target_sd->sem_perm);
1954 host_ip->__key = tswapl(target_ip->__key);
1955 host_ip->uid = tswapl(target_ip->uid);
1956 host_ip->gid = tswapl(target_ip->gid);
1957 host_ip->cuid = tswapl(target_ip->cuid);
1958 host_ip->cgid = tswapl(target_ip->cgid);
1959 host_ip->mode = tswapl(target_ip->mode);
1960 unlock_user_struct(target_sd, target_addr, 0);
1964 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
1965 struct ipc_perm *host_ip)
1967 struct target_ipc_perm *target_ip;
1968 struct target_semid_ds *target_sd;
1970 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
1971 return -TARGET_EFAULT;
1972 target_ip = &(target_sd->sem_perm);
1973 target_ip->__key = tswapl(host_ip->__key);
1974 target_ip->uid = tswapl(host_ip->uid);
1975 target_ip->gid = tswapl(host_ip->gid);
1976 target_ip->cuid = tswapl(host_ip->cuid);
1977 target_ip->cgid = tswapl(host_ip->cgid);
1978 target_ip->mode = tswapl(host_ip->mode);
1979 unlock_user_struct(target_sd, target_addr, 1);
/* Convert a target semid_ds at target_addr into the host structure.
   Delegates the embedded ipc_perm to target_to_host_ipc_perm(). */
1983 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
1984 abi_ulong target_addr)
1986 struct target_semid_ds *target_sd;
1988 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
1989 return -TARGET_EFAULT;
/* NOTE(review): on this failure path the struct stays locked — verify */
1990 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
1991 return -TARGET_EFAULT;
1992 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
1993 host_sd->sem_otime = tswapl(target_sd->sem_otime);
1994 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
1995 unlock_user_struct(target_sd, target_addr, 0);
1999 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2000 struct semid_ds *host_sd)
2002 struct target_semid_ds *target_sd;
2004 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2005 return -TARGET_EFAULT;
2006 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2007 return -TARGET_EFAULT;;
2008 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2009 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2010 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2011 unlock_user_struct(target_sd, target_addr, 1);
2015 struct target_seminfo {
2028 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2029 struct seminfo *host_seminfo)
2031 struct target_seminfo *target_seminfo;
2032 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2033 return -TARGET_EFAULT;
2034 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2035 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2036 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2037 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2038 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2039 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2040 __put_user(host_seminfo->semume, &target_seminfo->semume);
2041 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2042 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2043 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2044 unlock_user_struct(target_seminfo, target_addr, 1);
2050 struct semid_ds *buf;
2051 unsigned short *array;
2052 struct seminfo *__buf;
2055 union target_semun {
2062 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2063 abi_ulong target_addr)
2066 unsigned short *array;
2068 struct semid_ds semid_ds;
2071 semun.buf = &semid_ds;
2073 ret = semctl(semid, 0, IPC_STAT, semun);
2075 return get_errno(ret);
2077 nsems = semid_ds.sem_nsems;
2079 *host_array = malloc(nsems*sizeof(unsigned short));
2080 array = lock_user(VERIFY_READ, target_addr,
2081 nsems*sizeof(unsigned short), 1);
2083 return -TARGET_EFAULT;
2085 for(i=0; i<nsems; i++) {
2086 __get_user((*host_array)[i], &array[i]);
2088 unlock_user(array, target_addr, 0);
/* Write the host semaphore value array *host_array back to the guest
   buffer at target_addr; the element count comes from IPC_STAT.
   NOTE(review): *host_array is not freed here — verify the caller
   releases it (do_semctl's GETALL/SETALL paths). */
2093 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2094 unsigned short **host_array)
2097 unsigned short *array;
2099 struct semid_ds semid_ds;
2102 semun.buf = &semid_ds;
2104 ret = semctl(semid, 0, IPC_STAT, semun);
2106 return get_errno(ret);
2108 nsems = semid_ds.sem_nsems;
2110 array = lock_user(VERIFY_WRITE, target_addr,
2111 nsems*sizeof(unsigned short), 0);
2113 return -TARGET_EFAULT;
2115 for(i=0; i<nsems; i++) {
2116 __put_user((*host_array)[i], &array[i]);
2119 unlock_user(array, target_addr, 1);
/* do_semctl(): emulate semctl(2).  The semun argument is a union whose
   interpretation depends on 'cmd'; each case converts the relevant member
   between target and host representations around the host call.
   NOTE(review): case labels and the 'err' checks between conversions are
   elided in this view. */
2124 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2125 union target_semun target_su)
2128 struct semid_ds dsarg;
2129 unsigned short *array;
2130 struct seminfo seminfo;
2131 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT / IPC_SET style commands: round-trip a semid_ds */
2140 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2144 ret = get_errno(semctl(semid, semnum, cmd, arg));
2145 err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* GETVAL/SETVAL: plain integer value */
2151 arg.val = tswapl(target_su.val);
2152 ret = get_errno(semctl(semid, semnum, cmd, arg));
2153 target_su.val = tswapl(arg.val);
/* GETALL/SETALL: whole semaphore value array */
2157 err = target_to_host_semarray(semid, &array, target_su.array);
2161 ret = get_errno(semctl(semid, semnum, cmd, arg));
2162 err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_INFO/SEM_INFO: seminfo block */
2168 arg.__buf = &seminfo;
2169 ret = get_errno(semctl(semid, semnum, cmd, arg));
2170 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* commands that take no argument */
2178 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2185 struct target_sembuf {
2186 unsigned short sem_num;
2191 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2192 abi_ulong target_addr,
2195 struct target_sembuf *target_sembuf;
2198 target_sembuf = lock_user(VERIFY_READ, target_addr,
2199 nsops*sizeof(struct target_sembuf), 1);
2201 return -TARGET_EFAULT;
2203 for(i=0; i<nsops; i++) {
2204 __put_user(target_sembuf[i].sem_num, &host_sembuf[i].sem_num);
2205 __put_user(target_sembuf[i].sem_op, &host_sembuf[i].sem_op);
2206 __put_user(target_sembuf[i].sem_flg, &host_sembuf[i].sem_flg);
2209 unlock_user(target_sembuf, target_addr, 0);
2214 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2216 struct sembuf sops[nsops];
2218 if (target_to_host_sembuf(sops, ptr, nsops))
2219 return -TARGET_EFAULT;
2221 return semop(semid, sops, nsops);
/* Target-ABI layout of struct msqid_ds.  On 32-bit ABIs each time field
   is followed by a padding word, matching the kernel's layout. */
2224 struct target_msqid_ds
2226 struct target_ipc_perm msg_perm;
2227 abi_ulong msg_stime;
2228 #if TARGET_ABI_BITS == 32
2229 abi_ulong __unused1;
2231 abi_ulong msg_rtime;
2232 #if TARGET_ABI_BITS == 32
2233 abi_ulong __unused2;
2235 abi_ulong msg_ctime;
2236 #if TARGET_ABI_BITS == 32
2237 abi_ulong __unused3;
2239 abi_ulong __msg_cbytes;
2241 abi_ulong msg_qbytes;
2242 abi_ulong msg_lspid;
2243 abi_ulong msg_lrpid;
2244 abi_ulong __unused4;
2245 abi_ulong __unused5;
/* Convert a target msqid_ds at target_addr into the host structure; the
   embedded ipc_perm is delegated to target_to_host_ipc_perm(). */
2248 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2249 abi_ulong target_addr)
2251 struct target_msqid_ds *target_md;
2253 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2254 return -TARGET_EFAULT;
/* NOTE(review): struct stays locked on this failure path — verify */
2255 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2256 return -TARGET_EFAULT;
2257 host_md->msg_stime = tswapl(target_md->msg_stime);
2258 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2259 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2260 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2261 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2262 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2263 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2264 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2265 unlock_user_struct(target_md, target_addr, 0);
/* Convert a host msqid_ds back into the target structure at target_addr,
   mirroring target_to_host_msqid_ds(). */
2269 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2270 struct msqid_ds *host_md)
2272 struct target_msqid_ds *target_md;
2274 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2275 return -TARGET_EFAULT;
2276 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2277 return -TARGET_EFAULT;
2278 target_md->msg_stime = tswapl(host_md->msg_stime);
2279 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2280 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2281 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2282 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2283 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2284 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2285 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2286 unlock_user_struct(target_md, target_addr, 1);
2290 struct target_msginfo {
2298 unsigned short int msgseg;
/* Write a host struct msginfo (IPC_INFO/MSG_INFO result) out to the guest
   buffer at target_addr in target byte order. */
2301 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2302 struct msginfo *host_msginfo)
2304 struct target_msginfo *target_msginfo;
2305 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2306 return -TARGET_EFAULT;
2307 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2308 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2309 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2310 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2311 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2312 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2313 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2314 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2315 unlock_user_struct(target_msginfo, target_addr, 1);
/* do_msgctl(): emulate msgctl(2).  IPC_STAT/IPC_SET round-trip a
   msqid_ds; IPC_RMID takes no buffer; IPC_INFO/MSG_INFO return a msginfo
   (the kernel API passes it through the msqid_ds pointer, hence the
   cast).  NOTE(review): case labels are elided in this view. */
2319 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2321 struct msqid_ds dsarg;
2322 struct msginfo msginfo;
2323 abi_long ret = -TARGET_EINVAL;
2331 if (target_to_host_msqid_ds(&dsarg,ptr))
2332 return -TARGET_EFAULT;
2333 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2334 if (host_to_target_msqid_ds(ptr,&dsarg))
2335 return -TARGET_EFAULT;
2338 ret = get_errno(msgctl(msgid, cmd, NULL));
2342 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2343 if (host_to_target_msginfo(ptr, &msginfo))
2344 return -TARGET_EFAULT;
2351 struct target_msgbuf {
2356 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2357 unsigned int msgsz, int msgflg)
2359 struct target_msgbuf *target_mb;
2360 struct msgbuf *host_mb;
2363 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2364 return -TARGET_EFAULT;
2365 host_mb = malloc(msgsz+sizeof(long));
2366 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2367 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2368 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2370 unlock_user_struct(target_mb, msgp, 0);
2375 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2376 unsigned int msgsz, abi_long msgtyp,
2379 struct target_msgbuf *target_mb;
2381 struct msgbuf *host_mb;
2384 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2385 return -TARGET_EFAULT;
2387 host_mb = malloc(msgsz+sizeof(long));
2388 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2391 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2392 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2393 if (!target_mtext) {
2394 ret = -TARGET_EFAULT;
2397 memcpy(target_mb->mtext, host_mb->mtext, ret);
2398 unlock_user(target_mtext, target_mtext_addr, ret);
2401 target_mb->mtype = tswapl(host_mb->mtype);
2406 unlock_user_struct(target_mb, msgp, 1);
/* Target-ABI layout of struct shmid_ds; on 32-bit ABIs each time field
   carries a trailing padding word, as in the kernel headers. */
2410 struct target_shmid_ds
2412 struct target_ipc_perm shm_perm;
2413 abi_ulong shm_segsz;
2414 abi_ulong shm_atime;
2415 #if TARGET_ABI_BITS == 32
2416 abi_ulong __unused1;
2418 abi_ulong shm_dtime;
2419 #if TARGET_ABI_BITS == 32
2420 abi_ulong __unused2;
2422 abi_ulong shm_ctime;
2423 #if TARGET_ABI_BITS == 32
2424 abi_ulong __unused3;
2428 abi_ulong shm_nattch;
2429 unsigned long int __unused4;
2430 unsigned long int __unused5;
2433 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2434 abi_ulong target_addr)
2436 struct target_shmid_ds *target_sd;
2438 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2439 return -TARGET_EFAULT;
2440 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2441 return -TARGET_EFAULT;
2442 __put_user(target_sd->shm_segsz, &host_sd->shm_segsz);
2443 __put_user(target_sd->shm_atime, &host_sd->shm_atime);
2444 __put_user(target_sd->shm_dtime, &host_sd->shm_dtime);
2445 __put_user(target_sd->shm_ctime, &host_sd->shm_ctime);
2446 __put_user(target_sd->shm_cpid, &host_sd->shm_cpid);
2447 __put_user(target_sd->shm_lpid, &host_sd->shm_lpid);
2448 __put_user(target_sd->shm_nattch, &host_sd->shm_nattch);
2449 unlock_user_struct(target_sd, target_addr, 0);
/* Convert a host shmid_ds back into the target structure at target_addr,
   applying target byte order via __put_user. */
2453 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2454 struct shmid_ds *host_sd)
2456 struct target_shmid_ds *target_sd;
2458 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2459 return -TARGET_EFAULT;
2460 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2461 return -TARGET_EFAULT;
2462 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2463 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2464 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2465 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2466 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2467 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2468 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2469 unlock_user_struct(target_sd, target_addr, 1);
2473 struct target_shminfo {
2481 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2482 struct shminfo *host_shminfo)
2484 struct target_shminfo *target_shminfo;
2485 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2486 return -TARGET_EFAULT;
2487 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2488 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2489 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2490 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2491 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2492 unlock_user_struct(target_shminfo, target_addr, 1);
2496 struct target_shm_info {
2501 abi_ulong swap_attempts;
2502 abi_ulong swap_successes;
2505 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2506 struct shm_info *host_shm_info)
2508 struct target_shm_info *target_shm_info;
2509 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2510 return -TARGET_EFAULT;
2511 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2512 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2513 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2514 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2515 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2516 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2517 unlock_user_struct(target_shm_info, target_addr, 1);
/* do_shmctl(): emulate shmctl(2).  IPC_STAT/IPC_SET round-trip a
   shmid_ds; IPC_INFO and SHM_INFO return shminfo/shm_info through the
   same pointer argument (hence the casts); remaining commands take no
   buffer.  NOTE(review): case labels are elided in this view. */
2521 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2523 struct shmid_ds dsarg;
2524 struct shminfo shminfo;
2525 struct shm_info shm_info;
2526 abi_long ret = -TARGET_EINVAL;
2534 if (target_to_host_shmid_ds(&dsarg, buf))
2535 return -TARGET_EFAULT;
2536 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2537 if (host_to_target_shmid_ds(buf, &dsarg))
2538 return -TARGET_EFAULT;
2541 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2542 if (host_to_target_shminfo(buf, &shminfo))
2543 return -TARGET_EFAULT;
2546 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2547 if (host_to_target_shm_info(buf, &shm_info))
2548 return -TARGET_EFAULT;
2553 ret = get_errno(shmctl(shmid, cmd, NULL));
/* do_shmat(): emulate shmat(2).  If the guest supplied an address, map
   there; otherwise pick a free guest VMA via mmap_find_vma() and force
   the mapping with SHM_REMAP.  On success the guest address is returned
   through *raddr, page flags are updated, and the region is recorded in
   shm_regions[] so do_shmdt() can undo it.
   NOTE(review): interior lines elided in this view. */
2560 static inline abi_long do_shmat(int shmid, abi_ulong shmaddr, int shmflg,
2561 unsigned long *raddr)
2563 abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size);
2565 struct shmid_ds shm_info;
2568 /* find out the length of the shared memory segment */
2569 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2570 if (is_error(ret)) {
2571 /* can't get length, bail out */
2572 return get_errno(ret);
/* guest chose the address: attach directly at the translated address */
2578 *raddr = (unsigned long) shmat(shmid, g2h(shmaddr), shmflg);
2580 abi_ulong mmap_start;
2582 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2584 if (mmap_start == -1) {
2588 *raddr = (unsigned long) shmat(shmid, g2h(mmap_start),
2589 shmflg | SHM_REMAP);
2594 return get_errno(*raddr);
/* mark the attached range readable (and writable unless SHM_RDONLY) */
2597 page_set_flags(h2g(*raddr), h2g(*raddr) + shm_info.shm_segsz,
2598 PAGE_VALID | PAGE_READ |
2599 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* remember the region in the first free slot for later shmdt() */
2601 for (i = 0; i < N_SHM_REGIONS; i++) {
2602 if (shm_regions[i].start == 0) {
2603 shm_regions[i].start = h2g(*raddr);
2604 shm_regions[i].size = shm_info.shm_segsz;
2609 *raddr = h2g(*raddr);
2615 static inline abi_long do_shmdt(abi_ulong shmaddr)
2619 for (i = 0; i < N_SHM_REGIONS; ++i) {
2620 if (shm_regions[i].start == shmaddr) {
2621 shm_regions[i].start = 0;
2622 page_set_flags(shmaddr, shm_regions[i].size, 0);
2627 return get_errno(shmdt(g2h(shmaddr)));
/* do_ipc(): demultiplex the legacy ipc(2) multiplexer syscall onto the
   individual do_sem*/do_msg*/do_shm* helpers.  'version' in the top
   16 bits of 'call' selects old vs. new msgrcv argument passing.
   NOTE(review): case labels are elided in this view. */
2630 #ifdef TARGET_NR_ipc
2631 /* ??? This only works with linear mappings. */
2632 /* do_ipc() must return target values and target errnos. */
2633 static abi_long do_ipc(unsigned int call, int first,
2634 int second, int third,
2635 abi_long ptr, abi_long fifth)
2640 version = call >> 16;
2645 ret = do_semop(first, ptr, second);
2649 ret = get_errno(semget(first, second, third));
2653 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2657 ret = get_errno(msgget(first, second));
2661 ret = do_msgsnd(first, ptr, second, third);
2665 ret = do_msgctl(first, second, ptr);
/* old-style msgrcv: arguments packed in a target_ipc_kludge struct */
2672 struct target_ipc_kludge {
2677 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2678 ret = -TARGET_EFAULT;
2682 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2684 unlock_user_struct(tmp, ptr, 0);
/* new-style msgrcv: arguments passed directly */
2688 ret = do_msgrcv(first, ptr, second, fifth, third);
2696 unsigned long raddr;
2698 ret = do_shmat(first, ptr, second, &raddr);
/* return the attach address through the 'third' pointer argument */
2702 ret = put_user_ual(raddr, third);
2706 ret = -TARGET_EINVAL;
2712 ret = do_shmdt(ptr);
2716 ret = get_errno(shmget(first, second, third));
2720 ret = do_shmctl(first, second, third);
2724 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2725 ret = -TARGET_ENOSYS;
2732 /* kernel structure types definitions */
2735 #define STRUCT(name, list...) STRUCT_ ## name,
2736 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2738 #include "syscall_types.h"
2741 #undef STRUCT_SPECIAL
2743 #define STRUCT(name, list...) static const argtype struct_ ## name ## _def[] = { list, TYPE_NULL };
2744 #define STRUCT_SPECIAL(name)
2745 #include "syscall_types.h"
2747 #undef STRUCT_SPECIAL
2749 typedef struct IOCTLEntry {
2750 unsigned int target_cmd;
2751 unsigned int host_cmd;
2754 const argtype arg_type[5];
2757 #define IOC_R 0x0001
2758 #define IOC_W 0x0002
2759 #define IOC_RW (IOC_R | IOC_W)
2761 #define MAX_STRUCT_SIZE 4096
2763 static IOCTLEntry ioctl_entries[] = {
2764 #define IOCTL(cmd, access, types...) \
2765 { TARGET_ ## cmd, cmd, #cmd, access, { types } },
/* do_ioctl(): emulate ioctl(2) by looking up 'cmd' in ioctl_entries[]
   and using the thunk machinery to convert the argument structure
   between target and host layouts according to the entry's access mode
   (IOC_R read-back, IOC_W write-in, IOC_RW both).
   NOTE(review): case labels and loop framing are elided in this view. */
2770 /* ??? Implement proper locking for ioctls. */
2771 /* do_ioctl() Must return target values and target errnos. */
2772 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
2774 const IOCTLEntry *ie;
2775 const argtype *arg_type;
2777 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* table is terminated by a zero target_cmd entry */
2783 if (ie->target_cmd == 0) {
2784 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
2785 return -TARGET_ENOSYS;
2787 if (ie->target_cmd == cmd)
2791 arg_type = ie->arg_type;
2793 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
2795 switch(arg_type[0]) {
/* no argument */
2798 ret = get_errno(ioctl(fd, ie->host_cmd));
/* integer argument passed through unchanged */
2803 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* pointer-to-struct argument: convert via the thunk descriptors */
2807 target_size = thunk_type_size(arg_type, 0);
2808 switch(ie->access) {
/* IOC_R: host fills buf_temp, convert back out to the guest */
2810 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2811 if (!is_error(ret)) {
2812 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2814 return -TARGET_EFAULT;
2815 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2816 unlock_user(argptr, arg, target_size);
/* IOC_W: convert guest struct in, then call */
2820 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2822 return -TARGET_EFAULT;
2823 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2824 unlock_user(argptr, arg, 0);
2825 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* IOC_RW: convert in, call, convert the result back out */
2829 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2831 return -TARGET_EFAULT;
2832 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2833 unlock_user(argptr, arg, 0);
2834 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2835 if (!is_error(ret)) {
2836 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2838 return -TARGET_EFAULT;
2839 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2840 unlock_user(argptr, arg, target_size);
2846 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
2847 (long)cmd, arg_type[0]);
2848 ret = -TARGET_ENOSYS;
/* Guest<->host translation table for termios c_iflag bits.
 * Each row is { target_mask, target_bits, host_mask, host_bits }. */
2854 static const bitmask_transtbl iflag_tbl[] = {
2855 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
2856 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
2857 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
2858 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
2859 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
2860 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
2861 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
2862 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
2863 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
2864 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
2865 { TARGET_IXON, TARGET_IXON, IXON, IXON },
2866 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
2867 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
2868 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* Guest<->host translation table for termios c_oflag bits, including
 * the multi-bit delay fields (NLDLY, CRDLY, TABDLY, ...), where the
 * mask column selects the field and the bits column one value of it. */
2872 static const bitmask_transtbl oflag_tbl[] = {
2873 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
2874 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
2875 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
2876 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
2877 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
2878 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
2879 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
2880 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
2881 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
2882 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
2883 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
2884 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
2885 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
2886 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
2887 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
2888 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
2889 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
2890 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
2891 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
2892 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
2893 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
2894 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
2895 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
2896 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* Guest<->host translation table for termios c_cflag bits: baud rate
 * (CBAUD field, one row per rate), character size (CSIZE field) and
 * the single-bit control flags. */
2900 static const bitmask_transtbl cflag_tbl[] = {
2901 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
2902 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
2903 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
2904 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
2905 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
2906 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
2907 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
2908 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
2909 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
2910 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
2911 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
2912 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
2913 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
2914 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
2915 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
2916 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
2917 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
2918 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
2919 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
2920 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
2921 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
2922 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
2923 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
2924 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
2925 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
2926 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
2927 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
2928 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
2929 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
2930 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
2931 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* Guest<->host translation table for termios c_lflag (local mode) bits. */
2935 static const bitmask_transtbl lflag_tbl[] = {
2936 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
2937 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
2938 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
2939 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
2940 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
2941 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
2942 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
2943 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
2944 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
2945 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
2946 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
2947 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
2948 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
2949 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
2950 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/* Convert a guest struct target_termios (guest byte order) into a host
 * struct host_termios: the four flag words go through the bitmask
 * tables above (after tswap32), and each control character is copied
 * individually because the c_cc index constants differ per arch.
 * NOTE(review): excerpt is gapped — the c_?flag assignment left-hand
 * sides live on missing lines. */
2954 static void target_to_host_termios (void *dst, const void *src)
2956 struct host_termios *host = dst;
2957 const struct target_termios *target = src;
2960 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
2962 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
2964 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
2966 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
2967 host->c_line = target->c_line;
2969 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
2970 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
2971 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
2972 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
2973 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
2974 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
2975 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
2976 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
2977 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
2978 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
2979 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
2980 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
2981 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
2982 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
2983 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
2984 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
2985 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Inverse of target_to_host_termios: convert a host struct
 * host_termios into guest layout/byte order. Flag words are mapped
 * through the same bitmask tables then tswap32'd; c_cc entries are
 * copied one by one under their per-arch index constants. */
2988 static void host_to_target_termios (void *dst, const void *src)
2990 struct target_termios *target = dst;
2991 const struct host_termios *host = src;
2994 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
2996 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
2998 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3000 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3001 target->c_line = host->c_line;
3003 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3004 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3005 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3006 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3007 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3008 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3009 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3010 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3011 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3012 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3013 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3014 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3015 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3016 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3017 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3018 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3019 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor for termios: registers the two converters and the
 * sizes/alignments of the guest and host representations. */
3022 static const StructEntry struct_termios_def = {
3023 .convert = { host_to_target_termios, target_to_host_termios },
3024 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3025 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* Guest<->host translation table for mmap() flag bits. */
3028 static bitmask_transtbl mmap_flags_tbl[] = {
3029 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3030 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3031 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3032 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3033 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3034 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3035 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3036 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3040 #if defined(TARGET_I386)
3042 /* NOTE: there is really one LDT for all the threads */
3043 static uint8_t *ldt_table;
/* read_ldt(): copy the emulated LDT (shared by all threads) out to
 * guest memory, clamped to the caller-supplied bytecount. */
3045 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3052 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3053 if (size > bytecount)
3055 p = lock_user(VERIFY_WRITE, ptr, size, 0)
3057 return -TARGET_EFAULT;
3058 /* ??? Should this by byteswapped? */
3059 memcpy(p, ldt_table, size);
3060 unlock_user(p, ptr, size);
3064 /* XXX: add locking support */
/* write_ldt(): install one guest LDT entry. Validates the guest
 * modify_ldt descriptor, lazily allocates the backing LDT pages on
 * first use, then encodes the two 32-bit descriptor words exactly as
 * the Linux kernel does (see arch/x86 modify_ldt). Not thread safe
 * (see the XXX above in the original file). */
3065 static abi_long write_ldt(CPUX86State *env,
3066 abi_ulong ptr, unsigned long bytecount, int oldmode)
3068 struct target_modify_ldt_ldt_s ldt_info;
3069 struct target_modify_ldt_ldt_s *target_ldt_info;
3070 int seg_32bit, contents, read_exec_only, limit_in_pages;
3071 int seg_not_present, useable, lm;
3072 uint32_t *lp, entry_1, entry_2;
3074 if (bytecount != sizeof(ldt_info))
3075 return -TARGET_EINVAL;
3076 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3077 return -TARGET_EFAULT;
/* Pull the descriptor out of guest memory, fixing byte order. */
3078 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3079 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3080 ldt_info.limit = tswap32(target_ldt_info->limit);
3081 ldt_info.flags = tswap32(target_ldt_info->flags);
3082 unlock_user_struct(target_ldt_info, ptr, 0);
3084 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3085 return -TARGET_EINVAL;
/* Unpack the packed flags word (same layout as the kernel's
 * user_desc: seg_32bit | contents | read_exec_only | ...). */
3086 seg_32bit = ldt_info.flags & 1;
3087 contents = (ldt_info.flags >> 1) & 3;
3088 read_exec_only = (ldt_info.flags >> 3) & 1;
3089 limit_in_pages = (ldt_info.flags >> 4) & 1;
3090 seg_not_present = (ldt_info.flags >> 5) & 1;
3091 useable = (ldt_info.flags >> 6) & 1;
3095 lm = (ldt_info.flags >> 7) & 1;
3097 if (contents == 3) {
3099 return -TARGET_EINVAL;
3100 if (seg_not_present == 0)
3101 return -TARGET_EINVAL;
3103 /* allocate the LDT */
3105 env->ldt.base = target_mmap(0,
3106 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3107 PROT_READ|PROT_WRITE,
3108 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3109 if (env->ldt.base == -1)
3110 return -TARGET_ENOMEM;
3111 memset(g2h(env->ldt.base), 0,
3112 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3113 env->ldt.limit = 0xffff;
3114 ldt_table = g2h(env->ldt.base);
3117 /* NOTE: same code as Linux kernel */
3118 /* Allow LDTs to be cleared by the user. */
3119 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3122 read_exec_only == 1 &&
3124 limit_in_pages == 0 &&
3125 seg_not_present == 1 &&
/* Encode the descriptor: low word = base[15:0] | limit[15:0],
 * high word = base high bits, type/flag bits, limit[19:16]. */
3133 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3134 (ldt_info.limit & 0x0ffff);
3135 entry_2 = (ldt_info.base_addr & 0xff000000) |
3136 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3137 (ldt_info.limit & 0xf0000) |
3138 ((read_exec_only ^ 1) << 9) |
3140 ((seg_not_present ^ 1) << 15) |
3142 (limit_in_pages << 23) |
3146 entry_2 |= (useable << 20);
3148 /* Install the new entry ... */
3150 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3151 lp[0] = tswap32(entry_1);
3152 lp[1] = tswap32(entry_2);
3156 /* specific and weird i386 syscalls */
/* do_modify_ldt(): dispatch the modify_ldt(2) func codes to
 * read_ldt()/write_ldt() (oldmode=1 for the legacy write variant). */
3157 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3158 unsigned long bytecount)
3164 ret = read_ldt(ptr, bytecount);
3167 ret = write_ldt(env, ptr, bytecount, 1);
3170 ret = write_ldt(env, ptr, bytecount, 0);
3173 ret = -TARGET_ENOSYS;
3179 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* do_set_thread_area(): i386/ABI32 emulation of set_thread_area(2).
 * Installs a TLS descriptor into the emulated GDT. entry_number == -1
 * asks for the first free TLS slot, which is written back to the guest
 * struct. Descriptor encoding mirrors the kernel's fill_ldt(). */
3180 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3182 uint64_t *gdt_table = g2h(env->gdt.base);
3183 struct target_modify_ldt_ldt_s ldt_info;
3184 struct target_modify_ldt_ldt_s *target_ldt_info;
3185 int seg_32bit, contents, read_exec_only, limit_in_pages;
3186 int seg_not_present, useable, lm;
3187 uint32_t *lp, entry_1, entry_2;
3190 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3191 if (!target_ldt_info)
3192 return -TARGET_EFAULT;
3193 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3194 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3195 ldt_info.limit = tswap32(target_ldt_info->limit);
3196 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 means "pick a free TLS slot" and report it back to the guest. */
3197 if (ldt_info.entry_number == -1) {
3198 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3199 if (gdt_table[i] == 0) {
3200 ldt_info.entry_number = i;
3201 target_ldt_info->entry_number = tswap32(i);
3206 unlock_user_struct(target_ldt_info, ptr, 1);
3208 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3209 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3210 return -TARGET_EINVAL;
/* Unpack the user_desc flags word (same bit layout as write_ldt). */
3211 seg_32bit = ldt_info.flags & 1;
3212 contents = (ldt_info.flags >> 1) & 3;
3213 read_exec_only = (ldt_info.flags >> 3) & 1;
3214 limit_in_pages = (ldt_info.flags >> 4) & 1;
3215 seg_not_present = (ldt_info.flags >> 5) & 1;
3216 useable = (ldt_info.flags >> 6) & 1;
3220 lm = (ldt_info.flags >> 7) & 1;
3223 if (contents == 3) {
3224 if (seg_not_present == 0)
3225 return -TARGET_EINVAL;
3228 /* NOTE: same code as Linux kernel */
3229 /* Allow LDTs to be cleared by the user. */
3230 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3231 if ((contents == 0 &&
3232 read_exec_only == 1 &&
3234 limit_in_pages == 0 &&
3235 seg_not_present == 1 &&
/* Encode the two descriptor words, as in write_ldt(). */
3243 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3244 (ldt_info.limit & 0x0ffff);
3245 entry_2 = (ldt_info.base_addr & 0xff000000) |
3246 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3247 (ldt_info.limit & 0xf0000) |
3248 ((read_exec_only ^ 1) << 9) |
3250 ((seg_not_present ^ 1) << 15) |
3252 (limit_in_pages << 23) |
3257 /* Install the new entry ... */
3259 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3260 lp[0] = tswap32(entry_1);
3261 lp[1] = tswap32(entry_2);
/* do_get_thread_area(): i386/ABI32 emulation of get_thread_area(2).
 * Decodes the requested TLS slot of the emulated GDT back into the
 * guest's user_desc layout (inverse of do_set_thread_area). */
3265 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3267 struct target_modify_ldt_ldt_s *target_ldt_info;
3268 uint64_t *gdt_table = g2h(env->gdt.base);
3269 uint32_t base_addr, limit, flags;
3270 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3271 int seg_not_present, useable, lm;
3272 uint32_t *lp, entry_1, entry_2;
3274 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3275 if (!target_ldt_info)
3276 return -TARGET_EFAULT;
3277 idx = tswap32(target_ldt_info->entry_number);
3278 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3279 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3280 unlock_user_struct(target_ldt_info, ptr, 1);
3281 return -TARGET_EINVAL;
3283 lp = (uint32_t *)(gdt_table + idx);
3284 entry_1 = tswap32(lp[0]);
3285 entry_2 = tswap32(lp[1]);
/* Decode the descriptor bits; read_exec_only and seg_not_present are
 * stored inverted in the descriptor, hence the ^ 1. */
3287 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3288 contents = (entry_2 >> 10) & 3;
3289 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3290 seg_32bit = (entry_2 >> 22) & 1;
3291 limit_in_pages = (entry_2 >> 23) & 1;
3292 useable = (entry_2 >> 20) & 1;
3296 lm = (entry_2 >> 21) & 1;
/* Repack into the user_desc flags word and write back to the guest. */
3298 flags = (seg_32bit << 0) | (contents << 1) |
3299 (read_exec_only << 3) | (limit_in_pages << 4) |
3300 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3301 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3302 base_addr = (entry_1 >> 16) |
3303 (entry_2 & 0xff000000) |
3304 ((entry_2 & 0xff) << 16);
3305 target_ldt_info->base_addr = tswapl(base_addr);
3306 target_ldt_info->limit = tswap32(limit);
3307 target_ldt_info->flags = tswap32(flags);
3308 unlock_user_struct(target_ldt_info, ptr, 1);
3311 #endif /* TARGET_I386 && TARGET_ABI32 */
3313 #ifndef TARGET_ABI32
/* do_arch_prctl(): x86-64 arch_prctl emulation — set/get the FS or GS
 * segment base in the CPU state. GET writes the base back to guest
 * memory with put_user. */
3314 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3321 case TARGET_ARCH_SET_GS:
3322 case TARGET_ARCH_SET_FS:
3323 if (code == TARGET_ARCH_SET_GS)
3327 cpu_x86_load_seg(env, idx, 0);
3328 env->segs[idx].base = addr;
3330 case TARGET_ARCH_GET_GS:
3331 case TARGET_ARCH_GET_FS:
3332 if (code == TARGET_ARCH_GET_GS)
3336 val = env->segs[idx].base;
3337 if (put_user(val, addr, abi_ulong))
3338 return -TARGET_EFAULT;
3341 ret = -TARGET_EINVAL;
3348 #endif /* defined(TARGET_I386) */
3350 #if defined(USE_NPTL)
3352 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
3354 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3357 pthread_mutex_t mutex;
3358 pthread_cond_t cond;
3362 abi_ulong child_tidptr;
3363 abi_ulong parent_tidptr;
/* clone_func() (NPTL path): entry point of a newly created guest
 * thread. Publishes its TID to the requested child/parent locations,
 * unblocks signals, signals readiness to the parent via info->cond,
 * then waits on clone_lock until the parent finishes TLS setup. */
3367 static void *clone_func(void *arg)
3369 new_thread_info *info = arg;
3374 info->tid = gettid();
3375 if (info->child_tidptr)
3376 put_user_u32(info->tid, info->child_tidptr);
3377 if (info->parent_tidptr)
3378 put_user_u32(info->tid, info->parent_tidptr);
3379 /* Enable signals. */
3380 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3381 /* Signal to the parent that we're ready. */
3382 pthread_mutex_lock(&info->mutex);
3383 pthread_cond_broadcast(&info->cond);
3384 pthread_mutex_unlock(&info->mutex);
3385 /* Wait until the parent has finshed initializing the tls state. */
3386 pthread_mutex_lock(&clone_lock);
3387 pthread_mutex_unlock(&clone_lock);
3393 /* this stack is the equivalent of the kernel stack associated with a
3395 #define NEW_STACK_SIZE 8192
3397 static int clone_func(void *arg)
3399 CPUState *env = arg;
3406 /* do_fork() Must return host values and target errnos (unlike most
3407 do_*() functions). */
/* do_fork(): implement clone()/fork()/vfork() for the guest.
 * CLONE_VM requests become host threads (pthread_create on the NPTL
 * build, raw clone() otherwise); anything else becomes a fork().
 * Returns host values and target errnos (unlike most do_* helpers).
 * NOTE(review): gapped excerpt — several braces/else branches and the
 * fork() call itself are on missing lines. */
3408 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3409 abi_ulong parent_tidptr, target_ulong newtls,
3410 abi_ulong child_tidptr)
3416 #if defined(USE_NPTL)
3417 unsigned int nptl_flags;
3421 /* Emulate vfork() with fork() */
3422 if (flags & CLONE_VFORK)
3423 flags &= ~(CLONE_VFORK | CLONE_VM);
3425 if (flags & CLONE_VM) {
3426 #if defined(USE_NPTL)
3427 new_thread_info info;
3428 pthread_attr_t attr;
3430 ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
3431 init_task_state(ts);
3432 new_stack = ts->stack;
3433 /* we create a new CPU instance. */
3434 new_env = cpu_copy(env);
3435 /* Init regs that differ from the parent. */
3436 cpu_clone_regs(new_env, newsp);
3437 new_env->opaque = ts;
3438 #if defined(USE_NPTL)
/* The NPTL-specific flags are handled here, not passed to the host. */
3440 flags &= ~CLONE_NPTL_FLAGS2;
3442 if (nptl_flags & CLONE_CHILD_CLEARTID) {
3443 ts->child_tidptr = child_tidptr;
3446 if (nptl_flags & CLONE_SETTLS)
3447 cpu_set_tls (new_env, newtls);
3449 /* Grab a mutex so that thread setup appears atomic. */
3450 pthread_mutex_lock(&clone_lock);
3452 memset(&info, 0, sizeof(info));
3453 pthread_mutex_init(&info.mutex, NULL);
3454 pthread_mutex_lock(&info.mutex);
3455 pthread_cond_init(&info.cond, NULL);
3457 if (nptl_flags & CLONE_CHILD_SETTID)
3458 info.child_tidptr = child_tidptr;
3459 if (nptl_flags & CLONE_PARENT_SETTID)
3460 info.parent_tidptr = parent_tidptr;
3462 ret = pthread_attr_init(&attr);
3463 ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
3464 /* It is not safe to deliver signals until the child has finished
3465 initializing, so temporarily block all signals. */
3466 sigfillset(&sigmask);
3467 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3469 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3470 /* TODO: Free new CPU state if thread creation failed. */
3472 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3473 pthread_attr_destroy(&attr);
3475 /* Wait for the child to initialize. */
3476 pthread_cond_wait(&info.cond, &info.mutex);
3478 if (flags & CLONE_PARENT_SETTID)
3479 put_user_u32(ret, parent_tidptr);
3483 pthread_mutex_unlock(&info.mutex);
3484 pthread_cond_destroy(&info.cond);
3485 pthread_mutex_destroy(&info.mutex);
3486 pthread_mutex_unlock(&clone_lock);
3488 if (flags & CLONE_NPTL_FLAGS2)
3490 /* This is probably going to die very quickly, but do it anyway. */
3492 ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3494 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3498 /* if no CLONE_VM, we consider it is a fork */
3499 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3504 /* Child Process. */
3505 cpu_clone_regs(env, newsp);
3507 #if defined(USE_NPTL)
3508 /* There is a race condition here. The parent process could
3509 theoretically read the TID in the child process before the child
3510 tid is set. This would require using either ptrace
3511 (not implemented) or having *_tidptr to point at a shared memory
3512 mapping. We can't repeat the spinlock hack used above because
3513 the child process gets its own copy of the lock. */
3514 if (flags & CLONE_CHILD_SETTID)
3515 put_user_u32(gettid(), child_tidptr);
3516 if (flags & CLONE_PARENT_SETTID)
3517 put_user_u32(gettid(), parent_tidptr);
3518 ts = (TaskState *)env->opaque;
3519 if (flags & CLONE_SETTLS)
3520 cpu_set_tls (env, newtls);
3521 if (flags & CLONE_CHILD_CLEARTID)
3522 ts->child_tidptr = child_tidptr;
/* do_fcntl(): emulate fcntl(2). Lock commands convert struct flock /
 * flock64 between guest and host layout; F_GETFL/F_SETFL map the flag
 * bits through fcntl_flags_tbl; everything else passes arg through. */
3531 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
3534 struct target_flock *target_fl;
3535 struct flock64 fl64;
3536 struct target_flock64 *target_fl64;
3540 case TARGET_F_GETLK:
3541 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3542 return -TARGET_EFAULT;
3543 fl.l_type = tswap16(target_fl->l_type);
3544 fl.l_whence = tswap16(target_fl->l_whence);
3545 fl.l_start = tswapl(target_fl->l_start);
3546 fl.l_len = tswapl(target_fl->l_len);
3547 fl.l_pid = tswapl(target_fl->l_pid);
3548 unlock_user_struct(target_fl, arg, 0);
3549 ret = get_errno(fcntl(fd, cmd, &fl));
/* On success, copy the (possibly updated) lock back to the guest. */
3551 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
3552 return -TARGET_EFAULT;
3553 target_fl->l_type = tswap16(fl.l_type);
3554 target_fl->l_whence = tswap16(fl.l_whence);
3555 target_fl->l_start = tswapl(fl.l_start);
3556 target_fl->l_len = tswapl(fl.l_len);
3557 target_fl->l_pid = tswapl(fl.l_pid);
3558 unlock_user_struct(target_fl, arg, 1);
3562 case TARGET_F_SETLK:
3563 case TARGET_F_SETLKW:
3564 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3565 return -TARGET_EFAULT;
3566 fl.l_type = tswap16(target_fl->l_type);
3567 fl.l_whence = tswap16(target_fl->l_whence);
3568 fl.l_start = tswapl(target_fl->l_start);
3569 fl.l_len = tswapl(target_fl->l_len);
3570 fl.l_pid = tswapl(target_fl->l_pid);
3571 unlock_user_struct(target_fl, arg, 0);
3572 ret = get_errno(fcntl(fd, cmd, &fl));
3575 case TARGET_F_GETLK64:
3576 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3577 return -TARGET_EFAULT;
/* NOTE(review): the ">> 1" applied to l_type here (and to cmd below)
 * looks wrong — l_type is a lock type (F_RDLCK/F_WRLCK/F_UNLCK), not
 * a shifted value, and cmd should be mapped to the host F_GETLK64,
 * not halved. Also l_pid is read with tswap16 but written back with
 * tswapl below — inconsistent. Cannot fix safely in this gapped
 * excerpt; verify against current QEMU linux-user/syscall.c. */
3578 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3579 fl64.l_whence = tswap16(target_fl64->l_whence);
3580 fl64.l_start = tswapl(target_fl64->l_start);
3581 fl64.l_len = tswapl(target_fl64->l_len);
3582 fl64.l_pid = tswap16(target_fl64->l_pid);
3583 unlock_user_struct(target_fl64, arg, 0);
3584 ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
3586 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
3587 return -TARGET_EFAULT;
3588 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
3589 target_fl64->l_whence = tswap16(fl64.l_whence);
3590 target_fl64->l_start = tswapl(fl64.l_start);
3591 target_fl64->l_len = tswapl(fl64.l_len);
3592 target_fl64->l_pid = tswapl(fl64.l_pid);
3593 unlock_user_struct(target_fl64, arg, 1);
3596 case TARGET_F_SETLK64:
3597 case TARGET_F_SETLKW64:
3598 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3599 return -TARGET_EFAULT;
3600 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3601 fl64.l_whence = tswap16(target_fl64->l_whence);
3602 fl64.l_start = tswapl(target_fl64->l_start);
3603 fl64.l_len = tswapl(target_fl64->l_len);
3604 fl64.l_pid = tswap16(target_fl64->l_pid);
3605 unlock_user_struct(target_fl64, arg, 0);
3606 ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
/* F_GETFL: translate the host flag bits back to target encoding. */
3610 ret = get_errno(fcntl(fd, cmd, arg));
3612 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
/* F_SETFL: translate the target flag bits to host encoding. */
3617 ret = get_errno(fcntl(fd, cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
/* Default: pass cmd and arg straight through. */
3621 ret = get_errno(fcntl(fd, cmd, arg));
/* 16-bit uid/gid wrappers: clamp 32-bit host ids into the 16-bit
 * range for USE_UID16 targets, and widen them back, preserving the
 * -1 "no change" sentinel. (Bodies partially elided in this excerpt.) */
3629 static inline int high2lowuid(int uid)
3637 static inline int high2lowgid(int gid)
3645 static inline int low2highuid(int uid)
3647 if ((int16_t)uid == -1)
3653 static inline int low2highgid(int gid)
3655 if ((int16_t)gid == -1)
3661 #endif /* USE_UID16 */
/* syscall_init(): one-time setup. Registers all thunk struct types,
 * patches ioctl_entries[] whose size field was left as the all-ones
 * sentinel with the real thunk size, builds the reverse errno table,
 * and (same-arch builds only) sanity-checks target vs host ioctl
 * numbers. */
3663 void syscall_init(void)
3666 const argtype *arg_type;
3670 #define STRUCT(name, list...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3671 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3672 #include "syscall_types.h"
3674 #undef STRUCT_SPECIAL
3676 /* we patch the ioctl size if necessary. We rely on the fact that
3677 no ioctl has all the bits at '1' in the size field */
3679 while (ie->target_cmd != 0) {
3680 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
3681 TARGET_IOC_SIZEMASK) {
3682 arg_type = ie->arg_type;
/* Size can only be derived for pointer-typed ioctl arguments. */
3683 if (arg_type[0] != TYPE_PTR) {
3684 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
3689 size = thunk_type_size(arg_type, 0);
3690 ie->target_cmd = (ie->target_cmd &
3691 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
3692 (size << TARGET_IOC_SIZESHIFT);
3695 /* Build target_to_host_errno_table[] table from
3696 * host_to_target_errno_table[]. */
3697 for (i=0; i < ERRNO_TABLE_SIZE; i++)
3698 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
3700 /* automatic consistency check if same arch */
3701 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3702 (defined(__x86_64__) && defined(TARGET_X86_64))
3703 if (unlikely(ie->target_cmd != ie->host_cmd)) {
3704 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3705 ie->name, ie->target_cmd, ie->host_cmd);
3712 #if TARGET_ABI_BITS == 32
/* target_offset64(): reassemble a 64-bit file offset from the two
 * 32-bit registers a 32-bit ABI passes it in; which register holds
 * the high half depends on guest endianness. The 64-bit-ABI variant
 * (below) is trivial. */
3713 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
3715 #ifdef TARGET_WORDS_BIGENDIAN
3716 return ((uint64_t)word0 << 32) | word1;
3718 return ((uint64_t)word1 << 32) | word0;
3721 #else /* TARGET_ABI_BITS == 32 */
3722 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
3726 #endif /* TARGET_ABI_BITS != 32 */
3728 #ifdef TARGET_NR_truncate64
/* target_truncate64(): truncate64 helper; the ARM EABI passes the
 * 64-bit length in a shifted register pair (handled on elided lines). */
3729 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
3735 if (((CPUARMState *)cpu_env)->eabi)
3741 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
3745 #ifdef TARGET_NR_ftruncate64
/* target_ftruncate64(): same register-pair handling as
 * target_truncate64, operating on an fd instead of a path. */
3746 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
3752 if (((CPUARMState *)cpu_env)->eabi)
3758 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Copy a struct timespec from guest memory into host form,
 * byte-swapping both fields. Returns -TARGET_EFAULT on a bad pointer. */
3762 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
3763 abi_ulong target_addr)
3765 struct target_timespec *target_ts;
3767 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
3768 return -TARGET_EFAULT;
3769 host_ts->tv_sec = tswapl(target_ts->tv_sec);
3770 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
3771 unlock_user_struct(target_ts, target_addr, 0);
/* Copy a host struct timespec out to guest memory, byte-swapping both
 * fields. Returns -TARGET_EFAULT on a bad pointer. */
3775 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
3776 struct timespec *host_ts)
3778 struct target_timespec *target_ts;
3780 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
3781 return -TARGET_EFAULT;
3782 target_ts->tv_sec = tswapl(host_ts->tv_sec);
3783 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
3784 unlock_user_struct(target_ts, target_addr, 1);
3788 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* host_to_target_stat64(): copy a host struct stat into the guest's
 * stat64 layout. The ARM EABI variant uses target_eabi_stat64; other
 * targets use target_stat (64-bit ABIs) or target_stat64. Every field
 * goes through __put_user for byte order. */
3789 static inline abi_long host_to_target_stat64(void *cpu_env,
3790 abi_ulong target_addr,
3791 struct stat *host_st)
3794 if (((CPUARMState *)cpu_env)->eabi) {
3795 struct target_eabi_stat64 *target_st;
3797 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3798 return -TARGET_EFAULT;
3799 memset(target_st, 0, sizeof(struct target_eabi_stat64));
3800 __put_user(host_st->st_dev, &target_st->st_dev);
3801 __put_user(host_st->st_ino, &target_st->st_ino);
3802 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3803 __put_user(host_st->st_ino, &target_st->__st_ino);
3805 __put_user(host_st->st_mode, &target_st->st_mode);
3806 __put_user(host_st->st_nlink, &target_st->st_nlink);
3807 __put_user(host_st->st_uid, &target_st->st_uid);
3808 __put_user(host_st->st_gid, &target_st->st_gid);
3809 __put_user(host_st->st_rdev, &target_st->st_rdev);
3810 __put_user(host_st->st_size, &target_st->st_size);
3811 __put_user(host_st->st_blksize, &target_st->st_blksize);
3812 __put_user(host_st->st_blocks, &target_st->st_blocks);
3813 __put_user(host_st->st_atime, &target_st->target_st_atime);
3814 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3815 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3816 unlock_user_struct(target_st, target_addr, 1);
/* Non-EABI path: pick the struct matching the target word size. */
3820 #if TARGET_LONG_BITS == 64
3821 struct target_stat *target_st;
3823 struct target_stat64 *target_st;
3826 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3827 return -TARGET_EFAULT;
3828 memset(target_st, 0, sizeof(*target_st));
3829 __put_user(host_st->st_dev, &target_st->st_dev);
3830 __put_user(host_st->st_ino, &target_st->st_ino);
3831 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3832 __put_user(host_st->st_ino, &target_st->__st_ino);
3834 __put_user(host_st->st_mode, &target_st->st_mode);
3835 __put_user(host_st->st_nlink, &target_st->st_nlink);
3836 __put_user(host_st->st_uid, &target_st->st_uid);
3837 __put_user(host_st->st_gid, &target_st->st_gid);
3838 __put_user(host_st->st_rdev, &target_st->st_rdev);
3839 /* XXX: better use of kernel struct */
3840 __put_user(host_st->st_size, &target_st->st_size);
3841 __put_user(host_st->st_blksize, &target_st->st_blksize);
3842 __put_user(host_st->st_blocks, &target_st->st_blocks);
3843 __put_user(host_st->st_atime, &target_st->target_st_atime);
3844 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3845 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3846 unlock_user_struct(target_st, target_addr, 1);
3853 #if defined(USE_NPTL)
3854 /* ??? Using host futex calls even when target atomic operations
3855 are not really atomic probably breaks things. However implementing
3856 futexes locally would make futexes shared between multiple processes
3857 tricky. However they're probably useless because guest atomic
3858 operations won't work either. */
/* do_futex(): forward guest futex ops to the host futex syscall,
 * translating the guest timeout (WAIT) and uaddr pointers via g2h.
 * NOTE(review): FUTEX_WAIT passes tswap32(val) and FUTEX_CMP_REQUEUE
 * passes tswap32(val3), but val/val3 arrive as already-host-order
 * syscall arguments here — the swap looks spurious; confirm against
 * current QEMU, where these are passed unswapped. */
3859 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
3860 target_ulong uaddr2, int val3)
3862 struct timespec ts, *pts;
3864 /* ??? We assume FUTEX_* constants are the same on both host
3870 target_to_host_timespec(pts, timeout);
3874 return get_errno(sys_futex(g2h(uaddr), FUTEX_WAIT, tswap32(val),
3877 return get_errno(sys_futex(g2h(uaddr), FUTEX_WAKE, val, NULL, NULL, 0));
3879 return get_errno(sys_futex(g2h(uaddr), FUTEX_FD, val, NULL, NULL, 0));
3881 return get_errno(sys_futex(g2h(uaddr), FUTEX_REQUEUE, val,
3882 NULL, g2h(uaddr2), 0));
3883 case FUTEX_CMP_REQUEUE:
3884 return get_errno(sys_futex(g2h(uaddr), FUTEX_CMP_REQUEUE, val,
3885 NULL, g2h(uaddr2), tswap32(val3)));
3887 return -TARGET_ENOSYS;
/* Return the kernel version to report to the guest, packed one byte per
 * component (tmp = (tmp << 8) + n over three "major.minor.patch" fields).
 * The version string comes from the user-supplied qemu_uname_release
 * override when set, otherwise from the host via sys_uname().
 * NOTE(review): 'osversion' is static, presumably to cache the computed
 * value across calls — confirm against the elided tail of the function. */
3892 int get_osversion(void)
3894 static int osversion;
3895 struct new_utsname buf;
/* Prefer an explicit -r/QEMU_UNAME override over the real host release. */
3900 if (qemu_uname_release && *qemu_uname_release) {
3901 s = qemu_uname_release;
3903 if (sys_uname(&buf))
/* Parse up to three dot-separated numeric components. */
3908 for (i = 0; i < 3; i++) {
3910 while (*s >= '0' && *s <= '9') {
/* Pack each component into successive bytes of the result. */
3915 tmp = (tmp << 8) + n;
3923 /* do_syscall() should always have a single exit point at the end so
3924 that actions, such as logging of syscall results, can be performed.
3925 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
3926 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
3927 abi_long arg2, abi_long arg3, abi_long arg4,
3928 abi_long arg5, abi_long arg6)
3936 gemu_log("syscall %d", num);
3939 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
3942 case TARGET_NR_exit:
3944 /* In old applications this may be used to implement _exit(2).
3945 However in threaded applictions it is used for thread termination,
3946 and _exit_group is used for application termination.
3947 Do thread termination if we have more then one thread. */
3948 /* FIXME: This probably breaks if a signal arrives. We should probably
3949 be disabling signals. */
3950 if (first_cpu->next_cpu) {
3957 while (p && p != (CPUState *)cpu_env) {
3958 lastp = &p->next_cpu;
3961 /* If we didn't find the CPU for this thread then something is
3965 /* Remove the CPU from the list. */
3966 *lastp = p->next_cpu;
3968 TaskState *ts = ((CPUState *)cpu_env)->opaque;
3969 if (ts->child_tidptr) {
3970 put_user_u32(0, ts->child_tidptr);
3971 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
3974 /* TODO: Free CPU state. */
3981 gdb_exit(cpu_env, arg1);
3983 ret = 0; /* avoid warning */
3985 case TARGET_NR_read:
3989 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
3991 ret = get_errno(read(arg1, p, arg3));
3992 unlock_user(p, arg2, ret);
3995 case TARGET_NR_write:
3996 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
3998 ret = get_errno(write(arg1, p, arg3));
3999 unlock_user(p, arg2, 0);
4001 case TARGET_NR_open:
4002 if (!(p = lock_user_string(arg1)))
4004 ret = get_errno(open(path(p),
4005 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4007 unlock_user(p, arg1, 0);
4009 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4010 case TARGET_NR_openat:
4011 if (!(p = lock_user_string(arg2)))
4013 ret = get_errno(sys_openat(arg1,
4015 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4017 unlock_user(p, arg2, 0);
4020 case TARGET_NR_close:
4021 ret = get_errno(close(arg1));
4026 case TARGET_NR_fork:
4027 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4029 #ifdef TARGET_NR_waitpid
4030 case TARGET_NR_waitpid:
4033 ret = get_errno(waitpid(arg1, &status, arg3));
4034 if (!is_error(ret) && arg2
4035 && put_user_s32(status, arg2))
4040 #ifdef TARGET_NR_waitid
4041 case TARGET_NR_waitid:
4045 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4046 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4047 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4049 host_to_target_siginfo(p, &info);
4050 unlock_user(p, arg3, sizeof(target_siginfo_t));
4055 #ifdef TARGET_NR_creat /* not on alpha */
4056 case TARGET_NR_creat:
4057 if (!(p = lock_user_string(arg1)))
4059 ret = get_errno(creat(p, arg2));
4060 unlock_user(p, arg1, 0);
4063 case TARGET_NR_link:
4066 p = lock_user_string(arg1);
4067 p2 = lock_user_string(arg2);
4069 ret = -TARGET_EFAULT;
4071 ret = get_errno(link(p, p2));
4072 unlock_user(p2, arg2, 0);
4073 unlock_user(p, arg1, 0);
4076 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4077 case TARGET_NR_linkat:
4082 p = lock_user_string(arg2);
4083 p2 = lock_user_string(arg4);
4085 ret = -TARGET_EFAULT;
4087 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4088 unlock_user(p, arg2, 0);
4089 unlock_user(p2, arg4, 0);
4093 case TARGET_NR_unlink:
4094 if (!(p = lock_user_string(arg1)))
4096 ret = get_errno(unlink(p));
4097 unlock_user(p, arg1, 0);
4099 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4100 case TARGET_NR_unlinkat:
4101 if (!(p = lock_user_string(arg2)))
4103 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4104 unlock_user(p, arg2, 0);
4107 case TARGET_NR_execve:
4109 char **argp, **envp;
4112 abi_ulong guest_argp;
4113 abi_ulong guest_envp;
4119 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4120 if (get_user_ual(addr, gp))
4128 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4129 if (get_user_ual(addr, gp))
4136 argp = alloca((argc + 1) * sizeof(void *));
4137 envp = alloca((envc + 1) * sizeof(void *));
4139 for (gp = guest_argp, q = argp; gp;
4140 gp += sizeof(abi_ulong), q++) {
4141 if (get_user_ual(addr, gp))
4145 if (!(*q = lock_user_string(addr)))
4150 for (gp = guest_envp, q = envp; gp;
4151 gp += sizeof(abi_ulong), q++) {
4152 if (get_user_ual(addr, gp))
4156 if (!(*q = lock_user_string(addr)))
4161 if (!(p = lock_user_string(arg1)))
4163 ret = get_errno(execve(p, argp, envp));
4164 unlock_user(p, arg1, 0);
4169 ret = -TARGET_EFAULT;
4172 for (gp = guest_argp, q = argp; *q;
4173 gp += sizeof(abi_ulong), q++) {
4174 if (get_user_ual(addr, gp)
4177 unlock_user(*q, addr, 0);
4179 for (gp = guest_envp, q = envp; *q;
4180 gp += sizeof(abi_ulong), q++) {
4181 if (get_user_ual(addr, gp)
4184 unlock_user(*q, addr, 0);
4188 case TARGET_NR_chdir:
4189 if (!(p = lock_user_string(arg1)))
4191 ret = get_errno(chdir(p));
4192 unlock_user(p, arg1, 0);
4194 #ifdef TARGET_NR_time
4195 case TARGET_NR_time:
4198 ret = get_errno(time(&host_time));
4201 && put_user_sal(host_time, arg1))
4206 case TARGET_NR_mknod:
4207 if (!(p = lock_user_string(arg1)))
4209 ret = get_errno(mknod(p, arg2, arg3));
4210 unlock_user(p, arg1, 0);
4212 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4213 case TARGET_NR_mknodat:
4214 if (!(p = lock_user_string(arg2)))
4216 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4217 unlock_user(p, arg2, 0);
4220 case TARGET_NR_chmod:
4221 if (!(p = lock_user_string(arg1)))
4223 ret = get_errno(chmod(p, arg2));
4224 unlock_user(p, arg1, 0);
4226 #ifdef TARGET_NR_break
4227 case TARGET_NR_break:
4230 #ifdef TARGET_NR_oldstat
4231 case TARGET_NR_oldstat:
4234 case TARGET_NR_lseek:
4235 ret = get_errno(lseek(arg1, arg2, arg3));
4237 #ifdef TARGET_NR_getxpid
4238 case TARGET_NR_getxpid:
4240 case TARGET_NR_getpid:
4242 ret = get_errno(getpid());
4244 case TARGET_NR_mount:
4246 /* need to look at the data field */
4248 p = lock_user_string(arg1);
4249 p2 = lock_user_string(arg2);
4250 p3 = lock_user_string(arg3);
4251 if (!p || !p2 || !p3)
4252 ret = -TARGET_EFAULT;
4254 /* FIXME - arg5 should be locked, but it isn't clear how to
4255 * do that since it's not guaranteed to be a NULL-terminated
4258 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4259 unlock_user(p, arg1, 0);
4260 unlock_user(p2, arg2, 0);
4261 unlock_user(p3, arg3, 0);
4264 #ifdef TARGET_NR_umount
4265 case TARGET_NR_umount:
4266 if (!(p = lock_user_string(arg1)))
4268 ret = get_errno(umount(p));
4269 unlock_user(p, arg1, 0);
4272 #ifdef TARGET_NR_stime /* not on alpha */
4273 case TARGET_NR_stime:
4276 if (get_user_sal(host_time, arg1))
4278 ret = get_errno(stime(&host_time));
4282 case TARGET_NR_ptrace:
4284 #ifdef TARGET_NR_alarm /* not on alpha */
4285 case TARGET_NR_alarm:
4289 #ifdef TARGET_NR_oldfstat
4290 case TARGET_NR_oldfstat:
4293 #ifdef TARGET_NR_pause /* not on alpha */
4294 case TARGET_NR_pause:
4295 ret = get_errno(pause());
4298 #ifdef TARGET_NR_utime
4299 case TARGET_NR_utime:
4301 struct utimbuf tbuf, *host_tbuf;
4302 struct target_utimbuf *target_tbuf;
4304 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4306 tbuf.actime = tswapl(target_tbuf->actime);
4307 tbuf.modtime = tswapl(target_tbuf->modtime);
4308 unlock_user_struct(target_tbuf, arg2, 0);
4313 if (!(p = lock_user_string(arg1)))
4315 ret = get_errno(utime(p, host_tbuf));
4316 unlock_user(p, arg1, 0);
4320 case TARGET_NR_utimes:
4322 struct timeval *tvp, tv[2];
4324 if (copy_from_user_timeval(&tv[0], arg2)
4325 || copy_from_user_timeval(&tv[1],
4326 arg2 + sizeof(struct target_timeval)))
4332 if (!(p = lock_user_string(arg1)))
4334 ret = get_errno(utimes(p, tvp));
4335 unlock_user(p, arg1, 0);
4338 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4339 case TARGET_NR_futimesat:
4341 struct timeval *tvp, tv[2];
4343 if (copy_from_user_timeval(&tv[0], arg3)
4344 || copy_from_user_timeval(&tv[1],
4345 arg3 + sizeof(struct target_timeval)))
4351 if (!(p = lock_user_string(arg2)))
4353 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4354 unlock_user(p, arg2, 0);
4358 #ifdef TARGET_NR_stty
4359 case TARGET_NR_stty:
4362 #ifdef TARGET_NR_gtty
4363 case TARGET_NR_gtty:
4366 case TARGET_NR_access:
4367 if (!(p = lock_user_string(arg1)))
4369 ret = get_errno(access(p, arg2));
4370 unlock_user(p, arg1, 0);
4372 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4373 case TARGET_NR_faccessat:
4374 if (!(p = lock_user_string(arg2)))
4376 ret = get_errno(sys_faccessat(arg1, p, arg3, arg4));
4377 unlock_user(p, arg2, 0);
4380 #ifdef TARGET_NR_nice /* not on alpha */
4381 case TARGET_NR_nice:
4382 ret = get_errno(nice(arg1));
4385 #ifdef TARGET_NR_ftime
4386 case TARGET_NR_ftime:
4389 case TARGET_NR_sync:
4393 case TARGET_NR_kill:
4394 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
4396 case TARGET_NR_rename:
4399 p = lock_user_string(arg1);
4400 p2 = lock_user_string(arg2);
4402 ret = -TARGET_EFAULT;
4404 ret = get_errno(rename(p, p2));
4405 unlock_user(p2, arg2, 0);
4406 unlock_user(p, arg1, 0);
4409 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4410 case TARGET_NR_renameat:
4413 p = lock_user_string(arg2);
4414 p2 = lock_user_string(arg4);
4416 ret = -TARGET_EFAULT;
4418 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
4419 unlock_user(p2, arg4, 0);
4420 unlock_user(p, arg2, 0);
4424 case TARGET_NR_mkdir:
4425 if (!(p = lock_user_string(arg1)))
4427 ret = get_errno(mkdir(p, arg2));
4428 unlock_user(p, arg1, 0);
4430 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4431 case TARGET_NR_mkdirat:
4432 if (!(p = lock_user_string(arg2)))
4434 ret = get_errno(sys_mkdirat(arg1, p, arg3));
4435 unlock_user(p, arg2, 0);
4438 case TARGET_NR_rmdir:
4439 if (!(p = lock_user_string(arg1)))
4441 ret = get_errno(rmdir(p));
4442 unlock_user(p, arg1, 0);
4445 ret = get_errno(dup(arg1));
4447 case TARGET_NR_pipe:
4450 ret = get_errno(pipe(host_pipe));
4451 if (!is_error(ret)) {
4452 #if defined(TARGET_MIPS)
4453 CPUMIPSState *env = (CPUMIPSState*)cpu_env;
4454 env->active_tc.gpr[3] = host_pipe[1];
4456 #elif defined(TARGET_SH4)
4457 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
4460 if (put_user_s32(host_pipe[0], arg1)
4461 || put_user_s32(host_pipe[1], arg1 + sizeof(host_pipe[0])))
4467 case TARGET_NR_times:
4469 struct target_tms *tmsp;
4471 ret = get_errno(times(&tms));
4473 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
4476 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
4477 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
4478 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
4479 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
4482 ret = host_to_target_clock_t(ret);
4485 #ifdef TARGET_NR_prof
4486 case TARGET_NR_prof:
4489 #ifdef TARGET_NR_signal
4490 case TARGET_NR_signal:
4493 case TARGET_NR_acct:
4495 ret = get_errno(acct(NULL));
4497 if (!(p = lock_user_string(arg1)))
4499 ret = get_errno(acct(path(p)));
4500 unlock_user(p, arg1, 0);
4503 #ifdef TARGET_NR_umount2 /* not on alpha */
4504 case TARGET_NR_umount2:
4505 if (!(p = lock_user_string(arg1)))
4507 ret = get_errno(umount2(p, arg2));
4508 unlock_user(p, arg1, 0);
4511 #ifdef TARGET_NR_lock
4512 case TARGET_NR_lock:
4515 case TARGET_NR_ioctl:
4516 ret = do_ioctl(arg1, arg2, arg3);
4518 case TARGET_NR_fcntl:
4519 ret = do_fcntl(arg1, arg2, arg3);
4521 #ifdef TARGET_NR_mpx
4525 case TARGET_NR_setpgid:
4526 ret = get_errno(setpgid(arg1, arg2));
4528 #ifdef TARGET_NR_ulimit
4529 case TARGET_NR_ulimit:
4532 #ifdef TARGET_NR_oldolduname
4533 case TARGET_NR_oldolduname:
4536 case TARGET_NR_umask:
4537 ret = get_errno(umask(arg1));
4539 case TARGET_NR_chroot:
4540 if (!(p = lock_user_string(arg1)))
4542 ret = get_errno(chroot(p));
4543 unlock_user(p, arg1, 0);
4545 case TARGET_NR_ustat:
4547 case TARGET_NR_dup2:
4548 ret = get_errno(dup2(arg1, arg2));
4550 #ifdef TARGET_NR_getppid /* not on alpha */
4551 case TARGET_NR_getppid:
4552 ret = get_errno(getppid());
4555 case TARGET_NR_getpgrp:
4556 ret = get_errno(getpgrp());
4558 case TARGET_NR_setsid:
4559 ret = get_errno(setsid());
4561 #ifdef TARGET_NR_sigaction
4562 case TARGET_NR_sigaction:
4564 #if !defined(TARGET_MIPS)
4565 struct target_old_sigaction *old_act;
4566 struct target_sigaction act, oact, *pact;
4568 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4570 act._sa_handler = old_act->_sa_handler;
4571 target_siginitset(&act.sa_mask, old_act->sa_mask);
4572 act.sa_flags = old_act->sa_flags;
4573 act.sa_restorer = old_act->sa_restorer;
4574 unlock_user_struct(old_act, arg2, 0);
4579 ret = get_errno(do_sigaction(arg1, pact, &oact));
4580 if (!is_error(ret) && arg3) {
4581 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4583 old_act->_sa_handler = oact._sa_handler;
4584 old_act->sa_mask = oact.sa_mask.sig[0];
4585 old_act->sa_flags = oact.sa_flags;
4586 old_act->sa_restorer = oact.sa_restorer;
4587 unlock_user_struct(old_act, arg3, 1);
4590 struct target_sigaction act, oact, *pact, *old_act;
4593 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4595 act._sa_handler = old_act->_sa_handler;
4596 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
4597 act.sa_flags = old_act->sa_flags;
4598 unlock_user_struct(old_act, arg2, 0);
4604 ret = get_errno(do_sigaction(arg1, pact, &oact));
4606 if (!is_error(ret) && arg3) {
4607 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4609 old_act->_sa_handler = oact._sa_handler;
4610 old_act->sa_flags = oact.sa_flags;
4611 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
4612 old_act->sa_mask.sig[1] = 0;
4613 old_act->sa_mask.sig[2] = 0;
4614 old_act->sa_mask.sig[3] = 0;
4615 unlock_user_struct(old_act, arg3, 1);
4621 case TARGET_NR_rt_sigaction:
4623 struct target_sigaction *act;
4624 struct target_sigaction *oact;
4627 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
4632 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
4633 ret = -TARGET_EFAULT;
4634 goto rt_sigaction_fail;
4638 ret = get_errno(do_sigaction(arg1, act, oact));
4641 unlock_user_struct(act, arg2, 0);
4643 unlock_user_struct(oact, arg3, 1);
4646 #ifdef TARGET_NR_sgetmask /* not on alpha */
4647 case TARGET_NR_sgetmask:
4650 abi_ulong target_set;
4651 sigprocmask(0, NULL, &cur_set);
4652 host_to_target_old_sigset(&target_set, &cur_set);
4657 #ifdef TARGET_NR_ssetmask /* not on alpha */
4658 case TARGET_NR_ssetmask:
4660 sigset_t set, oset, cur_set;
4661 abi_ulong target_set = arg1;
4662 sigprocmask(0, NULL, &cur_set);
4663 target_to_host_old_sigset(&set, &target_set);
4664 sigorset(&set, &set, &cur_set);
4665 sigprocmask(SIG_SETMASK, &set, &oset);
4666 host_to_target_old_sigset(&target_set, &oset);
4671 #ifdef TARGET_NR_sigprocmask
4672 case TARGET_NR_sigprocmask:
4675 sigset_t set, oldset, *set_ptr;
4679 case TARGET_SIG_BLOCK:
4682 case TARGET_SIG_UNBLOCK:
4685 case TARGET_SIG_SETMASK:
4689 ret = -TARGET_EINVAL;
4692 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4694 target_to_host_old_sigset(&set, p);
4695 unlock_user(p, arg2, 0);
4701 ret = get_errno(sigprocmask(arg1, set_ptr, &oldset));
4702 if (!is_error(ret) && arg3) {
4703 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4705 host_to_target_old_sigset(p, &oldset);
4706 unlock_user(p, arg3, sizeof(target_sigset_t));
4711 case TARGET_NR_rt_sigprocmask:
4714 sigset_t set, oldset, *set_ptr;
4718 case TARGET_SIG_BLOCK:
4721 case TARGET_SIG_UNBLOCK:
4724 case TARGET_SIG_SETMASK:
4728 ret = -TARGET_EINVAL;
4731 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4733 target_to_host_sigset(&set, p);
4734 unlock_user(p, arg2, 0);
4740 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
4741 if (!is_error(ret) && arg3) {
4742 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4744 host_to_target_sigset(p, &oldset);
4745 unlock_user(p, arg3, sizeof(target_sigset_t));
4749 #ifdef TARGET_NR_sigpending
4750 case TARGET_NR_sigpending:
4753 ret = get_errno(sigpending(&set));
4754 if (!is_error(ret)) {
4755 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4757 host_to_target_old_sigset(p, &set);
4758 unlock_user(p, arg1, sizeof(target_sigset_t));
4763 case TARGET_NR_rt_sigpending:
4766 ret = get_errno(sigpending(&set));
4767 if (!is_error(ret)) {
4768 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4770 host_to_target_sigset(p, &set);
4771 unlock_user(p, arg1, sizeof(target_sigset_t));
4775 #ifdef TARGET_NR_sigsuspend
4776 case TARGET_NR_sigsuspend:
4779 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4781 target_to_host_old_sigset(&set, p);
4782 unlock_user(p, arg1, 0);
4783 ret = get_errno(sigsuspend(&set));
4787 case TARGET_NR_rt_sigsuspend:
4790 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4792 target_to_host_sigset(&set, p);
4793 unlock_user(p, arg1, 0);
4794 ret = get_errno(sigsuspend(&set));
4797 case TARGET_NR_rt_sigtimedwait:
4800 struct timespec uts, *puts;
4803 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4805 target_to_host_sigset(&set, p);
4806 unlock_user(p, arg1, 0);
4809 target_to_host_timespec(puts, arg3);
4813 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
4814 if (!is_error(ret) && arg2) {
4815 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
4817 host_to_target_siginfo(p, &uinfo);
4818 unlock_user(p, arg2, sizeof(target_siginfo_t));
4822 case TARGET_NR_rt_sigqueueinfo:
4825 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
4827 target_to_host_siginfo(&uinfo, p);
4828 unlock_user(p, arg1, 0);
4829 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
4832 #ifdef TARGET_NR_sigreturn
4833 case TARGET_NR_sigreturn:
4834 /* NOTE: ret is eax, so not transcoding must be done */
4835 ret = do_sigreturn(cpu_env);
4838 case TARGET_NR_rt_sigreturn:
4839 /* NOTE: ret is eax, so not transcoding must be done */
4840 ret = do_rt_sigreturn(cpu_env);
4842 case TARGET_NR_sethostname:
4843 if (!(p = lock_user_string(arg1)))
4845 ret = get_errno(sethostname(p, arg2));
4846 unlock_user(p, arg1, 0);
4848 case TARGET_NR_setrlimit:
4850 /* XXX: convert resource ? */
4851 int resource = arg1;
4852 struct target_rlimit *target_rlim;
4854 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
4856 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4857 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4858 unlock_user_struct(target_rlim, arg2, 0);
4859 ret = get_errno(setrlimit(resource, &rlim));
4862 case TARGET_NR_getrlimit:
4864 /* XXX: convert resource ? */
4865 int resource = arg1;
4866 struct target_rlimit *target_rlim;
4869 ret = get_errno(getrlimit(resource, &rlim));
4870 if (!is_error(ret)) {
4871 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
4873 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4874 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4875 unlock_user_struct(target_rlim, arg2, 1);
4879 case TARGET_NR_getrusage:
4881 struct rusage rusage;
4882 ret = get_errno(getrusage(arg1, &rusage));
4883 if (!is_error(ret)) {
4884 host_to_target_rusage(arg2, &rusage);
4888 case TARGET_NR_gettimeofday:
4891 ret = get_errno(gettimeofday(&tv, NULL));
4892 if (!is_error(ret)) {
4893 if (copy_to_user_timeval(arg1, &tv))
4898 case TARGET_NR_settimeofday:
4901 if (copy_from_user_timeval(&tv, arg1))
4903 ret = get_errno(settimeofday(&tv, NULL));
4906 #ifdef TARGET_NR_select
4907 case TARGET_NR_select:
4909 struct target_sel_arg_struct *sel;
4910 abi_ulong inp, outp, exp, tvp;
4913 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
4915 nsel = tswapl(sel->n);
4916 inp = tswapl(sel->inp);
4917 outp = tswapl(sel->outp);
4918 exp = tswapl(sel->exp);
4919 tvp = tswapl(sel->tvp);
4920 unlock_user_struct(sel, arg1, 0);
4921 ret = do_select(nsel, inp, outp, exp, tvp);
4925 case TARGET_NR_symlink:
4928 p = lock_user_string(arg1);
4929 p2 = lock_user_string(arg2);
4931 ret = -TARGET_EFAULT;
4933 ret = get_errno(symlink(p, p2));
4934 unlock_user(p2, arg2, 0);
4935 unlock_user(p, arg1, 0);
4938 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
4939 case TARGET_NR_symlinkat:
4942 p = lock_user_string(arg1);
4943 p2 = lock_user_string(arg3);
4945 ret = -TARGET_EFAULT;
4947 ret = get_errno(sys_symlinkat(p, arg2, p2));
4948 unlock_user(p2, arg3, 0);
4949 unlock_user(p, arg1, 0);
4953 #ifdef TARGET_NR_oldlstat
4954 case TARGET_NR_oldlstat:
4957 case TARGET_NR_readlink:
4960 p = lock_user_string(arg1);
4961 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
4963 ret = -TARGET_EFAULT;
4965 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
4966 char real[PATH_MAX];
4967 temp = realpath(exec_path,real);
4968 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
4969 snprintf((char *)p2, arg3, "%s", real);
4972 ret = get_errno(readlink(path(p), p2, arg3));
4974 unlock_user(p2, arg2, ret);
4975 unlock_user(p, arg1, 0);
4978 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
4979 case TARGET_NR_readlinkat:
4982 p = lock_user_string(arg2);
4983 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
4985 ret = -TARGET_EFAULT;
4987 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
4988 unlock_user(p2, arg3, ret);
4989 unlock_user(p, arg2, 0);
4993 #ifdef TARGET_NR_uselib
4994 case TARGET_NR_uselib:
4997 #ifdef TARGET_NR_swapon
4998 case TARGET_NR_swapon:
4999 if (!(p = lock_user_string(arg1)))
5001 ret = get_errno(swapon(p, arg2));
5002 unlock_user(p, arg1, 0);
5005 case TARGET_NR_reboot:
5007 #ifdef TARGET_NR_readdir
5008 case TARGET_NR_readdir:
5011 #ifdef TARGET_NR_mmap
5012 case TARGET_NR_mmap:
5013 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS)
5016 abi_ulong v1, v2, v3, v4, v5, v6;
5017 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5025 unlock_user(v, arg1, 0);
5026 ret = get_errno(target_mmap(v1, v2, v3,
5027 target_to_host_bitmask(v4, mmap_flags_tbl),
5031 ret = get_errno(target_mmap(arg1, arg2, arg3,
5032 target_to_host_bitmask(arg4, mmap_flags_tbl),
5038 #ifdef TARGET_NR_mmap2
5039 case TARGET_NR_mmap2:
5041 #define MMAP_SHIFT 12
5043 ret = get_errno(target_mmap(arg1, arg2, arg3,
5044 target_to_host_bitmask(arg4, mmap_flags_tbl),
5046 arg6 << MMAP_SHIFT));
5049 case TARGET_NR_munmap:
5050 ret = get_errno(target_munmap(arg1, arg2));
5052 case TARGET_NR_mprotect:
5053 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5055 #ifdef TARGET_NR_mremap
5056 case TARGET_NR_mremap:
5057 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5060 /* ??? msync/mlock/munlock are broken for softmmu. */
5061 #ifdef TARGET_NR_msync
5062 case TARGET_NR_msync:
5063 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5066 #ifdef TARGET_NR_mlock
5067 case TARGET_NR_mlock:
5068 ret = get_errno(mlock(g2h(arg1), arg2));
5071 #ifdef TARGET_NR_munlock
5072 case TARGET_NR_munlock:
5073 ret = get_errno(munlock(g2h(arg1), arg2));
5076 #ifdef TARGET_NR_mlockall
5077 case TARGET_NR_mlockall:
5078 ret = get_errno(mlockall(arg1));
5081 #ifdef TARGET_NR_munlockall
5082 case TARGET_NR_munlockall:
5083 ret = get_errno(munlockall());
5086 case TARGET_NR_truncate:
5087 if (!(p = lock_user_string(arg1)))
5089 ret = get_errno(truncate(p, arg2));
5090 unlock_user(p, arg1, 0);
5092 case TARGET_NR_ftruncate:
5093 ret = get_errno(ftruncate(arg1, arg2));
5095 case TARGET_NR_fchmod:
5096 ret = get_errno(fchmod(arg1, arg2));
5098 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5099 case TARGET_NR_fchmodat:
5100 if (!(p = lock_user_string(arg2)))
5102 ret = get_errno(sys_fchmodat(arg1, p, arg3, arg4));
5103 unlock_user(p, arg2, 0);
5106 case TARGET_NR_getpriority:
5107 /* libc does special remapping of the return value of
5108 * sys_getpriority() so it's just easiest to call
5109 * sys_getpriority() directly rather than through libc. */
5110 ret = sys_getpriority(arg1, arg2);
5112 case TARGET_NR_setpriority:
5113 ret = get_errno(setpriority(arg1, arg2, arg3));
5115 #ifdef TARGET_NR_profil
5116 case TARGET_NR_profil:
5119 case TARGET_NR_statfs:
5120 if (!(p = lock_user_string(arg1)))
5122 ret = get_errno(statfs(path(p), &stfs));
5123 unlock_user(p, arg1, 0);
5125 if (!is_error(ret)) {
5126 struct target_statfs *target_stfs;
5128 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5130 __put_user(stfs.f_type, &target_stfs->f_type);
5131 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5132 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5133 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5134 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5135 __put_user(stfs.f_files, &target_stfs->f_files);
5136 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5137 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5138 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5139 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5140 unlock_user_struct(target_stfs, arg2, 1);
5143 case TARGET_NR_fstatfs:
5144 ret = get_errno(fstatfs(arg1, &stfs));
5145 goto convert_statfs;
5146 #ifdef TARGET_NR_statfs64
5147 case TARGET_NR_statfs64:
5148 if (!(p = lock_user_string(arg1)))
5150 ret = get_errno(statfs(path(p), &stfs));
5151 unlock_user(p, arg1, 0);
5153 if (!is_error(ret)) {
5154 struct target_statfs64 *target_stfs;
5156 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5158 __put_user(stfs.f_type, &target_stfs->f_type);
5159 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5160 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5161 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5162 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5163 __put_user(stfs.f_files, &target_stfs->f_files);
5164 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5165 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5166 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5167 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5168 unlock_user_struct(target_stfs, arg3, 1);
5171 case TARGET_NR_fstatfs64:
5172 ret = get_errno(fstatfs(arg1, &stfs));
5173 goto convert_statfs64;
5175 #ifdef TARGET_NR_ioperm
5176 case TARGET_NR_ioperm:
5179 #ifdef TARGET_NR_socketcall
5180 case TARGET_NR_socketcall:
5181 ret = do_socketcall(arg1, arg2);
5184 #ifdef TARGET_NR_accept
5185 case TARGET_NR_accept:
5186 ret = do_accept(arg1, arg2, arg3);
5189 #ifdef TARGET_NR_bind
5190 case TARGET_NR_bind:
5191 ret = do_bind(arg1, arg2, arg3);
5194 #ifdef TARGET_NR_connect
5195 case TARGET_NR_connect:
5196 ret = do_connect(arg1, arg2, arg3);
5199 #ifdef TARGET_NR_getpeername
5200 case TARGET_NR_getpeername:
5201 ret = do_getpeername(arg1, arg2, arg3);
5204 #ifdef TARGET_NR_getsockname
5205 case TARGET_NR_getsockname:
5206 ret = do_getsockname(arg1, arg2, arg3);
5209 #ifdef TARGET_NR_getsockopt
5210 case TARGET_NR_getsockopt:
5211 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
5214 #ifdef TARGET_NR_listen
5215 case TARGET_NR_listen:
5216 ret = get_errno(listen(arg1, arg2));
5219 #ifdef TARGET_NR_recv
5220 case TARGET_NR_recv:
5221 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
5224 #ifdef TARGET_NR_recvfrom
5225 case TARGET_NR_recvfrom:
5226 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
5229 #ifdef TARGET_NR_recvmsg
5230 case TARGET_NR_recvmsg:
5231 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
5234 #ifdef TARGET_NR_send
5235 case TARGET_NR_send:
5236 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
5239 #ifdef TARGET_NR_sendmsg
5240 case TARGET_NR_sendmsg:
5241 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
5244 #ifdef TARGET_NR_sendto
5245 case TARGET_NR_sendto:
5246 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
5249 #ifdef TARGET_NR_shutdown
5250 case TARGET_NR_shutdown:
5251 ret = get_errno(shutdown(arg1, arg2));
5254 #ifdef TARGET_NR_socket
5255 case TARGET_NR_socket:
5256 ret = do_socket(arg1, arg2, arg3);
5259 #ifdef TARGET_NR_socketpair
5260 case TARGET_NR_socketpair:
5261 ret = do_socketpair(arg1, arg2, arg3, arg4);
5264 #ifdef TARGET_NR_setsockopt
5265 case TARGET_NR_setsockopt:
5266 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
5270 case TARGET_NR_syslog:
5271 if (!(p = lock_user_string(arg2)))
5273 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
5274 unlock_user(p, arg2, 0);
5277 case TARGET_NR_setitimer:
5279 struct itimerval value, ovalue, *pvalue;
5283 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
5284 || copy_from_user_timeval(&pvalue->it_value,
5285 arg2 + sizeof(struct target_timeval)))
5290 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
5291 if (!is_error(ret) && arg3) {
5292 if (copy_to_user_timeval(arg3,
5293 &ovalue.it_interval)
5294 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
5300 case TARGET_NR_getitimer:
5302 struct itimerval value;
5304 ret = get_errno(getitimer(arg1, &value));
5305 if (!is_error(ret) && arg2) {
5306 if (copy_to_user_timeval(arg2,
5308 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
5314 case TARGET_NR_stat:
5315 if (!(p = lock_user_string(arg1)))
5317 ret = get_errno(stat(path(p), &st));
5318 unlock_user(p, arg1, 0);
5320 case TARGET_NR_lstat:
5321 if (!(p = lock_user_string(arg1)))
5323 ret = get_errno(lstat(path(p), &st));
5324 unlock_user(p, arg1, 0);
5326 case TARGET_NR_fstat:
5328 ret = get_errno(fstat(arg1, &st));
5330 if (!is_error(ret)) {
5331 struct target_stat *target_st;
5333 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
5335 __put_user(st.st_dev, &target_st->st_dev);
5336 __put_user(st.st_ino, &target_st->st_ino);
5337 __put_user(st.st_mode, &target_st->st_mode);
5338 __put_user(st.st_uid, &target_st->st_uid);
5339 __put_user(st.st_gid, &target_st->st_gid);
5340 __put_user(st.st_nlink, &target_st->st_nlink);
5341 __put_user(st.st_rdev, &target_st->st_rdev);
5342 __put_user(st.st_size, &target_st->st_size);
5343 __put_user(st.st_blksize, &target_st->st_blksize);
5344 __put_user(st.st_blocks, &target_st->st_blocks);
5345 __put_user(st.st_atime, &target_st->target_st_atime);
5346 __put_user(st.st_mtime, &target_st->target_st_mtime);
5347 __put_user(st.st_ctime, &target_st->target_st_ctime);
5348 unlock_user_struct(target_st, arg2, 1);
5352 #ifdef TARGET_NR_olduname
5353 case TARGET_NR_olduname:
5356 #ifdef TARGET_NR_iopl
5357 case TARGET_NR_iopl:
5360 case TARGET_NR_vhangup:
5361 ret = get_errno(vhangup());
5363 #ifdef TARGET_NR_idle
5364 case TARGET_NR_idle:
5367 #ifdef TARGET_NR_syscall
5368 case TARGET_NR_syscall:
5369 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
5372 case TARGET_NR_wait4:
5375 abi_long status_ptr = arg2;
5376 struct rusage rusage, *rusage_ptr;
5377 abi_ulong target_rusage = arg4;
5379 rusage_ptr = &rusage;
5382 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
5383 if (!is_error(ret)) {
5385 if (put_user_s32(status, status_ptr))
5389 host_to_target_rusage(target_rusage, &rusage);
5393 #ifdef TARGET_NR_swapoff
5394 case TARGET_NR_swapoff:
5395 if (!(p = lock_user_string(arg1)))
5397 ret = get_errno(swapoff(p));
5398 unlock_user(p, arg1, 0);
5401 case TARGET_NR_sysinfo:
5403 struct target_sysinfo *target_value;
5404 struct sysinfo value;
5405 ret = get_errno(sysinfo(&value));
5406 if (!is_error(ret) && arg1)
5408 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
5410 __put_user(value.uptime, &target_value->uptime);
5411 __put_user(value.loads[0], &target_value->loads[0]);
5412 __put_user(value.loads[1], &target_value->loads[1]);
5413 __put_user(value.loads[2], &target_value->loads[2]);
5414 __put_user(value.totalram, &target_value->totalram);
5415 __put_user(value.freeram, &target_value->freeram);
5416 __put_user(value.sharedram, &target_value->sharedram);
5417 __put_user(value.bufferram, &target_value->bufferram);
5418 __put_user(value.totalswap, &target_value->totalswap);
5419 __put_user(value.freeswap, &target_value->freeswap);
5420 __put_user(value.procs, &target_value->procs);
5421 __put_user(value.totalhigh, &target_value->totalhigh);
5422 __put_user(value.freehigh, &target_value->freehigh);
5423 __put_user(value.mem_unit, &target_value->mem_unit);
5424 unlock_user_struct(target_value, arg1, 1);
5428 #ifdef TARGET_NR_ipc
5430 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
5433 #ifdef TARGET_NR_semget
5434 case TARGET_NR_semget:
5435 ret = get_errno(semget(arg1, arg2, arg3));
5438 #ifdef TARGET_NR_semop
5439 case TARGET_NR_semop:
5440 ret = get_errno(do_semop(arg1, arg2, arg3));
5443 #ifdef TARGET_NR_semctl
5444 case TARGET_NR_semctl:
5445 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
5448 #ifdef TARGET_NR_msgctl
5449 case TARGET_NR_msgctl:
5450 ret = do_msgctl(arg1, arg2, arg3);
5453 #ifdef TARGET_NR_msgget
5454 case TARGET_NR_msgget:
5455 ret = get_errno(msgget(arg1, arg2));
5458 #ifdef TARGET_NR_msgrcv
5459 case TARGET_NR_msgrcv:
5460 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
5463 #ifdef TARGET_NR_msgsnd
5464 case TARGET_NR_msgsnd:
5465 ret = do_msgsnd(arg1, arg2, arg3, arg4);
5468 #ifdef TARGET_NR_shmget
5469 case TARGET_NR_shmget:
5470 ret = get_errno(shmget(arg1, arg2, arg3));
5473 #ifdef TARGET_NR_shmctl
5474 case TARGET_NR_shmctl:
5475 ret = do_shmctl(arg1, arg2, arg3);
5478 #ifdef TARGET_NR_shmat
5479 case TARGET_NR_shmat:
5484 err = do_shmat(arg1, arg2, arg3, &_ret);
5485 ret = err ? err : _ret;
5489 #ifdef TARGET_NR_shmdt
5490 case TARGET_NR_shmdt:
5491 ret = do_shmdt(arg1);
5494 case TARGET_NR_fsync:
5495 ret = get_errno(fsync(arg1));
5497 case TARGET_NR_clone:
5498 #if defined(TARGET_SH4)
5499 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
5500 #elif defined(TARGET_CRIS)
5501 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
5503 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
5506 #ifdef __NR_exit_group
5507 /* new thread calls */
5508 case TARGET_NR_exit_group:
5512 gdb_exit(cpu_env, arg1);
5513 ret = get_errno(exit_group(arg1));
5516 case TARGET_NR_setdomainname:
5517 if (!(p = lock_user_string(arg1)))
5519 ret = get_errno(setdomainname(p, arg2));
5520 unlock_user(p, arg1, 0);
5522 case TARGET_NR_uname:
5523 /* no need to transcode because we use the linux syscall */
5525 struct new_utsname * buf;
5527 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
5529 ret = get_errno(sys_uname(buf));
5530 if (!is_error(ret)) {
5531 /* Overwrite the native machine name with whatever is being
5533 strcpy (buf->machine, UNAME_MACHINE);
5534 /* Allow the user to override the reported release. */
5535 if (qemu_uname_release && *qemu_uname_release)
5536 strcpy (buf->release, qemu_uname_release);
5538 unlock_user_struct(buf, arg1, 1);
5542 case TARGET_NR_modify_ldt:
5543 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
5545 #if !defined(TARGET_X86_64)
5546 case TARGET_NR_vm86old:
5548 case TARGET_NR_vm86:
5549 ret = do_vm86(cpu_env, arg1, arg2);
5553 case TARGET_NR_adjtimex:
5555 #ifdef TARGET_NR_create_module
5556 case TARGET_NR_create_module:
5558 case TARGET_NR_init_module:
5559 case TARGET_NR_delete_module:
5560 #ifdef TARGET_NR_get_kernel_syms
5561 case TARGET_NR_get_kernel_syms:
5564 case TARGET_NR_quotactl:
5566 case TARGET_NR_getpgid:
5567 ret = get_errno(getpgid(arg1));
5569 case TARGET_NR_fchdir:
5570 ret = get_errno(fchdir(arg1));
5572 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5573 case TARGET_NR_bdflush:
5576 #ifdef TARGET_NR_sysfs
5577 case TARGET_NR_sysfs:
5580 case TARGET_NR_personality:
5581 ret = get_errno(personality(arg1));
5583 #ifdef TARGET_NR_afs_syscall
5584 case TARGET_NR_afs_syscall:
5587 #ifdef TARGET_NR__llseek /* Not on alpha */
5588 case TARGET_NR__llseek:
5590 #if defined (__x86_64__)
5591 ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5));
5592 if (put_user_s64(ret, arg4))
5596 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
5597 if (put_user_s64(res, arg4))
5603 case TARGET_NR_getdents:
5604 #if TARGET_ABI_BITS != 32
5606 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5608 struct target_dirent *target_dirp;
5609 struct linux_dirent *dirp;
5610 abi_long count = arg3;
5612 dirp = malloc(count);
5614 ret = -TARGET_ENOMEM;
5618 ret = get_errno(sys_getdents(arg1, dirp, count));
5619 if (!is_error(ret)) {
5620 struct linux_dirent *de;
5621 struct target_dirent *tde;
5623 int reclen, treclen;
5624 int count1, tnamelen;
5628 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5632 reclen = de->d_reclen;
5633 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
5634 tde->d_reclen = tswap16(treclen);
5635 tde->d_ino = tswapl(de->d_ino);
5636 tde->d_off = tswapl(de->d_off);
5637 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
5640 /* XXX: may not be correct */
5641 pstrcpy(tde->d_name, tnamelen, de->d_name);
5642 de = (struct linux_dirent *)((char *)de + reclen);
5644 tde = (struct target_dirent *)((char *)tde + treclen);
5648 unlock_user(target_dirp, arg2, ret);
5654 struct linux_dirent *dirp;
5655 abi_long count = arg3;
5657 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5659 ret = get_errno(sys_getdents(arg1, dirp, count));
5660 if (!is_error(ret)) {
5661 struct linux_dirent *de;
5666 reclen = de->d_reclen;
5669 de->d_reclen = tswap16(reclen);
5670 tswapls(&de->d_ino);
5671 tswapls(&de->d_off);
5672 de = (struct linux_dirent *)((char *)de + reclen);
5676 unlock_user(dirp, arg2, ret);
5680 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5681 case TARGET_NR_getdents64:
5683 struct linux_dirent64 *dirp;
5684 abi_long count = arg3;
5685 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5687 ret = get_errno(sys_getdents64(arg1, dirp, count));
5688 if (!is_error(ret)) {
5689 struct linux_dirent64 *de;
5694 reclen = de->d_reclen;
5697 de->d_reclen = tswap16(reclen);
5698 tswap64s((uint64_t *)&de->d_ino);
5699 tswap64s((uint64_t *)&de->d_off);
5700 de = (struct linux_dirent64 *)((char *)de + reclen);
5704 unlock_user(dirp, arg2, ret);
5707 #endif /* TARGET_NR_getdents64 */
5708 #ifdef TARGET_NR__newselect
5709 case TARGET_NR__newselect:
5710 ret = do_select(arg1, arg2, arg3, arg4, arg5);
5713 #ifdef TARGET_NR_poll
5714 case TARGET_NR_poll:
5716 struct target_pollfd *target_pfd;
5717 unsigned int nfds = arg2;
5722 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
5725 pfd = alloca(sizeof(struct pollfd) * nfds);
5726 for(i = 0; i < nfds; i++) {
5727 pfd[i].fd = tswap32(target_pfd[i].fd);
5728 pfd[i].events = tswap16(target_pfd[i].events);
5730 ret = get_errno(poll(pfd, nfds, timeout));
5731 if (!is_error(ret)) {
5732 for(i = 0; i < nfds; i++) {
5733 target_pfd[i].revents = tswap16(pfd[i].revents);
5735 ret += nfds * (sizeof(struct target_pollfd)
5736 - sizeof(struct pollfd));
5738 unlock_user(target_pfd, arg1, ret);
5742 case TARGET_NR_flock:
5743 /* NOTE: the flock constant seems to be the same for every
5745 ret = get_errno(flock(arg1, arg2));
5747 case TARGET_NR_readv:
5752 vec = alloca(count * sizeof(struct iovec));
5753 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
5755 ret = get_errno(readv(arg1, vec, count));
5756 unlock_iovec(vec, arg2, count, 1);
5759 case TARGET_NR_writev:
5764 vec = alloca(count * sizeof(struct iovec));
5765 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
5767 ret = get_errno(writev(arg1, vec, count));
5768 unlock_iovec(vec, arg2, count, 0);
5771 case TARGET_NR_getsid:
5772 ret = get_errno(getsid(arg1));
5774 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5775 case TARGET_NR_fdatasync:
5776 ret = get_errno(fdatasync(arg1));
5779 case TARGET_NR__sysctl:
5780 /* We don't implement this, but ENOTDIR is always a safe
5782 ret = -TARGET_ENOTDIR;
5784 case TARGET_NR_sched_setparam:
5786 struct sched_param *target_schp;
5787 struct sched_param schp;
5789 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
5791 schp.sched_priority = tswap32(target_schp->sched_priority);
5792 unlock_user_struct(target_schp, arg2, 0);
5793 ret = get_errno(sched_setparam(arg1, &schp));
5796 case TARGET_NR_sched_getparam:
5798 struct sched_param *target_schp;
5799 struct sched_param schp;
5800 ret = get_errno(sched_getparam(arg1, &schp));
5801 if (!is_error(ret)) {
5802 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
5804 target_schp->sched_priority = tswap32(schp.sched_priority);
5805 unlock_user_struct(target_schp, arg2, 1);
5809 case TARGET_NR_sched_setscheduler:
5811 struct sched_param *target_schp;
5812 struct sched_param schp;
5813 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
5815 schp.sched_priority = tswap32(target_schp->sched_priority);
5816 unlock_user_struct(target_schp, arg3, 0);
5817 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
5820 case TARGET_NR_sched_getscheduler:
5821 ret = get_errno(sched_getscheduler(arg1));
5823 case TARGET_NR_sched_yield:
5824 ret = get_errno(sched_yield());
5826 case TARGET_NR_sched_get_priority_max:
5827 ret = get_errno(sched_get_priority_max(arg1));
5829 case TARGET_NR_sched_get_priority_min:
5830 ret = get_errno(sched_get_priority_min(arg1));
5832 case TARGET_NR_sched_rr_get_interval:
5835 ret = get_errno(sched_rr_get_interval(arg1, &ts));
5836 if (!is_error(ret)) {
5837 host_to_target_timespec(arg2, &ts);
5841 case TARGET_NR_nanosleep:
5843 struct timespec req, rem;
5844 target_to_host_timespec(&req, arg1);
5845 ret = get_errno(nanosleep(&req, &rem));
5846 if (is_error(ret) && arg2) {
5847 host_to_target_timespec(arg2, &rem);
5851 #ifdef TARGET_NR_query_module
5852 case TARGET_NR_query_module:
5855 #ifdef TARGET_NR_nfsservctl
5856 case TARGET_NR_nfsservctl:
5859 case TARGET_NR_prctl:
5862 case PR_GET_PDEATHSIG:
5865 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
5866 if (!is_error(ret) && arg2
5867 && put_user_ual(deathsig, arg2))
5872 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
5876 #ifdef TARGET_NR_arch_prctl
5877 case TARGET_NR_arch_prctl:
5878 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
5879 ret = do_arch_prctl(cpu_env, arg1, arg2);
5885 #ifdef TARGET_NR_pread
5886 case TARGET_NR_pread:
5888 if (((CPUARMState *)cpu_env)->eabi)
5891 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5893 ret = get_errno(pread(arg1, p, arg3, arg4));
5894 unlock_user(p, arg2, ret);
5896 case TARGET_NR_pwrite:
5898 if (((CPUARMState *)cpu_env)->eabi)
5901 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5903 ret = get_errno(pwrite(arg1, p, arg3, arg4));
5904 unlock_user(p, arg2, 0);
5907 #ifdef TARGET_NR_pread64
5908 case TARGET_NR_pread64:
5909 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5911 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
5912 unlock_user(p, arg2, ret);
5914 case TARGET_NR_pwrite64:
5915 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5917 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
5918 unlock_user(p, arg2, 0);
5921 case TARGET_NR_getcwd:
5922 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
5924 ret = get_errno(sys_getcwd1(p, arg2));
5925 unlock_user(p, arg1, ret);
5927 case TARGET_NR_capget:
5929 case TARGET_NR_capset:
5931 case TARGET_NR_sigaltstack:
5932 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
5933 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
5934 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
5939 case TARGET_NR_sendfile:
5941 #ifdef TARGET_NR_getpmsg
5942 case TARGET_NR_getpmsg:
5945 #ifdef TARGET_NR_putpmsg
5946 case TARGET_NR_putpmsg:
5949 #ifdef TARGET_NR_vfork
5950 case TARGET_NR_vfork:
5951 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
5955 #ifdef TARGET_NR_ugetrlimit
5956 case TARGET_NR_ugetrlimit:
5959 ret = get_errno(getrlimit(arg1, &rlim));
5960 if (!is_error(ret)) {
5961 struct target_rlimit *target_rlim;
5962 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5964 target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
5965 target_rlim->rlim_max = tswapl(rlim.rlim_max);
5966 unlock_user_struct(target_rlim, arg2, 1);
5971 #ifdef TARGET_NR_truncate64
5972 case TARGET_NR_truncate64:
5973 if (!(p = lock_user_string(arg1)))
5975 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
5976 unlock_user(p, arg1, 0);
5979 #ifdef TARGET_NR_ftruncate64
5980 case TARGET_NR_ftruncate64:
5981 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
5984 #ifdef TARGET_NR_stat64
5985 case TARGET_NR_stat64:
5986 if (!(p = lock_user_string(arg1)))
5988 ret = get_errno(stat(path(p), &st));
5989 unlock_user(p, arg1, 0);
5991 ret = host_to_target_stat64(cpu_env, arg2, &st);
5994 #ifdef TARGET_NR_lstat64
5995 case TARGET_NR_lstat64:
5996 if (!(p = lock_user_string(arg1)))
5998 ret = get_errno(lstat(path(p), &st));
5999 unlock_user(p, arg1, 0);
6001 ret = host_to_target_stat64(cpu_env, arg2, &st);
6004 #ifdef TARGET_NR_fstat64
6005 case TARGET_NR_fstat64:
6006 ret = get_errno(fstat(arg1, &st));
6008 ret = host_to_target_stat64(cpu_env, arg2, &st);
6011 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6012 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6013 #ifdef TARGET_NR_fstatat64
6014 case TARGET_NR_fstatat64:
6016 #ifdef TARGET_NR_newfstatat
6017 case TARGET_NR_newfstatat:
6019 if (!(p = lock_user_string(arg2)))
6021 #ifdef __NR_fstatat64
6022 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6024 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6027 ret = host_to_target_stat64(cpu_env, arg3, &st);
6031 case TARGET_NR_lchown:
6032 if (!(p = lock_user_string(arg1)))
6034 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6035 unlock_user(p, arg1, 0);
6037 case TARGET_NR_getuid:
6038 ret = get_errno(high2lowuid(getuid()));
6040 case TARGET_NR_getgid:
6041 ret = get_errno(high2lowgid(getgid()));
6043 case TARGET_NR_geteuid:
6044 ret = get_errno(high2lowuid(geteuid()));
6046 case TARGET_NR_getegid:
6047 ret = get_errno(high2lowgid(getegid()));
6049 case TARGET_NR_setreuid:
6050 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6052 case TARGET_NR_setregid:
6053 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6055 case TARGET_NR_getgroups:
6057 int gidsetsize = arg1;
6058 uint16_t *target_grouplist;
6062 grouplist = alloca(gidsetsize * sizeof(gid_t));
6063 ret = get_errno(getgroups(gidsetsize, grouplist));
6064 if (gidsetsize == 0)
6066 if (!is_error(ret)) {
6067 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6068 if (!target_grouplist)
6070 for(i = 0;i < ret; i++)
6071 target_grouplist[i] = tswap16(grouplist[i]);
6072 unlock_user(target_grouplist, arg2, gidsetsize * 2);
6076 case TARGET_NR_setgroups:
6078 int gidsetsize = arg1;
6079 uint16_t *target_grouplist;
6083 grouplist = alloca(gidsetsize * sizeof(gid_t));
6084 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
6085 if (!target_grouplist) {
6086 ret = -TARGET_EFAULT;
6089 for(i = 0;i < gidsetsize; i++)
6090 grouplist[i] = tswap16(target_grouplist[i]);
6091 unlock_user(target_grouplist, arg2, 0);
6092 ret = get_errno(setgroups(gidsetsize, grouplist));
6095 case TARGET_NR_fchown:
6096 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
6098 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6099 case TARGET_NR_fchownat:
6100 if (!(p = lock_user_string(arg2)))
6102 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
6103 unlock_user(p, arg2, 0);
6106 #ifdef TARGET_NR_setresuid
6107 case TARGET_NR_setresuid:
6108 ret = get_errno(setresuid(low2highuid(arg1),
6110 low2highuid(arg3)));
6113 #ifdef TARGET_NR_getresuid
6114 case TARGET_NR_getresuid:
6116 uid_t ruid, euid, suid;
6117 ret = get_errno(getresuid(&ruid, &euid, &suid));
6118 if (!is_error(ret)) {
6119 if (put_user_u16(high2lowuid(ruid), arg1)
6120 || put_user_u16(high2lowuid(euid), arg2)
6121 || put_user_u16(high2lowuid(suid), arg3))
6127 #ifdef TARGET_NR_getresgid
6128 case TARGET_NR_setresgid:
6129 ret = get_errno(setresgid(low2highgid(arg1),
6131 low2highgid(arg3)));
6134 #ifdef TARGET_NR_getresgid
6135 case TARGET_NR_getresgid:
6137 gid_t rgid, egid, sgid;
6138 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6139 if (!is_error(ret)) {
6140 if (put_user_u16(high2lowgid(rgid), arg1)
6141 || put_user_u16(high2lowgid(egid), arg2)
6142 || put_user_u16(high2lowgid(sgid), arg3))
6148 case TARGET_NR_chown:
6149 if (!(p = lock_user_string(arg1)))
6151 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
6152 unlock_user(p, arg1, 0);
6154 case TARGET_NR_setuid:
6155 ret = get_errno(setuid(low2highuid(arg1)));
6157 case TARGET_NR_setgid:
6158 ret = get_errno(setgid(low2highgid(arg1)));
6160 case TARGET_NR_setfsuid:
6161 ret = get_errno(setfsuid(arg1));
6163 case TARGET_NR_setfsgid:
6164 ret = get_errno(setfsgid(arg1));
6166 #endif /* USE_UID16 */
6168 #ifdef TARGET_NR_lchown32
6169 case TARGET_NR_lchown32:
6170 if (!(p = lock_user_string(arg1)))
6172 ret = get_errno(lchown(p, arg2, arg3));
6173 unlock_user(p, arg1, 0);
6176 #ifdef TARGET_NR_getuid32
6177 case TARGET_NR_getuid32:
6178 ret = get_errno(getuid());
6182 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6183 /* Alpha specific */
6184 case TARGET_NR_getxuid:
6188 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
6190 ret = get_errno(getuid());
6193 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6194 /* Alpha specific */
6195 case TARGET_NR_getxgid:
6199 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
6201 ret = get_errno(getgid());
6205 #ifdef TARGET_NR_getgid32
6206 case TARGET_NR_getgid32:
6207 ret = get_errno(getgid());
6210 #ifdef TARGET_NR_geteuid32
6211 case TARGET_NR_geteuid32:
6212 ret = get_errno(geteuid());
6215 #ifdef TARGET_NR_getegid32
6216 case TARGET_NR_getegid32:
6217 ret = get_errno(getegid());
6220 #ifdef TARGET_NR_setreuid32
6221 case TARGET_NR_setreuid32:
6222 ret = get_errno(setreuid(arg1, arg2));
6225 #ifdef TARGET_NR_setregid32
6226 case TARGET_NR_setregid32:
6227 ret = get_errno(setregid(arg1, arg2));
6230 #ifdef TARGET_NR_getgroups32
6231 case TARGET_NR_getgroups32:
6233 int gidsetsize = arg1;
6234 uint32_t *target_grouplist;
6238 grouplist = alloca(gidsetsize * sizeof(gid_t));
6239 ret = get_errno(getgroups(gidsetsize, grouplist));
6240 if (gidsetsize == 0)
6242 if (!is_error(ret)) {
6243 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
6244 if (!target_grouplist) {
6245 ret = -TARGET_EFAULT;
6248 for(i = 0;i < ret; i++)
6249 target_grouplist[i] = tswap32(grouplist[i]);
6250 unlock_user(target_grouplist, arg2, gidsetsize * 4);
6255 #ifdef TARGET_NR_setgroups32
6256 case TARGET_NR_setgroups32:
6258 int gidsetsize = arg1;
6259 uint32_t *target_grouplist;
6263 grouplist = alloca(gidsetsize * sizeof(gid_t));
6264 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
6265 if (!target_grouplist) {
6266 ret = -TARGET_EFAULT;
6269 for(i = 0;i < gidsetsize; i++)
6270 grouplist[i] = tswap32(target_grouplist[i]);
6271 unlock_user(target_grouplist, arg2, 0);
6272 ret = get_errno(setgroups(gidsetsize, grouplist));
6276 #ifdef TARGET_NR_fchown32
6277 case TARGET_NR_fchown32:
6278 ret = get_errno(fchown(arg1, arg2, arg3));
6281 #ifdef TARGET_NR_setresuid32
6282 case TARGET_NR_setresuid32:
6283 ret = get_errno(setresuid(arg1, arg2, arg3));
6286 #ifdef TARGET_NR_getresuid32
6287 case TARGET_NR_getresuid32:
6289 uid_t ruid, euid, suid;
6290 ret = get_errno(getresuid(&ruid, &euid, &suid));
6291 if (!is_error(ret)) {
6292 if (put_user_u32(ruid, arg1)
6293 || put_user_u32(euid, arg2)
6294 || put_user_u32(suid, arg3))
6300 #ifdef TARGET_NR_setresgid32
6301 case TARGET_NR_setresgid32:
6302 ret = get_errno(setresgid(arg1, arg2, arg3));
6305 #ifdef TARGET_NR_getresgid32
6306 case TARGET_NR_getresgid32:
6308 gid_t rgid, egid, sgid;
6309 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6310 if (!is_error(ret)) {
6311 if (put_user_u32(rgid, arg1)
6312 || put_user_u32(egid, arg2)
6313 || put_user_u32(sgid, arg3))
6319 #ifdef TARGET_NR_chown32
6320 case TARGET_NR_chown32:
6321 if (!(p = lock_user_string(arg1)))
6323 ret = get_errno(chown(p, arg2, arg3));
6324 unlock_user(p, arg1, 0);
6327 #ifdef TARGET_NR_setuid32
6328 case TARGET_NR_setuid32:
6329 ret = get_errno(setuid(arg1));
6332 #ifdef TARGET_NR_setgid32
6333 case TARGET_NR_setgid32:
6334 ret = get_errno(setgid(arg1));
6337 #ifdef TARGET_NR_setfsuid32
6338 case TARGET_NR_setfsuid32:
6339 ret = get_errno(setfsuid(arg1));
6342 #ifdef TARGET_NR_setfsgid32
6343 case TARGET_NR_setfsgid32:
6344 ret = get_errno(setfsgid(arg1));
6348 case TARGET_NR_pivot_root:
6350 #ifdef TARGET_NR_mincore
6351 case TARGET_NR_mincore:
6354 ret = -TARGET_EFAULT;
6355 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
6357 if (!(p = lock_user_string(arg3)))
6359 ret = get_errno(mincore(a, arg2, p));
6360 unlock_user(p, arg3, ret);
6362 unlock_user(a, arg1, 0);
6366 #ifdef TARGET_NR_arm_fadvise64_64
6367 case TARGET_NR_arm_fadvise64_64:
6370 * arm_fadvise64_64 looks like fadvise64_64 but
6371 * with different argument order
6379 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
6380 #ifdef TARGET_NR_fadvise64_64
6381 case TARGET_NR_fadvise64_64:
6383 /* This is a hint, so ignoring and returning success is ok. */
6387 #ifdef TARGET_NR_madvise
6388 case TARGET_NR_madvise:
6389 /* A straight passthrough may not be safe because qemu sometimes
5390 turns private file-backed mappings into anonymous mappings.
6391 This will break MADV_DONTNEED.
6392 This is a hint, so ignoring and returning success is ok. */
6396 #if TARGET_ABI_BITS == 32
6397 case TARGET_NR_fcntl64:
6401 struct target_flock64 *target_fl;
6403 struct target_eabi_flock64 *target_efl;
6407 case TARGET_F_GETLK64:
6410 case TARGET_F_SETLK64:
6413 case TARGET_F_SETLKW64:
6422 case TARGET_F_GETLK64:
6424 if (((CPUARMState *)cpu_env)->eabi) {
6425 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6427 fl.l_type = tswap16(target_efl->l_type);
6428 fl.l_whence = tswap16(target_efl->l_whence);
6429 fl.l_start = tswap64(target_efl->l_start);
6430 fl.l_len = tswap64(target_efl->l_len);
6431 fl.l_pid = tswapl(target_efl->l_pid);
6432 unlock_user_struct(target_efl, arg3, 0);
6436 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6438 fl.l_type = tswap16(target_fl->l_type);
6439 fl.l_whence = tswap16(target_fl->l_whence);
6440 fl.l_start = tswap64(target_fl->l_start);
6441 fl.l_len = tswap64(target_fl->l_len);
6442 fl.l_pid = tswapl(target_fl->l_pid);
6443 unlock_user_struct(target_fl, arg3, 0);
6445 ret = get_errno(fcntl(arg1, cmd, &fl));
6448 if (((CPUARMState *)cpu_env)->eabi) {
6449 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
6451 target_efl->l_type = tswap16(fl.l_type);
6452 target_efl->l_whence = tswap16(fl.l_whence);
6453 target_efl->l_start = tswap64(fl.l_start);
6454 target_efl->l_len = tswap64(fl.l_len);
6455 target_efl->l_pid = tswapl(fl.l_pid);
6456 unlock_user_struct(target_efl, arg3, 1);
6460 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
6462 target_fl->l_type = tswap16(fl.l_type);
6463 target_fl->l_whence = tswap16(fl.l_whence);
6464 target_fl->l_start = tswap64(fl.l_start);
6465 target_fl->l_len = tswap64(fl.l_len);
6466 target_fl->l_pid = tswapl(fl.l_pid);
6467 unlock_user_struct(target_fl, arg3, 1);
6472 case TARGET_F_SETLK64:
6473 case TARGET_F_SETLKW64:
6475 if (((CPUARMState *)cpu_env)->eabi) {
6476 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6478 fl.l_type = tswap16(target_efl->l_type);
6479 fl.l_whence = tswap16(target_efl->l_whence);
6480 fl.l_start = tswap64(target_efl->l_start);
6481 fl.l_len = tswap64(target_efl->l_len);
6482 fl.l_pid = tswapl(target_efl->l_pid);
6483 unlock_user_struct(target_efl, arg3, 0);
6487 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6489 fl.l_type = tswap16(target_fl->l_type);
6490 fl.l_whence = tswap16(target_fl->l_whence);
6491 fl.l_start = tswap64(target_fl->l_start);
6492 fl.l_len = tswap64(target_fl->l_len);
6493 fl.l_pid = tswapl(target_fl->l_pid);
6494 unlock_user_struct(target_fl, arg3, 0);
6496 ret = get_errno(fcntl(arg1, cmd, &fl));
6499 ret = do_fcntl(arg1, cmd, arg3);
6505 #ifdef TARGET_NR_cacheflush
6506 case TARGET_NR_cacheflush:
6507 /* self-modifying code is handled automatically, so nothing needed */
6511 #ifdef TARGET_NR_security
6512 case TARGET_NR_security:
6515 #ifdef TARGET_NR_getpagesize
6516 case TARGET_NR_getpagesize:
6517 ret = TARGET_PAGE_SIZE;
6520 case TARGET_NR_gettid:
6521 ret = get_errno(gettid());
6523 #ifdef TARGET_NR_readahead
6524 case TARGET_NR_readahead:
6525 #if TARGET_ABI_BITS == 32
6527 if (((CPUARMState *)cpu_env)->eabi)
6534 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
6536 ret = get_errno(readahead(arg1, arg2, arg3));
6540 #ifdef TARGET_NR_setxattr
6541 case TARGET_NR_setxattr:
6542 case TARGET_NR_lsetxattr:
6543 case TARGET_NR_fsetxattr:
6544 case TARGET_NR_getxattr:
6545 case TARGET_NR_lgetxattr:
6546 case TARGET_NR_fgetxattr:
6547 case TARGET_NR_listxattr:
6548 case TARGET_NR_llistxattr:
6549 case TARGET_NR_flistxattr:
6550 case TARGET_NR_removexattr:
6551 case TARGET_NR_lremovexattr:
6552 case TARGET_NR_fremovexattr:
6553 goto unimplemented_nowarn;
6555 #ifdef TARGET_NR_set_thread_area
6556 case TARGET_NR_set_thread_area:
6557 #if defined(TARGET_MIPS)
6558 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
6561 #elif defined(TARGET_CRIS)
6563 ret = -TARGET_EINVAL;
6565 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
6569 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6570 ret = do_set_thread_area(cpu_env, arg1);
6573 goto unimplemented_nowarn;
6576 #ifdef TARGET_NR_get_thread_area
6577 case TARGET_NR_get_thread_area:
6578 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6579 ret = do_get_thread_area(cpu_env, arg1);
6581 goto unimplemented_nowarn;
6584 #ifdef TARGET_NR_getdomainname
6585 case TARGET_NR_getdomainname:
6586 goto unimplemented_nowarn;
6589 #ifdef TARGET_NR_clock_gettime
6590 case TARGET_NR_clock_gettime:
6593 ret = get_errno(clock_gettime(arg1, &ts));
6594 if (!is_error(ret)) {
6595 host_to_target_timespec(arg2, &ts);
6600 #ifdef TARGET_NR_clock_getres
6601 case TARGET_NR_clock_getres:
6604 ret = get_errno(clock_getres(arg1, &ts));
6605 if (!is_error(ret)) {
6606 host_to_target_timespec(arg2, &ts);
6611 #ifdef TARGET_NR_clock_nanosleep
6612 case TARGET_NR_clock_nanosleep:
6615 target_to_host_timespec(&ts, arg3);
6616 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
6618 host_to_target_timespec(arg4, &ts);
6623 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
6624 case TARGET_NR_set_tid_address:
6625 ret = get_errno(set_tid_address((int *)g2h(arg1)));
6629 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
6630 case TARGET_NR_tkill:
6631 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
6635 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
6636 case TARGET_NR_tgkill:
6637 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
6638 target_to_host_signal(arg3)));
6642 #ifdef TARGET_NR_set_robust_list
6643 case TARGET_NR_set_robust_list:
6644 goto unimplemented_nowarn;
6647 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
6648 case TARGET_NR_utimensat:
6650 struct timespec ts[2];
6651 target_to_host_timespec(ts, arg3);
6652 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
6654 ret = get_errno(sys_utimensat(arg1, NULL, ts, arg4));
6656 if (!(p = lock_user_string(arg2))) {
6657 ret = -TARGET_EFAULT;
6660 ret = get_errno(sys_utimensat(arg1, path(p), ts, arg4));
6661 unlock_user(p, arg2, 0);
6666 #if defined(USE_NPTL)
6667 case TARGET_NR_futex:
6668 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
6671 #ifdef TARGET_NR_inotify_init
6672 case TARGET_NR_inotify_init:
6673 ret = get_errno(sys_inotify_init());
6676 #ifdef TARGET_NR_inotify_add_watch
6677 case TARGET_NR_inotify_add_watch:
6678 p = lock_user_string(arg2);
6679 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
6680 unlock_user(p, arg2, 0);
6683 #ifdef TARGET_NR_inotify_rm_watch
6684 case TARGET_NR_inotify_rm_watch:
6685 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
6689 #ifdef TARGET_NR_mq_open
6690 case TARGET_NR_mq_open:
6692 struct mq_attr posix_mq_attr;
6694 p = lock_user_string(arg1 - 1);
6696 copy_from_user_mq_attr (&posix_mq_attr, arg4);
6697 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
6698 unlock_user (p, arg1, 0);
6702 case TARGET_NR_mq_unlink:
6703 p = lock_user_string(arg1 - 1);
6704 ret = get_errno(mq_unlink(p));
6705 unlock_user (p, arg1, 0);
6708 case TARGET_NR_mq_timedsend:
6712 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6714 target_to_host_timespec(&ts, arg5);
6715 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
6716 host_to_target_timespec(arg5, &ts);
6719 ret = get_errno(mq_send(arg1, p, arg3, arg4));
6720 unlock_user (p, arg2, arg3);
6724 case TARGET_NR_mq_timedreceive:
6729 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6731 target_to_host_timespec(&ts, arg5);
6732 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
6733 host_to_target_timespec(arg5, &ts);
6736 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
6737 unlock_user (p, arg2, arg3);
6739 put_user_u32(prio, arg4);
6743 /* Not implemented for now... */
6744 /* case TARGET_NR_mq_notify: */
6747 case TARGET_NR_mq_getsetattr:
6749 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
6752 ret = mq_getattr(arg1, &posix_mq_attr_out);
6753 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
6756 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
6757 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
6766 gemu_log("qemu: Unsupported syscall: %d\n", num);
6767 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
6768 unimplemented_nowarn:
6770 ret = -TARGET_ENOSYS;
6775 gemu_log(" = %ld\n", ret);
6778 print_syscall_ret(num, ret);
6781 ret = -TARGET_EFAULT;