4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
33 #include <sys/types.h>
39 #include <sys/mount.h>
40 #include <sys/prctl.h>
41 #include <sys/resource.h>
46 #include <sys/socket.h>
50 #include <sys/times.h>
53 #include <sys/statfs.h>
55 #include <sys/sysinfo.h>
56 #include <sys/utsname.h>
57 //#include <sys/user.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 #include <qemu-common.h>
65 #define termios host_termios
66 #define winsize host_winsize
67 #define termio host_termio
68 #define sgttyb host_sgttyb /* same as target */
69 #define tchars host_tchars /* same as target */
70 #define ltchars host_ltchars /* same as target */
72 #include <linux/termios.h>
73 #include <linux/unistd.h>
74 #include <linux/utsname.h>
75 #include <linux/cdrom.h>
76 #include <linux/hdreg.h>
77 #include <linux/soundcard.h>
79 #include <linux/mtio.h>
80 #include "linux_loop.h"
83 #include "qemu-common.h"
86 #include <linux/futex.h>
87 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
88 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
90 /* XXX: Hardcode the above values. */
91 #define CLONE_NPTL_FLAGS2 0
96 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
97 || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
98 /* 16 bit uid wrappers emulation */
102 //#include <linux/msdos_fs.h>
103 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
104 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/*
 * Mini-libc style wrappers: each _syscallN(type, name, ...) expands to a
 * static function `name` that traps directly into the host kernel via
 * syscall(2) using __NR_<name>.  These reach host syscalls that the host
 * libc does not (or did not, at the time) expose as functions.
 * NOTE(review): the braces of the generated bodies are not visible in this
 * excerpt; the surrounding `{`/`}` lines appear to have been elided.
 */
#define _syscall0(type,name) \
static type name (void) \
	return syscall(__NR_##name); \
#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
	return syscall(__NR_##name, arg1); \
#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
	return syscall(__NR_##name, arg1, arg2); \
#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
	return syscall(__NR_##name, arg1, arg2, arg3); \
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
	return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/*
 * Local aliases for host syscall numbers.  Each __NR_sys_<x> pairs with a
 * _syscallN(..., sys_<x>, ...) stub below, giving the wrapper a name that
 * cannot collide with a libc function of the same name.
 */
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/*
 * Direct host-syscall stubs, instantiated via the _syscallN macros above.
 * Each is guarded so it is only emitted when both the target wants the
 * syscall (TARGET_NR_*) and the host kernel headers define it (__NR_*).
 * NOTE(review): the matching #else/#endif lines are not visible in this
 * excerpt — the conditional structure has been elided.
 */
/* 64-bit hosts use plain lseek for the 32-bit _llseek emulation. */
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
#define __NR__llseek __NR_lseek
_syscall0(int, gettid)
/* This is a replacement for the host gettid() and must return a host
static int gettid(void) {
_syscall1(int,sys_uname,struct new_utsname *,buf)
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall4(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode,int,flags)
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall4(int,sys_fchmodat,int,dirfd,const char *,pathname,
          mode_t,mode,int,flags)
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#if defined(TARGET_NR_fstatat64) && defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#if TARGET_ABI_BITS == 32
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
_syscall2(int, sys_getpriority, int, which, int, who);
#if !defined (__x86_64__)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
_syscall0(int,sys_inotify_init)
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
_syscall3(int,sys_inotify_add_watch,int,fd,const char *,pathname,uint32_t,mask)
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
_syscall2(int,sys_inotify_rm_watch,int,fd,uint32_t,wd)
/* futex is only needed for the NPTL thread emulation. */
#if defined(USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
/*
 * Bidirectional open(2)/fcntl(2) flag translation table.
 * Each row is { target_mask, target_bits, host_mask, host_bits }: a value
 * matching target_bits under target_mask maps to host_bits, and vice versa.
 * Used with target_to_host_bitmask()/host_to_target_bitmask().
 * NOTE(review): the closing of this initializer is not visible here.
 */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
  { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
  { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
  { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
  { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
  { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
  { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
  { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
  { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
  { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
  /* O_DIRECT is not available on every host. */
#if defined(O_DIRECT)
  { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
/*
 * uname(2) emulation: fetch the host utsname and copy it field-by-field
 * into a kernel-style `struct new_utsname` for the guest.  Fields are
 * truncated to __NEW_UTS_LEN and always NUL-terminated.
 */
sys_uname(struct new_utsname *buf)
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct the Linux kernel uses).
   */

  /* Bounded copy with explicit termination — strncpy alone does not
   * guarantee a trailing NUL, so it is added by hand. */
#define COPY_UTSNAME_FIELD(dest, src) \
  /* __NEW_UTS_LEN doesn't include terminating null */ \
  (void) strncpy((dest), (src), __NEW_UTS_LEN); \
  (dest)[__NEW_UTS_LEN] = '\0'; \

  bzero(buf, sizeof (*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
  /* domainname is a GNU extension member of struct utsname. */
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);

#undef COPY_UTSNAME_FIELD
/* getcwd syscall emulation built on libc getcwd(3); on failure the errno
 * that getcwd() set is what the caller reports.
 * NOTE(review): the return type and error path are elided in this excerpt. */
sys_getcwd1(char *buf, size_t size)
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.

/* Thin wrappers over the host *at() libc functions, one per target
 * syscall.  They exist so the big syscall dispatcher can call a uniform
 * sys_*() name whether the host provides the call via libc or via a raw
 * _syscallN stub. */
#ifdef TARGET_NR_openat
sys_openat(int dirfd, const char *pathname, int flags, ...)
    /*
     * open(2) has extra parameter 'mode' when called with
     */
    if ((flags & O_CREAT) != 0) {
        /*
         * Get the 'mode' parameter and translate it to
         */
        mode = va_arg(ap, mode_t);
        /* NOTE(review): 'mode' is run through fcntl_flags_tbl, which is the
         * open-*flags* translation table — verify this is the intended
         * table for permission bits. */
        mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
        return (openat(dirfd, pathname, flags, mode));
    return (openat(dirfd, pathname, flags));

#ifdef TARGET_NR_mkdirat
sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
    return (mkdirat(dirfd, pathname, mode));

#ifdef TARGET_NR_mknodat
sys_mknodat(int dirfd, const char *pathname, mode_t mode, dev_t dev)
    return (mknodat(dirfd, pathname, mode, dev));

#ifdef TARGET_NR_fchownat
sys_fchownat(int dirfd, const char *pathname, uid_t owner,
    gid_t group, int flags)
    return (fchownat(dirfd, pathname, owner, group, flags));

#ifdef TARGET_NR_fstatat
sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
    return (fstatat64(dirfd, pathname, buf, flags));

#ifdef TARGET_NR_unlinkat
sys_unlinkat(int dirfd, const char *pathname, int flags)
    return (unlinkat(dirfd, pathname, flags));

#ifdef TARGET_NR_renameat
sys_renameat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath)
    return (renameat(olddirfd, oldpath, newdirfd, newpath));

#ifdef TARGET_NR_linkat
sys_linkat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath, int flags)
    return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));

#ifdef TARGET_NR_symlinkat
sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
    return (symlinkat(oldpath, newdirfd, newpath));

#ifdef TARGET_NR_readlinkat
sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
    return (readlinkat(dirfd, pathname, buf, bufsiz));

#ifdef TARGET_NR_fchmodat
sys_fchmodat(int dirfd, const char *pathname, mode_t mode, int flags)
    return (fchmodat(dirfd, pathname, mode, flags));

#ifdef TARGET_NR_faccessat
sys_faccessat(int dirfd, const char *pathname, int mode, int flags)
    return (faccessat(dirfd, pathname, mode, flags));

#ifdef TARGET_NR_utimensat
sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
    return (utimensat(dirfd, pathname, times, flags));
#else /* !CONFIG_ATFILE */

/*
 * Host system doesn't have these available so we don't try
 */
/* Undefining the TARGET_NR_* numbers makes the corresponding cases in the
 * syscall dispatcher elsewhere in this file compile out entirely. */
#undef TARGET_NR_openat
#undef TARGET_NR_mkdirat
#undef TARGET_NR_mknodat
#undef TARGET_NR_fchownat
#undef TARGET_NR_fstatat
#undef TARGET_NR_unlinkat
#undef TARGET_NR_renameat
#undef TARGET_NR_linkat
#undef TARGET_NR_symlinkat
#undef TARGET_NR_readlinkat
#undef TARGET_NR_fchmodat
#undef TARGET_NR_faccessat
#undef TARGET_NR_utimensat

#endif /* CONFIG_ATFILE */
/* Prototypes for host libc calls that older headers may not declare. */
extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

/* Upper bound for errno values used as direct indices into the two
 * translation tables below. */
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {

/*
 * Sparse host-errno -> target-errno map, using designated initializers so
 * unlisted entries stay 0 (meaning "no translation needed").
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]		= TARGET_EIDRM,
    [ECHRNG]		= TARGET_ECHRNG,
    [EL2NSYNC]		= TARGET_EL2NSYNC,
    [EL3HLT]		= TARGET_EL3HLT,
    [EL3RST]		= TARGET_EL3RST,
    [ELNRNG]		= TARGET_ELNRNG,
    [EUNATCH]		= TARGET_EUNATCH,
    [ENOCSI]		= TARGET_ENOCSI,
    [EL2HLT]		= TARGET_EL2HLT,
    [EDEADLK]		= TARGET_EDEADLK,
    [ENOLCK]		= TARGET_ENOLCK,
    [EBADE]		= TARGET_EBADE,
    [EBADR]		= TARGET_EBADR,
    [EXFULL]		= TARGET_EXFULL,
    [ENOANO]		= TARGET_ENOANO,
    [EBADRQC]		= TARGET_EBADRQC,
    [EBADSLT]		= TARGET_EBADSLT,
    [EBFONT]		= TARGET_EBFONT,
    [ENOSTR]		= TARGET_ENOSTR,
    [ENODATA]		= TARGET_ENODATA,
    [ETIME]		= TARGET_ETIME,
    [ENOSR]		= TARGET_ENOSR,
    [ENONET]		= TARGET_ENONET,
    [ENOPKG]		= TARGET_ENOPKG,
    [EREMOTE]		= TARGET_EREMOTE,
    [ENOLINK]		= TARGET_ENOLINK,
    [EADV]		= TARGET_EADV,
    [ESRMNT]		= TARGET_ESRMNT,
    [ECOMM]		= TARGET_ECOMM,
    [EPROTO]		= TARGET_EPROTO,
    [EDOTDOT]		= TARGET_EDOTDOT,
    [EMULTIHOP]		= TARGET_EMULTIHOP,
    [EBADMSG]		= TARGET_EBADMSG,
    [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
    [EOVERFLOW]		= TARGET_EOVERFLOW,
    [ENOTUNIQ]		= TARGET_ENOTUNIQ,
    [EBADFD]		= TARGET_EBADFD,
    [EREMCHG]		= TARGET_EREMCHG,
    [ELIBACC]		= TARGET_ELIBACC,
    [ELIBBAD]		= TARGET_ELIBBAD,
    [ELIBSCN]		= TARGET_ELIBSCN,
    [ELIBMAX]		= TARGET_ELIBMAX,
    [ELIBEXEC]		= TARGET_ELIBEXEC,
    [EILSEQ]		= TARGET_EILSEQ,
    [ENOSYS]		= TARGET_ENOSYS,
    [ELOOP]		= TARGET_ELOOP,
    [ERESTART]		= TARGET_ERESTART,
    [ESTRPIPE]		= TARGET_ESTRPIPE,
    [ENOTEMPTY]		= TARGET_ENOTEMPTY,
    [EUSERS]		= TARGET_EUSERS,
    [ENOTSOCK]		= TARGET_ENOTSOCK,
    [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
    [EMSGSIZE]		= TARGET_EMSGSIZE,
    [EPROTOTYPE]	= TARGET_EPROTOTYPE,
    [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
    [EADDRINUSE]	= TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
    [ENETDOWN]		= TARGET_ENETDOWN,
    [ENETUNREACH]	= TARGET_ENETUNREACH,
    [ENETRESET]		= TARGET_ENETRESET,
    [ECONNABORTED]	= TARGET_ECONNABORTED,
    [ECONNRESET]	= TARGET_ECONNRESET,
    [ENOBUFS]		= TARGET_ENOBUFS,
    [EISCONN]		= TARGET_EISCONN,
    [ENOTCONN]		= TARGET_ENOTCONN,
    [EUCLEAN]		= TARGET_EUCLEAN,
    [ENOTNAM]		= TARGET_ENOTNAM,
    [ENAVAIL]		= TARGET_ENAVAIL,
    [EISNAM]		= TARGET_EISNAM,
    [EREMOTEIO]		= TARGET_EREMOTEIO,
    [ESHUTDOWN]		= TARGET_ESHUTDOWN,
    [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
    [ETIMEDOUT]		= TARGET_ETIMEDOUT,
    [ECONNREFUSED]	= TARGET_ECONNREFUSED,
    [EHOSTDOWN]		= TARGET_EHOSTDOWN,
    [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
    [EALREADY]		= TARGET_EALREADY,
    [EINPROGRESS]	= TARGET_EINPROGRESS,
    [ESTALE]		= TARGET_ESTALE,
    [ECANCELED]		= TARGET_ECANCELED,
    [ENOMEDIUM]		= TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
    /* Key-management errnos are newer; guards for most appear elided here. */
    [ENOKEY]		= TARGET_ENOKEY,
    [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
    [EKEYREVOKED]	= TARGET_EKEYREVOKED,
    [EKEYREJECTED]	= TARGET_EKEYREJECTED,
    [EOWNERDEAD]	= TARGET_EOWNERDEAD,
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
/* Translate a host errno to the target's numbering; entries not present in
 * the table fall through (the untranslated-return path is elided here). */
static inline int host_to_target_errno(int err)
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];

/* Inverse translation: target errno -> host errno. */
static inline int target_to_host_errno(int err)
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];

/* Wrap a raw host syscall result: on failure return the negated,
 * target-translated errno (kernel-style return convention). */
static inline abi_long get_errno(abi_long ret)
        return -host_to_target_errno(errno);

/* True when ret is in the kernel's error range [-4095, -1], compared in
 * unsigned space like the kernel's IS_ERR_VALUE(). */
static inline int is_error(abi_long ret)
    return (abi_ulong)ret >= (abi_ulong)(-4096);

/* strerror() for a target errno, via the host translation. */
char *target_strerror(int err)
    return strerror(target_to_host_errno(err));
/* Current and initial program break of the guest, host-page aligned. */
static abi_ulong target_brk;
static abi_ulong target_original_brk;

/* Record the guest's initial break; called once at load time. */
void target_set_brk(abi_ulong new_brk)
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
    abi_long mapped_addr;

    /* Never shrink below the original break. */
    if (new_brk < target_original_brk)

    brk_page = HOST_PAGE_ALIGN(target_brk);

    /* If the new brk is less than this, set it and we're done... */
    if (new_brk < brk_page) {
        target_brk = new_brk;

    /* We need to allocate more memory after the brk... */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));

    /* Only commit the new break if the anonymous mapping succeeded. */
    if (!is_error(mapped_addr))
        target_brk = new_brk;
/* Rebuild a host fd_set from the guest's bitmap at target_fds_addr: the
 * guest stores n bits packed in abi_ulong words (byte-swapped via
 * __get_user), which are expanded bit-by-bit into FD_SET positions. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
    abi_ulong b, *target_fds;

    /* Number of abi_ulong words needed to hold n bits, rounded up. */
    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 sizeof(abi_ulong) * nw,
        return -TARGET_EFAULT;

    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */

    unlock_user(target_fds, target_fds_addr, 0);

/* Inverse of the above: pack a host fd_set into the guest's abi_ulong
 * bitmap, one bit per descriptor, byte-swapped via __put_user. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 sizeof(abi_ulong) * nw,
        return -TARGET_EFAULT;

    for (i = 0; i < nw; i++) {
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
        __put_user(v, &target_fds[i]);

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
/* Alpha hosts use a different clock-tick rate (details elided here). */
#if defined(__alpha__)

/* Rescale a host clock_t tick count to the target's HZ; a no-op when both
 * sides tick at the same rate. */
static inline abi_long host_to_target_clock_t(long ticks)
#if HOST_HZ == TARGET_HZ
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;

/* Copy a host struct rusage into the guest's target_rusage at target_addr,
 * byte-swapping every field with tswapl. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);
/* Read a guest target_timeval into a host struct timeval (byte-swap via
 * __get_user); returns -TARGET_EFAULT if the guest address is bad. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

/* Inverse: write a host struct timeval out to guest memory. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);
/* Read a guest target_mq_attr (POSIX message-queue attributes) into a host
 * struct mq_attr, byte-swapping each field. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

/* Inverse: write host mq_attr back out to the guest. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
/* do_select() must return target values and target errnos. */
/* Emulate select(2): unpack the three guest fd_sets and the timeout, call
 * the host select(), then copy the (possibly modified) sets and timeout
 * back to guest memory on success. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;

    if (copy_from_user_fdset(&rfds, rfd_addr, n))
        return -TARGET_EFAULT;
    if (copy_from_user_fdset(&wfds, wfd_addr, n))
        return -TARGET_EFAULT;
    if (copy_from_user_fdset(&efds, efd_addr, n))
        return -TARGET_EFAULT;

    /* A zero guest timeout pointer means "block indefinitely" (NULL tv). */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux updates the timeout with the remaining time; mirror that. */
        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
/* Copy a guest sockaddr into host form: raw byte copy plus a byte-swapped
 * sa_family, with a fix-up for common AF_UNIX length mistakes. */
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
        return -TARGET_EFAULT;
    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen) {
            char *cp = (char*)target_saddr;

            /* Last counted byte is non-NUL but the next byte is NUL:
             * the classic off-by-one length, extend len to include it. */
            if ( cp[len-1] && !cp[len] )
        /* Clamp to the host sockaddr_un size. */
        if (len > unix_maxlen)

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

/* Inverse: copy a host sockaddr out to guest memory, swapping sa_family. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);
/* ??? Should this also swap msgh->name? */
/* Convert the guest's ancillary-data (control message) chain into host
 * cmsghdr form, walking both chains in lock step.  SCM_RIGHTS payloads are
 * arrays of fds and get per-element byte swaps; anything else is copied
 * raw with a warning. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = total cmsg_len minus the (target) header. */
        int len = tswapl(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        /* NOTE(review): host-converted cmsg_level is compared against
         * TARGET_SOL_SOCKET — verify the host and target constants agree
         * for the supported host/target combinations. */
        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    unlock_user(target_cmsg, target_cmsg_addr, 0);

    msgh->msg_controllen = space;
/* ??? Should this also swap msgh->name? */
/* Inverse of target_to_host_cmsg(): walk the host control-message chain and
 * rebuild it in the guest's layout, byte-swapping headers and SCM_RIGHTS fd
 * arrays; other message types are copied raw with a warning. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Host-side payload length (header subtracted off). */
        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        /* Stop (and warn) rather than overflow the guest's control buffer. */
        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));

        /* NOTE(review): as in target_to_host_cmsg(), host cmsg_level is
         * compared against TARGET_SOL_SOCKET — confirm constants match. */
        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    unlock_user(target_cmsg, target_cmsg_addr, space);

    /* Report back how much control data was actually written. */
    target_msgh->msg_controllen = tswapl(space);
/* do_setsockopt() Must return target values and target errnos. */
/* Emulate setsockopt(2).  Dispatches on `level` (switch/case structure is
 * partly elided here): TCP options take a 32-bit int; several IP options
 * accept either a byte or an int; TARGET_SOL_SOCKET options have their
 * optname translated to the host SO_* constant before the host call.
 * NOTE(review): each `optname = SO_*;` assignment is presumably followed by
 * an (elided) break — the fall-through structure is not visible here. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));

    case IP_ROUTER_ALERT:
    case IP_MTU_DISCOVER:
    case IP_MULTICAST_TTL:
    case IP_MULTICAST_LOOP:
        /* These IP options historically accept a 1-byte or 4-byte value.
         * NOTE(review): if optlen == 0 neither branch reads `val` — its
         * initialization is not visible in this excerpt; confirm it is
         * initialized before use. */
        if (optlen >= sizeof(uint32_t)) {
            if (get_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else if (optlen >= 1) {
            if (get_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));

    case TARGET_SOL_SOCKET:
        /* Options with 'int' argument.  Map target SO_* numbering onto the
         * host's before calling the host setsockopt(). */
        case TARGET_SO_DEBUG:
        case TARGET_SO_REUSEADDR:
		optname = SO_REUSEADDR;
        case TARGET_SO_TYPE:
        case TARGET_SO_ERROR:
        case TARGET_SO_DONTROUTE:
		optname = SO_DONTROUTE;
        case TARGET_SO_BROADCAST:
		optname = SO_BROADCAST;
        case TARGET_SO_SNDBUF:
		optname = SO_SNDBUF;
        case TARGET_SO_RCVBUF:
		optname = SO_RCVBUF;
        case TARGET_SO_KEEPALIVE:
		optname = SO_KEEPALIVE;
        case TARGET_SO_OOBINLINE:
		optname = SO_OOBINLINE;
        case TARGET_SO_NO_CHECK:
		optname = SO_NO_CHECK;
        case TARGET_SO_PRIORITY:
		optname = SO_PRIORITY;
        case TARGET_SO_BSDCOMPAT:
		optname = SO_BSDCOMPAT;
        case TARGET_SO_PASSCRED:
		optname = SO_PASSCRED;
        case TARGET_SO_TIMESTAMP:
		optname = SO_TIMESTAMP;
        case TARGET_SO_RCVLOWAT:
		optname = SO_RCVLOWAT;
        case TARGET_SO_RCVTIMEO:
		optname = SO_RCVTIMEO;
        case TARGET_SO_SNDTIMEO:
		optname = SO_SNDTIMEO;

        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));

        gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
/* do_getsockopt() Must return target values and target errnos. */
/* Emulate getsockopt(2).  Only the simple "single int" options are handled;
 * structured options (SO_LINGER, SO_PEERCRED, ...) are explicitly bailed on.
 * The guest's optlen is read, the host call made, then the value is written
 * back as a byte or a u32 depending on the requested length. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
    case TARGET_SOL_SOCKET:
        /* These return structures, not a single integer — unsupported. */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERCRED:
        case TARGET_SO_PEERNAME:
            /* These don't just return a single integer */

        /* TCP options all take an 'int' value.  */
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
            return -TARGET_EINVAL;
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;

    case IP_ROUTER_ALERT:
    case IP_MTU_DISCOVER:
    case IP_MULTICAST_TTL:
    case IP_MULTICAST_LOOP:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
            return -TARGET_EINVAL;
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        /* Short guest buffer: store a single byte when the value fits.
         * NOTE(review): `val < 255` excludes 255, which a u8 could hold —
         * possible off-by-one; verify intent. */
        if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
            if (put_user_u32(len, optlen)
                || put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
            /* Otherwise clamp to int and store 4 bytes. */
            if (len > sizeof(int))
            if (put_user_u32(len, optlen)
                || put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
            ret = -TARGET_ENOPROTOOPT;

        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
        ret = -TARGET_EOPNOTSUPP;
1321 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1322 * other lock functions have a return code of 0 for failure.
/* Convert a guest iovec array at target_addr into host struct iovec[],
   locking each referenced buffer into host memory.  'copy' tells
   lock_user whether buffer contents must be copied in (write path). */
1324 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1325 int count, int copy)
1327 struct target_iovec *target_vec;
/* NOTE(review): count * sizeof(struct target_iovec) could overflow for a
   hostile count — confirm callers bound it. */
1331 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1333 return -TARGET_EFAULT;
1334 for(i = 0;i < count; i++) {
1335 base = tswapl(target_vec[i].iov_base);
1336 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1337 if (vec[i].iov_len != 0) {
1338 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1339 /* Don't check lock_user return value. We must call writev even
1340 if a element has invalid base address. */
1342 /* zero length pointer is ignored */
1343 vec[i].iov_base = NULL;
1346 unlock_user (target_vec, target_addr, 0);
/* Release the host buffers locked by lock_iovec().  'copy' selects
   whether locked data is written back to guest memory (read path). */
1350 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1351 int count, int copy)
1353 struct target_iovec *target_vec;
/* Re-read the guest iovec array to recover each element's guest base
   address for unlock_user. */
1357 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1359 return -TARGET_EFAULT;
1360 for(i = 0;i < count; i++) {
1361 if (target_vec[i].iov_base) {
1362 base = tswapl(target_vec[i].iov_base);
1363 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1366 unlock_user (target_vec, target_addr, 0);
1371 /* do_socket() Must return target values and target errnos. */
1372 static abi_long do_socket(int domain, int type, int protocol)
1374 #if defined(TARGET_MIPS)
1376 case TARGET_SOCK_DGRAM:
1379 case TARGET_SOCK_STREAM:
1382 case TARGET_SOCK_RAW:
1385 case TARGET_SOCK_RDM:
1388 case TARGET_SOCK_SEQPACKET:
1389 type = SOCK_SEQPACKET;
1391 case TARGET_SOCK_PACKET:
1396 if (domain == PF_NETLINK)
1397 return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
1398 return get_errno(socket(domain, type, protocol));
1401 /* MAX_SOCK_ADDR from linux/net/socket.c */
1402 #define MAX_SOCK_ADDR 128
1404 /* do_bind() Must return target values and target errnos. */
/* Emulates bind(2): bounds-check the guest addrlen, convert the guest
   sockaddr into a host sockaddr on the stack, then forward the call.
   NOTE(review): addrlen is presumably socklen_t (declared in an elided
   line), so the `addrlen < 0` arm may be dead; the `+1` slack in the
   alloca and the ignored target_to_host_sockaddr result should be
   confirmed upstream. */
1405 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1410 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1411 return -TARGET_EINVAL;
1413 addr = alloca(addrlen+1);
1415 target_to_host_sockaddr(addr, target_addr, addrlen);
1416 return get_errno(bind(sockfd, addr, addrlen));
1419 /* do_connect() Must return target values and target errnos. */
/* Emulates connect(2): same shape as do_bind() — validate addrlen,
   convert the guest sockaddr, forward the call.
   NOTE(review): target_to_host_sockaddr's return value is ignored. */
1420 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1425 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1426 return -TARGET_EINVAL;
1428 addr = alloca(addrlen);
1430 target_to_host_sockaddr(addr, target_addr, addrlen);
1431 return get_errno(connect(sockfd, addr, addrlen));
1434 /* do_sendrecvmsg() Must return target values and target errnos. */
/* Shared emulation for sendmsg(2)/recvmsg(2): builds a host msghdr from
   the guest target_msghdr, locks the iovec buffers, performs the call,
   and converts control messages in the appropriate direction. */
1435 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1436 int flags, int send)
1439 struct target_msghdr *msgp;
1443 abi_ulong target_vec;
/* Lock the guest msghdr: read-only for send, writable for receive. */
1446 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1450 return -TARGET_EFAULT;
1451 if (msgp->msg_name) {
1452 msg.msg_namelen = tswap32(msgp->msg_namelen);
1453 msg.msg_name = alloca(msg.msg_namelen);
1454 target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1457 msg.msg_name = NULL;
1458 msg.msg_namelen = 0;
/* NOTE(review): control buffer is doubled — presumably because host
   cmsg headers can be larger than the target's; confirm upstream. */
1460 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1461 msg.msg_control = alloca(msg.msg_controllen);
1462 msg.msg_flags = tswap32(msgp->msg_flags);
1464 count = tswapl(msgp->msg_iovlen);
1465 vec = alloca(count * sizeof(struct iovec));
1466 target_vec = tswapl(msgp->msg_iov);
/* NOTE(review): lock_iovec's return value is not checked here. */
1467 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1468 msg.msg_iovlen = count;
1472 ret = target_to_host_cmsg(&msg, msgp);
1474 ret = get_errno(sendmsg(fd, &msg, flags));
1476 ret = get_errno(recvmsg(fd, &msg, flags));
1477 if (!is_error(ret)) {
1479 ret = host_to_target_cmsg(msgp, &msg);
/* Unlock buffers, writing data back to the guest on the receive path. */
1484 unlock_iovec(vec, target_vec, count, !send);
1485 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1489 /* do_accept() Must return target values and target errnos. */
/* Emulates accept(2).  A NULL guest sockaddr is legal for accept and is
   forwarded as accept(fd, NULL, NULL); otherwise the peer address is
   converted back to target format and the updated length stored. */
1490 static abi_long do_accept(int fd, abi_ulong target_addr,
1491 abi_ulong target_addrlen_addr)
1497 if (target_addr == 0)
1498 return get_errno(accept(fd, NULL, NULL));
1500 if (get_user_u32(addrlen, target_addrlen_addr))
1501 return -TARGET_EFAULT;
/* NOTE(review): addrlen is presumably unsigned, making `< 0` dead. */
1503 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1504 return -TARGET_EINVAL;
1506 addr = alloca(addrlen);
1508 ret = get_errno(accept(fd, addr, &addrlen));
1509 if (!is_error(ret)) {
1510 host_to_target_sockaddr(target_addr, addr, addrlen);
1511 if (put_user_u32(addrlen, target_addrlen_addr))
1512 ret = -TARGET_EFAULT;
1517 /* do_getpeername() Must return target values and target errnos. */
/* Emulates getpeername(2): read the guest addrlen, query the host,
   convert the sockaddr to target format, write back length. */
1518 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1519 abi_ulong target_addrlen_addr)
1525 if (get_user_u32(addrlen, target_addrlen_addr))
1526 return -TARGET_EFAULT;
1528 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1529 return -TARGET_EINVAL;
1531 addr = alloca(addrlen);
1533 ret = get_errno(getpeername(fd, addr, &addrlen));
1534 if (!is_error(ret)) {
1535 host_to_target_sockaddr(target_addr, addr, addrlen);
1536 if (put_user_u32(addrlen, target_addrlen_addr))
1537 ret = -TARGET_EFAULT;
1542 /* do_getsockname() Must return target values and target errnos. */
1543 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1544 abi_ulong target_addrlen_addr)
1550 if (target_addr == 0)
1551 return get_errno(accept(fd, NULL, NULL));
1553 if (get_user_u32(addrlen, target_addrlen_addr))
1554 return -TARGET_EFAULT;
1556 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1557 return -TARGET_EINVAL;
1559 addr = alloca(addrlen);
1561 ret = get_errno(getsockname(fd, addr, &addrlen));
1562 if (!is_error(ret)) {
1563 host_to_target_sockaddr(target_addr, addr, addrlen);
1564 if (put_user_u32(addrlen, target_addrlen_addr))
1565 ret = -TARGET_EFAULT;
1570 /* do_socketpair() Must return target values and target errnos. */
/* Emulates socketpair(2): create the pair on the host, then store the
   two new fds into the guest's two-element int array. */
1571 static abi_long do_socketpair(int domain, int type, int protocol,
1572 abi_ulong target_tab_addr)
1577 ret = get_errno(socketpair(domain, type, protocol, tab));
1578 if (!is_error(ret)) {
1579 if (put_user_s32(tab[0], target_tab_addr)
1580 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1581 ret = -TARGET_EFAULT;
1586 /* do_sendto() Must return target values and target errnos. */
/* Emulates sendto(2)/send(2): lock the guest payload read-only; when a
   destination address is given, convert it and use sendto(), otherwise
   fall back to send(). */
1587 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1588 abi_ulong target_addr, socklen_t addrlen)
1594 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1595 return -TARGET_EINVAL;
1597 host_msg = lock_user(VERIFY_READ, msg, len, 1)
1599 return -TARGET_EFAULT;
/* NOTE(review): target_to_host_sockaddr's return value is ignored. */
1601 addr = alloca(addrlen);
1602 target_to_host_sockaddr(addr, target_addr, addrlen);
1603 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1605 ret = get_errno(send(fd, host_msg, len, flags));
1607 unlock_user(host_msg, msg, 0);
1611 /* do_recvfrom() Must return target values and target errnos. */
/* Emulates recvfrom(2)/recv(2): lock the guest buffer writable; when a
   source-address slot is given, receive the peer address as well and
   convert it back to target format. */
1612 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1613 abi_ulong target_addr,
1614 abi_ulong target_addrlen)
1621 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1623 return -TARGET_EFAULT;
1625 if (get_user_u32(addrlen, target_addrlen)) {
1626 ret = -TARGET_EFAULT;
1629 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR) {
1630 ret = -TARGET_EINVAL;
1633 addr = alloca(addrlen);
1634 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1636 addr = NULL; /* To keep compiler quiet. */
1637 ret = get_errno(recv(fd, host_msg, len, flags));
1639 if (!is_error(ret)) {
1641 host_to_target_sockaddr(target_addr, addr, addrlen);
1642 if (put_user_u32(addrlen, target_addrlen)) {
1643 ret = -TARGET_EFAULT;
/* On success the received bytes are copied back to the guest;
   on failure nothing is written back. */
1647 unlock_user(host_msg, msg, len);
1650 unlock_user(host_msg, msg, 0);
1655 #ifdef TARGET_NR_socketcall
1656 /* do_socketcall() Must return target values and target errnos. */
/* Demultiplexer for the socketcall(2) syscall: 'num' selects the socket
   operation and 'vptr' points at the packed guest argument array, read
   element-by-element at stride n = sizeof(abi_ulong). */
1657 static abi_long do_socketcall(int num, abi_ulong vptr)
1660 const int n = sizeof(abi_ulong);
1665 int domain, type, protocol;
1667 if (get_user_s32(domain, vptr)
1668 || get_user_s32(type, vptr + n)
1669 || get_user_s32(protocol, vptr + 2 * n))
1670 return -TARGET_EFAULT;
1672 ret = do_socket(domain, type, protocol);
1678 abi_ulong target_addr;
1681 if (get_user_s32(sockfd, vptr)
1682 || get_user_ual(target_addr, vptr + n)
1683 || get_user_u32(addrlen, vptr + 2 * n))
1684 return -TARGET_EFAULT;
1686 ret = do_bind(sockfd, target_addr, addrlen);
1689 case SOCKOP_connect:
1692 abi_ulong target_addr;
1695 if (get_user_s32(sockfd, vptr)
1696 || get_user_ual(target_addr, vptr + n)
1697 || get_user_u32(addrlen, vptr + 2 * n))
1698 return -TARGET_EFAULT;
1700 ret = do_connect(sockfd, target_addr, addrlen);
1705 int sockfd, backlog;
1707 if (get_user_s32(sockfd, vptr)
1708 || get_user_s32(backlog, vptr + n))
1709 return -TARGET_EFAULT;
/* listen() needs no argument conversion, so it is forwarded directly. */
1711 ret = get_errno(listen(sockfd, backlog));
1717 abi_ulong target_addr, target_addrlen;
1719 if (get_user_s32(sockfd, vptr)
1720 || get_user_ual(target_addr, vptr + n)
1721 || get_user_u32(target_addrlen, vptr + 2 * n))
1722 return -TARGET_EFAULT;
1724 ret = do_accept(sockfd, target_addr, target_addrlen);
1727 case SOCKOP_getsockname:
1730 abi_ulong target_addr, target_addrlen;
1732 if (get_user_s32(sockfd, vptr)
1733 || get_user_ual(target_addr, vptr + n)
1734 || get_user_u32(target_addrlen, vptr + 2 * n))
1735 return -TARGET_EFAULT;
1737 ret = do_getsockname(sockfd, target_addr, target_addrlen);
1740 case SOCKOP_getpeername:
1743 abi_ulong target_addr, target_addrlen;
1745 if (get_user_s32(sockfd, vptr)
1746 || get_user_ual(target_addr, vptr + n)
1747 || get_user_u32(target_addrlen, vptr + 2 * n))
1748 return -TARGET_EFAULT;
1750 ret = do_getpeername(sockfd, target_addr, target_addrlen);
1753 case SOCKOP_socketpair:
1755 int domain, type, protocol;
1758 if (get_user_s32(domain, vptr)
1759 || get_user_s32(type, vptr + n)
1760 || get_user_s32(protocol, vptr + 2 * n)
1761 || get_user_ual(tab, vptr + 3 * n))
1762 return -TARGET_EFAULT;
1764 ret = do_socketpair(domain, type, protocol, tab);
/* send/recv are implemented via sendto/recvfrom with a zero address. */
1774 if (get_user_s32(sockfd, vptr)
1775 || get_user_ual(msg, vptr + n)
1776 || get_user_ual(len, vptr + 2 * n)
1777 || get_user_s32(flags, vptr + 3 * n))
1778 return -TARGET_EFAULT;
1780 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
1790 if (get_user_s32(sockfd, vptr)
1791 || get_user_ual(msg, vptr + n)
1792 || get_user_ual(len, vptr + 2 * n)
1793 || get_user_s32(flags, vptr + 3 * n))
1794 return -TARGET_EFAULT;
1796 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
1808 if (get_user_s32(sockfd, vptr)
1809 || get_user_ual(msg, vptr + n)
1810 || get_user_ual(len, vptr + 2 * n)
1811 || get_user_s32(flags, vptr + 3 * n)
1812 || get_user_ual(addr, vptr + 4 * n)
1813 || get_user_u32(addrlen, vptr + 5 * n))
1814 return -TARGET_EFAULT;
1816 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
1819 case SOCKOP_recvfrom:
1828 if (get_user_s32(sockfd, vptr)
1829 || get_user_ual(msg, vptr + n)
1830 || get_user_ual(len, vptr + 2 * n)
1831 || get_user_s32(flags, vptr + 3 * n)
1832 || get_user_ual(addr, vptr + 4 * n)
1833 || get_user_u32(addrlen, vptr + 5 * n))
1834 return -TARGET_EFAULT;
1836 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
1839 case SOCKOP_shutdown:
1843 if (get_user_s32(sockfd, vptr)
1844 || get_user_s32(how, vptr + n))
1845 return -TARGET_EFAULT;
1847 ret = get_errno(shutdown(sockfd, how));
1850 case SOCKOP_sendmsg:
1851 case SOCKOP_recvmsg:
1854 abi_ulong target_msg;
1857 if (get_user_s32(fd, vptr)
1858 || get_user_ual(target_msg, vptr + n)
1859 || get_user_s32(flags, vptr + 2 * n))
1860 return -TARGET_EFAULT;
1862 ret = do_sendrecvmsg(fd, target_msg, flags,
1863 (num == SOCKOP_sendmsg));
1866 case SOCKOP_setsockopt:
1874 if (get_user_s32(sockfd, vptr)
1875 || get_user_s32(level, vptr + n)
1876 || get_user_s32(optname, vptr + 2 * n)
1877 || get_user_ual(optval, vptr + 3 * n)
1878 || get_user_u32(optlen, vptr + 4 * n))
1879 return -TARGET_EFAULT;
1881 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
1884 case SOCKOP_getsockopt:
1892 if (get_user_s32(sockfd, vptr)
1893 || get_user_s32(level, vptr + n)
1894 || get_user_s32(optname, vptr + 2 * n)
1895 || get_user_ual(optval, vptr + 3 * n)
1896 || get_user_u32(optlen, vptr + 4 * n))
1897 return -TARGET_EFAULT;
1899 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
1903 gemu_log("Unsupported socketcall: %d\n", num);
1904 ret = -TARGET_ENOSYS;
1911 #define N_SHM_REGIONS 32
1913 static struct shm_region {
1916 } shm_regions[N_SHM_REGIONS];
/* Target (guest ABI) layout of struct ipc_perm; key/uid/gid/cuid/cgid
   fields precede `mode` in elided lines above. */
1918 struct target_ipc_perm
1925 unsigned short int mode;
1926 unsigned short int __pad1;
1927 unsigned short int __seq;
1928 unsigned short int __pad2;
1929 abi_ulong __unused1;
1930 abi_ulong __unused2;
/* Target layout of struct semid_ds for semctl() emulation. */
1933 struct target_semid_ds
1935 struct target_ipc_perm sem_perm;
1936 abi_ulong sem_otime;
1937 abi_ulong __unused1;
1938 abi_ulong sem_ctime;
1939 abi_ulong __unused2;
1940 abi_ulong sem_nsems;
1941 abi_ulong __unused3;
1942 abi_ulong __unused4;
/* Copy the ipc_perm embedded at the head of a guest semid_ds into the
   host struct ipc_perm, byte-swapping each field. */
1945 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
1946 abi_ulong target_addr)
1948 struct target_ipc_perm *target_ip;
1949 struct target_semid_ds *target_sd;
1951 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
1952 return -TARGET_EFAULT;
1953 target_ip=&(target_sd->sem_perm);
1954 host_ip->__key = tswapl(target_ip->__key);
1955 host_ip->uid = tswapl(target_ip->uid);
1956 host_ip->gid = tswapl(target_ip->gid);
1957 host_ip->cuid = tswapl(target_ip->cuid);
1958 host_ip->cgid = tswapl(target_ip->cgid);
/* NOTE(review): mode is declared unsigned short in target_ipc_perm but
   is swapped with tswapl here — presumably should be a 16-bit swap;
   confirm against the target ABI. */
1959 host_ip->mode = tswapl(target_ip->mode);
1960 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_ipc_perm(): write a host ipc_perm into the
   ipc_perm at the head of a guest semid_ds. */
1964 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
1965 struct ipc_perm *host_ip)
1967 struct target_ipc_perm *target_ip;
1968 struct target_semid_ds *target_sd;
1970 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
1971 return -TARGET_EFAULT;
1972 target_ip = &(target_sd->sem_perm);
1973 target_ip->__key = tswapl(host_ip->__key);
1974 target_ip->uid = tswapl(host_ip->uid);
1975 target_ip->gid = tswapl(host_ip->gid);
1976 target_ip->cuid = tswapl(host_ip->cuid);
1977 target_ip->cgid = tswapl(host_ip->cgid);
/* NOTE(review): same tswapl-on-16-bit-mode question as the read path. */
1978 target_ip->mode = tswapl(host_ip->mode);
1979 unlock_user_struct(target_sd, target_addr, 1);
/* Convert a guest semid_ds to the host's, delegating the embedded
   ipc_perm to target_to_host_ipc_perm().
   NOTE(review): if the ipc_perm conversion fails, target_sd is returned
   without being unlocked — confirm whether that leaks a mapping. */
1983 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
1984 abi_ulong target_addr)
1986 struct target_semid_ds *target_sd;
1988 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
1989 return -TARGET_EFAULT;
1990 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
1991 return -TARGET_EFAULT;
1992 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
1993 host_sd->sem_otime = tswapl(target_sd->sem_otime);
1994 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
1995 unlock_user_struct(target_sd, target_addr, 0);
1999 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2000 struct semid_ds *host_sd)
2002 struct target_semid_ds *target_sd;
2004 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2005 return -TARGET_EFAULT;
2006 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2007 return -TARGET_EFAULT;;
2008 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2009 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2010 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2011 unlock_user_struct(target_sd, target_addr, 1);
2015 struct target_seminfo {
/* Copy a host struct seminfo (semctl IPC_INFO/SEM_INFO result) into the
   guest's target_seminfo, field by field with byte swapping. */
2028 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2029 struct seminfo *host_seminfo)
2031 struct target_seminfo *target_seminfo;
2032 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2033 return -TARGET_EFAULT;
2034 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2035 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2036 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2037 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2038 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2039 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2040 __put_user(host_seminfo->semume, &target_seminfo->semume);
2041 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2042 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2043 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2044 unlock_user_struct(target_seminfo, target_addr, 1);
2050 struct semid_ds *buf;
2051 unsigned short *array;
2052 struct seminfo *__buf;
2055 union target_semun {
2062 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2063 abi_ulong target_addr)
2066 unsigned short *array;
2068 struct semid_ds semid_ds;
2071 semun.buf = &semid_ds;
2073 ret = semctl(semid, 0, IPC_STAT, semun);
2075 return get_errno(ret);
2077 nsems = semid_ds.sem_nsems;
2079 *host_array = malloc(nsems*sizeof(unsigned short));
2080 array = lock_user(VERIFY_READ, target_addr,
2081 nsems*sizeof(unsigned short), 1);
2083 return -TARGET_EFAULT;
2085 for(i=0; i<nsems; i++) {
2086 __get_user((*host_array)[i], &array[i]);
2088 unlock_user(array, target_addr, 0);
/* Write the host semaphore-value array back to the guest (semctl GETALL
   result).  The semaphore count is re-queried via IPC_STAT so the copy
   length matches the set. */
2093 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2094 unsigned short **host_array)
2097 unsigned short *array;
2099 struct semid_ds semid_ds;
2102 semun.buf = &semid_ds;
2104 ret = semctl(semid, 0, IPC_STAT, semun);
2106 return get_errno(ret);
2108 nsems = semid_ds.sem_nsems;
2110 array = lock_user(VERIFY_WRITE, target_addr,
2111 nsems*sizeof(unsigned short), 0);
/* NOTE(review): on this failure path *host_array is presumably still
   allocated by the caller — confirm who frees it. */
2113 return -TARGET_EFAULT;
2115 for(i=0; i<nsems; i++) {
2116 __put_user((*host_array)[i], &array[i]);
2119 unlock_user(array, target_addr, 1);
/* Emulates semctl(2): dispatches on cmd and converts the semun argument
   in whichever direction the command requires.  (Elided listing: the
   switch skeleton and error checks sit in omitted lines.) */
2124 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2125 union target_semun target_su)
2128 struct semid_ds dsarg;
2129 unsigned short *array;
2130 struct seminfo seminfo;
2131 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET path: semid_ds in, semid_ds out. */
2140 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2144 ret = get_errno(semctl(semid, semnum, cmd, arg));
2145 err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* GETVAL/SETVAL path: plain int, byte-swapped both ways. */
2151 arg.val = tswapl(target_su.val);
2152 ret = get_errno(semctl(semid, semnum, cmd, arg));
2153 target_su.val = tswapl(arg.val);
/* GETALL/SETALL path: whole semaphore-value array. */
2157 err = target_to_host_semarray(semid, &array, target_su.array);
2161 ret = get_errno(semctl(semid, semnum, cmd, arg));
2162 err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_INFO/SEM_INFO path: struct seminfo out. */
2168 arg.__buf = &seminfo;
2169 ret = get_errno(semctl(semid, semnum, cmd, arg));
2170 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Commands with no pointer argument. */
2178 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2185 struct target_sembuf {
2186 unsigned short sem_num;
/* Convert an array of guest sembuf operations into host sembufs.
   NOTE(review): __put_user is used to copy target -> host here (the swap
   is symmetric so the values come out right), but __get_user would be
   the semantically correct accessor — confirm upstream. */
2191 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2192 abi_ulong target_addr,
2195 struct target_sembuf *target_sembuf;
2198 target_sembuf = lock_user(VERIFY_READ, target_addr,
2199 nsops*sizeof(struct target_sembuf), 1);
2201 return -TARGET_EFAULT;
2203 for(i=0; i<nsops; i++) {
2204 __put_user(target_sembuf[i].sem_num, &host_sembuf[i].sem_num);
2205 __put_user(target_sembuf[i].sem_op, &host_sembuf[i].sem_op);
2206 __put_user(target_sembuf[i].sem_flg, &host_sembuf[i].sem_flg);
2209 unlock_user(target_sembuf, target_addr, 0);
2214 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2216 struct sembuf sops[nsops];
2218 if (target_to_host_sembuf(sops, ptr, nsops))
2219 return -TARGET_EFAULT;
2221 return semop(semid, sops, nsops);
/* Target layout of struct msqid_ds; on 32-bit ABIs each 64-bit time
   field is padded with an extra word (the matching #endif lines are in
   elided lines of this listing). */
2224 struct target_msqid_ds
2226 struct target_ipc_perm msg_perm;
2227 abi_ulong msg_stime;
2228 #if TARGET_ABI_BITS == 32
2229 abi_ulong __unused1;
2231 abi_ulong msg_rtime;
2232 #if TARGET_ABI_BITS == 32
2233 abi_ulong __unused2;
2235 abi_ulong msg_ctime;
2236 #if TARGET_ABI_BITS == 32
2237 abi_ulong __unused3;
2239 abi_ulong __msg_cbytes;
2241 abi_ulong msg_qbytes;
2242 abi_ulong msg_lspid;
2243 abi_ulong msg_lrpid;
2244 abi_ulong __unused4;
2245 abi_ulong __unused5;
/* Convert a guest msqid_ds to the host's for msgctl(IPC_SET). */
2248 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2249 abi_ulong target_addr)
2251 struct target_msqid_ds *target_md;
2253 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2254 return -TARGET_EFAULT;
2255 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2256 return -TARGET_EFAULT;
2257 host_md->msg_stime = tswapl(target_md->msg_stime);
2258 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2259 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2260 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2261 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2262 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2263 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2264 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2265 unlock_user_struct(target_md, target_addr, 0);
/* Convert a host msqid_ds into the guest's for msgctl(IPC_STAT). */
2269 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2270 struct msqid_ds *host_md)
2272 struct target_msqid_ds *target_md;
2274 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2275 return -TARGET_EFAULT;
2276 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2277 return -TARGET_EFAULT;
2278 target_md->msg_stime = tswapl(host_md->msg_stime);
2279 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2280 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2281 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2282 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2283 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2284 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2285 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2286 unlock_user_struct(target_md, target_addr, 1);
2290 struct target_msginfo {
2298 unsigned short int msgseg;
/* Copy a host struct msginfo (msgctl IPC_INFO/MSG_INFO result) into the
   guest's target_msginfo. */
2301 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2302 struct msginfo *host_msginfo)
2304 struct target_msginfo *target_msginfo;
2305 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2306 return -TARGET_EFAULT;
2307 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2308 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2309 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2310 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2311 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2312 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2313 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2314 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2315 unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulates msgctl(2): IPC_STAT/IPC_SET convert msqid_ds both ways,
   IPC_RMID needs no buffer, IPC_INFO/MSG_INFO return a msginfo. */
2319 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2321 struct msqid_ds dsarg;
2322 struct msginfo msginfo;
2323 abi_long ret = -TARGET_EINVAL;
2331 if (target_to_host_msqid_ds(&dsarg,ptr))
2332 return -TARGET_EFAULT;
2333 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2334 if (host_to_target_msqid_ds(ptr,&dsarg))
2335 return -TARGET_EFAULT;
2338 ret = get_errno(msgctl(msgid, cmd, NULL));
/* The kernel writes a msginfo here despite the msqid_ds* prototype. */
2342 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2343 if (host_to_target_msginfo(ptr, &msginfo))
2344 return -TARGET_EFAULT;
2351 struct target_msgbuf {
2356 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2357 unsigned int msgsz, int msgflg)
2359 struct target_msgbuf *target_mb;
2360 struct msgbuf *host_mb;
2363 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2364 return -TARGET_EFAULT;
2365 host_mb = malloc(msgsz+sizeof(long));
2366 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2367 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2368 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2370 unlock_user_struct(target_mb, msgp, 0);
/* Emulates msgrcv(2): receive into a host msgbuf, then copy the mtext
   payload and swapped mtype back into the guest's target_msgbuf. */
2375 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2376 unsigned int msgsz, abi_long msgtyp,
2379 struct target_msgbuf *target_mb;
2381 struct msgbuf *host_mb;
2384 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2385 return -TARGET_EFAULT;
/* NOTE(review): malloc result is not checked before use. */
2387 host_mb = malloc(msgsz+sizeof(long));
2388 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
/* On success, ret is the number of payload bytes received; lock exactly
   that many bytes of guest mtext for writing. */
2391 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2392 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2393 if (!target_mtext) {
2394 ret = -TARGET_EFAULT;
2397 memcpy(target_mb->mtext, host_mb->mtext, ret);
2398 unlock_user(target_mtext, target_mtext_addr, ret);
2401 target_mb->mtype = tswapl(host_mb->mtype);
2406 unlock_user_struct(target_mb, msgp, 1);
/* Target layout of struct shmid_ds; like target_msqid_ds, 32-bit ABIs
   pad each time field with an extra word (matching #endif lines are in
   elided lines of this listing). */
2410 struct target_shmid_ds
2412 struct target_ipc_perm shm_perm;
2413 abi_ulong shm_segsz;
2414 abi_ulong shm_atime;
2415 #if TARGET_ABI_BITS == 32
2416 abi_ulong __unused1;
2418 abi_ulong shm_dtime;
2419 #if TARGET_ABI_BITS == 32
2420 abi_ulong __unused2;
2422 abi_ulong shm_ctime;
2423 #if TARGET_ABI_BITS == 32
2424 abi_ulong __unused3;
2428 abi_ulong shm_nattch;
2429 unsigned long int __unused4;
2430 unsigned long int __unused5;
/* Convert a guest shmid_ds to the host's for shmctl(IPC_SET). */
2433 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2434 abi_ulong target_addr)
2436 struct target_shmid_ds *target_sd;
2438 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2439 return -TARGET_EFAULT;
2440 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2441 return -TARGET_EFAULT;
/* NOTE(review): __put_user is used for target -> host copies here (same
   symmetric-swap idiom as target_to_host_sembuf) — confirm upstream. */
2442 __put_user(target_sd->shm_segsz, &host_sd->shm_segsz);
2443 __put_user(target_sd->shm_atime, &host_sd->shm_atime);
2444 __put_user(target_sd->shm_dtime, &host_sd->shm_dtime);
2445 __put_user(target_sd->shm_ctime, &host_sd->shm_ctime);
2446 __put_user(target_sd->shm_cpid, &host_sd->shm_cpid);
2447 __put_user(target_sd->shm_lpid, &host_sd->shm_lpid);
2448 __put_user(target_sd->shm_nattch, &host_sd->shm_nattch);
2449 unlock_user_struct(target_sd, target_addr, 0);
/* Convert a host shmid_ds into the guest's for shmctl(IPC_STAT). */
2453 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2454 struct shmid_ds *host_sd)
2456 struct target_shmid_ds *target_sd;
2458 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2459 return -TARGET_EFAULT;
2460 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2461 return -TARGET_EFAULT;
2462 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2463 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2464 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2465 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2466 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2467 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2468 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2469 unlock_user_struct(target_sd, target_addr, 1);
2473 struct target_shminfo {
/* Copy a host struct shminfo (shmctl IPC_INFO result) into the guest's
   target_shminfo. */
2481 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2482 struct shminfo *host_shminfo)
2484 struct target_shminfo *target_shminfo;
2485 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2486 return -TARGET_EFAULT;
2487 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2488 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2489 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2490 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2491 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2492 unlock_user_struct(target_shminfo, target_addr, 1);
2496 struct target_shm_info {
2501 abi_ulong swap_attempts;
2502 abi_ulong swap_successes;
/* Copy a host struct shm_info (shmctl SHM_INFO result) into the guest's
   target_shm_info. */
2505 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2506 struct shm_info *host_shm_info)
2508 struct target_shm_info *target_shm_info;
2509 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2510 return -TARGET_EFAULT;
2511 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2512 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2513 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2514 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2515 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2516 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2517 unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulates shmctl(2): IPC_STAT/IPC_SET convert shmid_ds both ways;
   IPC_INFO and SHM_INFO return shminfo/shm_info structures (the kernel
   writes them through the shmid_ds* parameter, hence the casts). */
2521 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2523 struct shmid_ds dsarg;
2524 struct shminfo shminfo;
2525 struct shm_info shm_info;
2526 abi_long ret = -TARGET_EINVAL;
2534 if (target_to_host_shmid_ds(&dsarg, buf))
2535 return -TARGET_EFAULT;
2536 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2537 if (host_to_target_shmid_ds(buf, &dsarg))
2538 return -TARGET_EFAULT;
2541 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2542 if (host_to_target_shminfo(buf, &shminfo))
2543 return -TARGET_EFAULT;
2546 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2547 if (host_to_target_shm_info(buf, &shm_info))
2548 return -TARGET_EFAULT;
2553 ret = get_errno(shmctl(shmid, cmd, NULL));
/* Emulates shmat(2): attach the segment in host address space, record
   the region in shm_regions[] and mark its pages valid for the guest.
   When the guest gives no address, a free guest VMA range is chosen via
   mmap_find_vma(). */
2560 static inline abi_long do_shmat(int shmid, abi_ulong shmaddr, int shmflg,
2561 unsigned long *raddr)
2563 abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size);
2565 struct shmid_ds shm_info;
2568 /* find out the length of the shared memory segment */
2569 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2570 if (is_error(ret)) {
2571 /* can't get length, bail out */
2572 return get_errno(ret);
/* Guest supplied an address: attach at the corresponding host address. */
2578 *raddr = (unsigned long) shmat(shmid, g2h(shmaddr), shmflg);
2580 abi_ulong mmap_start;
2582 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2584 if (mmap_start == -1) {
/* SHM_REMAP lets shmat overwrite the placeholder mapping found above. */
2588 *raddr = (unsigned long) shmat(shmid, g2h(mmap_start),
2589 shmflg | SHM_REMAP);
2594 return get_errno(*raddr);
/* Expose the attached pages to the guest with the right protections. */
2597 page_set_flags(h2g(*raddr), h2g(*raddr) + shm_info.shm_segsz,
2598 PAGE_VALID | PAGE_READ |
2599 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Remember the region so do_shmdt() can find its size later. */
2601 for (i = 0; i < N_SHM_REGIONS; i++) {
2602 if (shm_regions[i].start == 0) {
2603 shm_regions[i].start = h2g(*raddr);
2604 shm_regions[i].size = shm_info.shm_segsz;
2609 *raddr = h2g(*raddr);
2615 static inline abi_long do_shmdt(abi_ulong shmaddr)
2619 for (i = 0; i < N_SHM_REGIONS; ++i) {
2620 if (shm_regions[i].start == shmaddr) {
2621 shm_regions[i].start = 0;
2622 page_set_flags(shmaddr, shm_regions[i].size, 0);
2627 return get_errno(shmdt(g2h(shmaddr)));
2630 #ifdef TARGET_NR_ipc
2631 /* ??? This only works with linear mappings. */
2632 /* do_ipc() must return target values and target errnos. */
/* Demultiplexer for the ipc(2) syscall: 'call' selects the IPC operation
   (low 16 bits) and its interface version (high 16 bits). */
2633 static abi_long do_ipc(unsigned int call, int first,
2634 int second, int third,
2635 abi_long ptr, abi_long fifth)
2640 version = call >> 16;
2645 ret = do_semop(first, ptr, second);
2649 ret = get_errno(semget(first, second, third));
2653 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2657 ret = get_errno(msgget(first, second));
2661 ret = do_msgsnd(first, ptr, second, third);
2665 ret = do_msgctl(first, second, ptr);
/* Old (version 0) MSGRCV passes its arguments through an ipc_kludge
   structure; newer callers pass them directly. */
2672 struct target_ipc_kludge {
2677 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2678 ret = -TARGET_EFAULT;
2682 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2684 unlock_user_struct(tmp, ptr, 0);
2688 ret = do_msgrcv(first, ptr, second, fifth, third);
2696 unsigned long raddr;
2698 ret = do_shmat(first, ptr, second, &raddr);
/* SHMAT returns the attach address via the pointer in 'third'. */
2702 ret = put_user_ual(raddr, third);
2706 ret = -TARGET_EINVAL;
2712 ret = do_shmdt(ptr);
2716 ret = get_errno(shmget(first, second, third));
2720 ret = do_shmctl(first, second, third);
2724 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2725 ret = -TARGET_ENOSYS;
2732 /* kernel structure types definitions */
2735 #define STRUCT(name, list...) STRUCT_ ## name,
2736 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2738 #include "syscall_types.h"
2741 #undef STRUCT_SPECIAL
2743 #define STRUCT(name, list...) static const argtype struct_ ## name ## _def[] = { list, TYPE_NULL };
2744 #define STRUCT_SPECIAL(name)
2745 #include "syscall_types.h"
2747 #undef STRUCT_SPECIAL
2749 typedef struct IOCTLEntry {
2750 unsigned int target_cmd;
2751 unsigned int host_cmd;
2754 const argtype arg_type[5];
2757 #define IOC_R 0x0001
2758 #define IOC_W 0x0002
2759 #define IOC_RW (IOC_R | IOC_W)
2761 #define MAX_STRUCT_SIZE 4096
2763 static IOCTLEntry ioctl_entries[] = {
2764 #define IOCTL(cmd, access, types...) \
2765 { TARGET_ ## cmd, cmd, #cmd, access, { types } },
2770 /* ??? Implement proper locking for ioctls. */
2771 /* do_ioctl() Must return target values and target errnos. */
/* Emulates ioctl(2): looks the target command up in ioctl_entries[],
   then converts the argument according to the entry's thunk type
   description and access direction (IOC_R read-back, IOC_W write-in,
   IOC_RW both). */
2772 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
2774 const IOCTLEntry *ie;
2775 const argtype *arg_type;
2777 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* Linear scan over the table; a zero target_cmd terminates it. */
2783 if (ie->target_cmd == 0) {
2784 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
2785 return -TARGET_ENOSYS;
2787 if (ie->target_cmd == cmd)
2791 arg_type = ie->arg_type;
2793 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
2795 switch(arg_type[0]) {
/* No argument: forward directly. */
2798 ret = get_errno(ioctl(fd, ie->host_cmd));
/* Plain integer argument: pass the value through unchanged. */
2803 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* Pointer-to-struct argument: thunk-convert via buf_temp. */
2807 target_size = thunk_type_size(arg_type, 0);
2808 switch(ie->access) {
2810 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2811 if (!is_error(ret)) {
2812 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2814 return -TARGET_EFAULT;
2815 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2816 unlock_user(argptr, arg, target_size);
2820 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2822 return -TARGET_EFAULT;
2823 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2824 unlock_user(argptr, arg, 0);
2825 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* IOC_RW: convert in, call, convert the result back out. */
2829 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2831 return -TARGET_EFAULT;
2832 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2833 unlock_user(argptr, arg, 0);
2834 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2835 if (!is_error(ret)) {
2836 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2838 return -TARGET_EFAULT;
2839 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2840 unlock_user(argptr, arg, target_size);
2846 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
2847 (long)cmd, arg_type[0]);
2848 ret = -TARGET_ENOSYS;
/*
 * Bitmask translation tables between target and host termios flag words.
 * Each row is { target_mask, target_bits, host_mask, host_bits } as consumed
 * by target_to_host_bitmask() / host_to_target_bitmask().  For simple on/off
 * flags mask == bits; multi-bit fields (NLDLY, CSIZE, CBAUD, ...) get one
 * row per possible value under the same mask.
 * NOTE(review): the closing "};" of each table is elided in this listing.
 */
/* c_iflag: input-mode flags. */
2854 static const bitmask_transtbl iflag_tbl[] = {
2855 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
2856 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
2857 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
2858 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
2859 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
2860 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
2861 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
2862 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
2863 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
2864 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
2865 { TARGET_IXON, TARGET_IXON, IXON, IXON },
2866 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
2867 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
2868 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* c_oflag: output-mode flags, including the multi-bit delay fields. */
2872 static const bitmask_transtbl oflag_tbl[] = {
2873 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
2874 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
2875 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
2876 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
2877 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
2878 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
2879 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
2880 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
2881 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
2882 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
2883 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
2884 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
2885 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
2886 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
2887 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
2888 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
2889 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
2890 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
2891 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
2892 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
2893 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
2894 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
2895 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
2896 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* c_cflag: control-mode flags; CBAUD rows map each baud-rate constant,
   CSIZE rows map the character-size field. */
2900 static const bitmask_transtbl cflag_tbl[] = {
2901 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
2902 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
2903 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
2904 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
2905 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
2906 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
2907 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
2908 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
2909 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
2910 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
2911 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
2912 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
2913 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
2914 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
2915 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
2916 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
2917 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
2918 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
2919 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
2920 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
2921 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
2922 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
2923 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
2924 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
2925 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
2926 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
2927 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
2928 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
2929 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
2930 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
2931 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* c_lflag: local-mode flags. */
2935 static const bitmask_transtbl lflag_tbl[] = {
2936 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
2937 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
2938 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
2939 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
2940 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
2941 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
2942 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
2943 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
2944 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
2945 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
2946 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
2947 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
2948 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
2949 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
2950 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/*
 * Convert a struct target_termios (guest layout, guest byte order) into the
 * host's termios: the four flag words are byte-swapped and run through the
 * bitmask tables above; the control characters are remapped slot by slot.
 * void* signature matches the StructEntry .convert callback type.
 * NOTE(review): the "host->c_iflag =" style assignment lines are elided in
 * this listing (line numbers 2959/2961/2963/2965 missing) — the
 * target_to_host_bitmask() calls below are their right-hand sides.
 */
2954 static void target_to_host_termios (void *dst, const void *src)
2956 struct host_termios *host = dst;
2957 const struct target_termios *target = src;
2960 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
2962 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
2964 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
2966 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
2967 host->c_line = target->c_line;
/* Control characters: indices differ between guest and host, so copy
   each VINTR/VQUIT/... slot individually. */
2969 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
2970 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
2971 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
2972 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
2973 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
2974 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
2975 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
2976 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
2977 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
2978 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
2979 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
2980 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
2981 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
2982 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
2983 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
2984 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
2985 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * Inverse of target_to_host_termios(): convert the host's termios back into
 * guest layout/byte order for returning to the guest (e.g. after TCGETS).
 * NOTE(review): the "target->c_iflag =" style assignment lines are elided
 * here (line numbers jump) — the tswap32(...) calls below are their
 * right-hand sides.
 */
2988 static void host_to_target_termios (void *dst, const void *src)
2990 struct target_termios *target = dst;
2991 const struct host_termios *host = src;
2994 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
2996 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
2998 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3000 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3001 target->c_line = host->c_line;
/* Remap each control-character slot back to the guest's index space. */
3003 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3004 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3005 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3006 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3007 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3008 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3009 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3010 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3011 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3012 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3013 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3014 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3015 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3016 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3017 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3018 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3019 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/*
 * StructEntry registered with the thunk layer so termios payloads (e.g. for
 * TCGETS/TCSETS ioctls) are converted with the two functions above rather
 * than by generic field-wise conversion.  Array slot 0 is the host->target
 * direction, slot 1 target->host, matching the .size/.align pairs.
 * NOTE(review): closing "};" elided in this listing.
 */
3022 static const StructEntry struct_termios_def = {
3023 .convert = { host_to_target_termios, target_to_host_termios },
3024 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3025 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/*
 * Translation table for mmap(2) flag bits between target and host values.
 * Same row format as the termios tables above.
 * NOTE(review): closing "};" elided in this listing.
 */
3028 static bitmask_transtbl mmap_flags_tbl[] = {
3029 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3030 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3031 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3032 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3033 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3034 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3035 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3036 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
/*
 * i386 guest only: emulation of modify_ldt(2) read mode.
 * The whole guest LDT lives in ldt_table (allocated lazily by write_ldt);
 * read_ldt() copies up to bytecount bytes of it into guest memory at ptr.
 * NOTE(review): lines elided in this listing (function opening brace,
 * locals, the "size = bytecount" clamp branch and the return).
 */
3040 #if defined(TARGET_I386)
3042 /* NOTE: there is really one LDT for all the threads */
3043 static uint8_t *ldt_table;
3045 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
/* Full LDT size; clamped to the caller's buffer below (elided line). */
3052 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3053 if (size > bytecount)
3055 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3057 return -TARGET_EFAULT;
3058 /* ??? Should this by byteswapped? */
3059 memcpy(p, ldt_table, size);
3060 unlock_user(p, ptr, size);
/*
 * modify_ldt(2) write mode: install one descriptor into the emulated LDT.
 * Copies a target struct user_desc-alike from guest memory, decodes its
 * packed flags field, lazily allocates the guest-visible LDT mapping, and
 * encodes the two 32-bit descriptor words (entry_1/entry_2) exactly like
 * the Linux kernel does.  oldmode selects the legacy flag interpretation.
 * NOTE(review): many lines elided here (locals, the oldmode branches, the
 * "allocate the LDT" guard condition, the clear-entry fast path body).
 */
3064 /* XXX: add locking support */
3065 static abi_long write_ldt(CPUX86State *env,
3066 abi_ulong ptr, unsigned long bytecount, int oldmode)
3068 struct target_modify_ldt_ldt_s ldt_info;
3069 struct target_modify_ldt_ldt_s *target_ldt_info;
3070 int seg_32bit, contents, read_exec_only, limit_in_pages;
3071 int seg_not_present, useable, lm;
3072 uint32_t *lp, entry_1, entry_2;
3074 if (bytecount != sizeof(ldt_info))
3075 return -TARGET_EINVAL;
3076 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3077 return -TARGET_EFAULT;
/* Copy the guest descriptor request, swapping to host byte order. */
3078 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3079 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3080 ldt_info.limit = tswap32(target_ldt_info->limit);
3081 ldt_info.flags = tswap32(target_ldt_info->flags);
3082 unlock_user_struct(target_ldt_info, ptr, 0);
3084 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3085 return -TARGET_EINVAL;
/* Unpack the flags word (same bit layout as Linux struct user_desc). */
3086 seg_32bit = ldt_info.flags & 1;
3087 contents = (ldt_info.flags >> 1) & 3;
3088 read_exec_only = (ldt_info.flags >> 3) & 1;
3089 limit_in_pages = (ldt_info.flags >> 4) & 1;
3090 seg_not_present = (ldt_info.flags >> 5) & 1;
3091 useable = (ldt_info.flags >> 6) & 1;
3095 lm = (ldt_info.flags >> 7) & 1;
3097 if (contents == 3) {
3099 return -TARGET_EINVAL;
3100 if (seg_not_present == 0)
3101 return -TARGET_EINVAL;
/* Lazily allocate the single shared LDT backing store in guest memory. */
3103 /* allocate the LDT */
3105 env->ldt.base = target_mmap(0,
3106 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3107 PROT_READ|PROT_WRITE,
3108 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3109 if (env->ldt.base == -1)
3110 return -TARGET_ENOMEM;
3111 memset(g2h(env->ldt.base), 0,
3112 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3113 env->ldt.limit = 0xffff;
3114 ldt_table = g2h(env->ldt.base);
3117 /* NOTE: same code as Linux kernel */
3118 /* Allow LDTs to be cleared by the user. */
3119 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3122 read_exec_only == 1 &&
3124 limit_in_pages == 0 &&
3125 seg_not_present == 1 &&
/* Encode the descriptor: entry_1 = base[15:0] | limit[15:0], entry_2 =
   base high bytes, limit[19:16] and the attribute bits. */
3133 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3134 (ldt_info.limit & 0x0ffff);
3135 entry_2 = (ldt_info.base_addr & 0xff000000) |
3136 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3137 (ldt_info.limit & 0xf0000) |
3138 ((read_exec_only ^ 1) << 9) |
3140 ((seg_not_present ^ 1) << 15) |
3142 (limit_in_pages << 23) |
3146 entry_2 |= (useable << 20);
3148 /* Install the new entry ... */
3150 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3151 lp[0] = tswap32(entry_1);
3152 lp[1] = tswap32(entry_2);
/*
 * Dispatcher for the i386 modify_ldt(2) syscall.
 * NOTE(review): switch/case labels elided in this listing; from the calls,
 * func presumably selects read (read_ldt), old-mode write (write_ldt
 * oldmode=1) and new-mode write (oldmode=0) — confirm against full source.
 */
3156 /* specific and weird i386 syscalls */
3157 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3158 unsigned long bytecount)
3164 ret = read_ldt(ptr, bytecount);
3167 ret = write_ldt(env, ptr, bytecount, 1);
3170 ret = write_ldt(env, ptr, bytecount, 0);
3173 ret = -TARGET_ENOSYS;
/*
 * set_thread_area(2) emulation for 32-bit i386 guests: install a TLS
 * descriptor directly into the emulated GDT.  Mirrors write_ldt() above but
 * targets gdt_table and supports entry_number == -1 ("pick a free TLS slot",
 * written back to the guest).  Descriptor encoding is the same as the Linux
 * kernel's.
 * NOTE(review): several lines elided (loop braces, clear-entry path body,
 * parts of the entry_2 bit assembly, final return).
 */
3179 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3180 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3182 uint64_t *gdt_table = g2h(env->gdt.base);
3183 struct target_modify_ldt_ldt_s ldt_info;
3184 struct target_modify_ldt_ldt_s *target_ldt_info;
3185 int seg_32bit, contents, read_exec_only, limit_in_pages;
3186 int seg_not_present, useable, lm;
3187 uint32_t *lp, entry_1, entry_2;
3190 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3191 if (!target_ldt_info)
3192 return -TARGET_EFAULT;
3193 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3194 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3195 ldt_info.limit = tswap32(target_ldt_info->limit);
3196 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 asks us to allocate: scan the TLS range of the GDT for an empty
   slot and report the chosen index back to the guest. */
3197 if (ldt_info.entry_number == -1) {
3198 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3199 if (gdt_table[i] == 0) {
3200 ldt_info.entry_number = i;
3201 target_ldt_info->entry_number = tswap32(i);
3206 unlock_user_struct(target_ldt_info, ptr, 1);
3208 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3209 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3210 return -TARGET_EINVAL;
/* Unpack the user_desc flags word (same layout as in write_ldt). */
3211 seg_32bit = ldt_info.flags & 1;
3212 contents = (ldt_info.flags >> 1) & 3;
3213 read_exec_only = (ldt_info.flags >> 3) & 1;
3214 limit_in_pages = (ldt_info.flags >> 4) & 1;
3215 seg_not_present = (ldt_info.flags >> 5) & 1;
3216 useable = (ldt_info.flags >> 6) & 1;
3220 lm = (ldt_info.flags >> 7) & 1;
3223 if (contents == 3) {
3224 if (seg_not_present == 0)
3225 return -TARGET_EINVAL;
3228 /* NOTE: same code as Linux kernel */
3229 /* Allow LDTs to be cleared by the user. */
3230 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3231 if ((contents == 0 &&
3232 read_exec_only == 1 &&
3234 limit_in_pages == 0 &&
3235 seg_not_present == 1 &&
/* Assemble the two descriptor words, as in write_ldt(). */
3243 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3244 (ldt_info.limit & 0x0ffff);
3245 entry_2 = (ldt_info.base_addr & 0xff000000) |
3246 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3247 (ldt_info.limit & 0xf0000) |
3248 ((read_exec_only ^ 1) << 9) |
3250 ((seg_not_present ^ 1) << 15) |
3252 (limit_in_pages << 23) |
3257 /* Install the new entry ... */
3259 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3260 lp[0] = tswap32(entry_1);
3261 lp[1] = tswap32(entry_2);
/*
 * get_thread_area(2) emulation: read a TLS descriptor back out of the
 * emulated GDT and decode the two descriptor words into the flat
 * base/limit/flags form of struct user_desc, written to guest memory.
 * Exact inverse of the encoding in do_set_thread_area().
 * NOTE(review): function braces/return and some lines elided in this
 * listing.
 */
3265 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3267 struct target_modify_ldt_ldt_s *target_ldt_info;
3268 uint64_t *gdt_table = g2h(env->gdt.base);
3269 uint32_t base_addr, limit, flags;
3270 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3271 int seg_not_present, useable, lm;
3272 uint32_t *lp, entry_1, entry_2;
3274 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3275 if (!target_ldt_info)
3276 return -TARGET_EFAULT;
/* Only TLS slots may be queried. */
3277 idx = tswap32(target_ldt_info->entry_number);
3278 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3279 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3280 unlock_user_struct(target_ldt_info, ptr, 1);
3281 return -TARGET_EINVAL;
3283 lp = (uint32_t *)(gdt_table + idx);
3284 entry_1 = tswap32(lp[0]);
3285 entry_2 = tswap32(lp[1]);
/* Decode attribute bits (note read_exec_only/seg_not_present are stored
   inverted in the descriptor, hence the ^ 1). */
3287 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3288 contents = (entry_2 >> 10) & 3;
3289 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3290 seg_32bit = (entry_2 >> 22) & 1;
3291 limit_in_pages = (entry_2 >> 23) & 1;
3292 useable = (entry_2 >> 20) & 1;
3296 lm = (entry_2 >> 21) & 1;
/* Repack into the user_desc flags word and scattered base/limit fields. */
3298 flags = (seg_32bit << 0) | (contents << 1) |
3299 (read_exec_only << 3) | (limit_in_pages << 4) |
3300 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3301 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3302 base_addr = (entry_1 >> 16) |
3303 (entry_2 & 0xff000000) |
3304 ((entry_2 & 0xff) << 16);
3305 target_ldt_info->base_addr = tswapl(base_addr);
3306 target_ldt_info->limit = tswap32(limit);
3307 target_ldt_info->flags = tswap32(flags);
3308 unlock_user_struct(target_ldt_info, ptr, 1);
3311 #endif /* TARGET_I386 && TARGET_ABI32 */
/*
 * x86-64 guests only (#ifndef TARGET_ABI32): arch_prctl(2) emulation for
 * setting/getting the FS and GS segment base registers.  SET loads a null
 * selector and stores the base directly into the CPU state; GET copies the
 * base out to guest memory.
 * NOTE(review): elided lines include the idx selection (R_GS vs R_FS) and
 * the switch braces/returns.
 */
3313 #ifndef TARGET_ABI32
3314 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3321 case TARGET_ARCH_SET_GS:
3322 case TARGET_ARCH_SET_FS:
3323 if (code == TARGET_ARCH_SET_GS)
3327 cpu_x86_load_seg(env, idx, 0);
3328 env->segs[idx].base = addr;
3330 case TARGET_ARCH_GET_GS:
3331 case TARGET_ARCH_GET_FS:
3332 if (code == TARGET_ARCH_GET_GS)
3336 val = env->segs[idx].base;
3337 if (put_user(val, addr, abi_ulong))
3338 return -TARGET_EFAULT;
3341 ret = -TARGET_EINVAL;
/*
 * Thread-creation support for do_fork().
 * With NPTL: new_thread_info carries the handshake state between parent and
 * the new pthread; clone_func() is the pthread trampoline that publishes
 * its tid per the CLONE_* flags, unblocks signals, signals readiness via
 * info->cond, then waits on clone_lock until the parent finishes TLS setup.
 * Without NPTL: clone_func() is a plain clone(2) child entry point.
 * NOTE(review): struct/function braces and several lines (struct fields,
 * the cpu_loop call, #else) are elided in this listing.
 */
3350 #if defined(USE_NPTL)
3352 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
3354 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3357 pthread_mutex_t mutex;
3358 pthread_cond_t cond;
3362 abi_ulong child_tidptr;
3363 abi_ulong parent_tidptr;
3367 static void *clone_func(void *arg)
3369 new_thread_info *info = arg;
/* Publish the child's tid wherever the clone flags requested. */
3374 info->tid = gettid();
3375 if (info->flags & CLONE_CHILD_SETTID)
3376 put_user_u32(info->tid, info->child_tidptr);
3377 if (info->flags & CLONE_CHILD_CLEARTID)
3378 set_tid_address(g2h(info->child_tidptr));
3379 if (info->flags & CLONE_PARENT_SETTID)
3380 put_user_u32(info->tid, info->parent_tidptr);
3381 /* Enable signals. */
3382 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3383 /* Signal to the parent that we're ready. */
3384 pthread_mutex_lock(&info->mutex);
3385 pthread_cond_broadcast(&info->cond);
3386 pthread_mutex_unlock(&info->mutex);
3387 /* Wait until the parent has finshed initializing the tls state. */
3388 pthread_mutex_lock(&clone_lock);
3389 pthread_mutex_unlock(&clone_lock);
3395 /* this stack is the equivalent of the kernel stack associated with a
3397 #define NEW_STACK_SIZE 8192
/* Non-NPTL path: entry point passed to clone(2)/__clone2(). */
3399 static int clone_func(void *arg)
3401 CPUState *env = arg;
/*
 * do_fork(): implement the guest's clone()/fork()/vfork() family.
 * CLONE_VM requests a thread: with NPTL a host pthread is created and
 * handshaken via new_thread_info (all signals blocked until the child is
 * initialized, clone_lock makes the setup appear atomic); without NPTL a
 * raw host clone() on a private stack is used.  Without CLONE_VM a plain
 * fork() is performed and the child fixes up its own registers/TLS/tid.
 * Returns host values and target errnos (unlike most do_*() helpers).
 * NOTE(review): many lines elided in this listing (locals, #else/#endif,
 * the fork() call itself, return paths, several closing braces).
 */
3408 /* do_fork() Must return host values and target errnos (unlike most
3409 do_*() functions). */
3410 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3411 abi_ulong parent_tidptr, target_ulong newtls,
3412 abi_ulong child_tidptr)
3418 #if defined(USE_NPTL)
3419 unsigned int nptl_flags;
3423 /* Emulate vfork() with fork() */
3424 if (flags & CLONE_VFORK)
3425 flags &= ~(CLONE_VFORK | CLONE_VM)
3427 if (flags & CLONE_VM) {
3428 #if defined(USE_NPTL)
3429 new_thread_info info;
3430 pthread_attr_t attr;
/* Task state and the new thread's stack live in one allocation. */
3432 ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
3433 init_task_state(ts);
3434 new_stack = ts->stack;
3435 /* we create a new CPU instance. */
3436 new_env = cpu_copy(env);
3437 /* Init regs that differ from the parent. */
3438 cpu_clone_regs(new_env, newsp);
3439 new_env->opaque = ts;
3440 #if defined(USE_NPTL)
/* NPTL-specific flags are handled here, not passed to the host. */
3442 flags &= ~CLONE_NPTL_FLAGS2;
3444 if (nptl_flags & CLONE_CHILD_CLEARTID) {
3445 ts->child_tidptr = child_tidptr;
3448 if (nptl_flags & CLONE_SETTLS)
3449 cpu_set_tls (new_env, newtls);
3451 /* Grab a mutex so that thread setup appears atomic. */
3452 pthread_mutex_lock(&clone_lock);
3454 memset(&info, 0, sizeof(info));
3455 pthread_mutex_init(&info.mutex, NULL);
3456 pthread_mutex_lock(&info.mutex);
3457 pthread_cond_init(&info.cond, NULL);
3459 info.flags = nptl_flags;
3460 if (nptl_flags & CLONE_CHILD_SETTID ||
3461 nptl_flags & CLONE_CHILD_CLEARTID)
3462 info.child_tidptr = child_tidptr;
3463 if (nptl_flags & CLONE_PARENT_SETTID)
3464 info.parent_tidptr = parent_tidptr;
3466 ret = pthread_attr_init(&attr);
3467 ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
3468 /* It is not safe to deliver signals until the child has finished
3469 initializing, so temporarily block all signals. */
3470 sigfillset(&sigmask);
3471 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3473 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3474 /* TODO: Free new CPU state if thread creation failed. */
3476 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3477 pthread_attr_destroy(&attr);
3479 /* Wait for the child to initialize. */
3480 pthread_cond_wait(&info.cond, &info.mutex);
3482 if (flags & CLONE_PARENT_SETTID)
3483 put_user_u32(ret, parent_tidptr);
3487 pthread_mutex_unlock(&info.mutex);
3488 pthread_cond_destroy(&info.cond);
3489 pthread_mutex_destroy(&info.mutex);
3490 pthread_mutex_unlock(&clone_lock);
3492 if (flags & CLONE_NPTL_FLAGS2)
3494 /* This is probably going to die very quickly, but do it anyway. */
3496 ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3498 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3502 /* if no CLONE_VM, we consider it is a fork */
3503 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3508 /* Child Process. */
3509 cpu_clone_regs(env, newsp);
3511 #if defined(USE_NPTL)
3512 /* There is a race condition here. The parent process could
3513 theoretically read the TID in the child process before the child
3514 tid is set. This would require using either ptrace
3515 (not implemented) or having *_tidptr to point at a shared memory
3516 mapping. We can't repeat the spinlock hack used above because
3517 the child process gets its own copy of the lock. */
3518 if (flags & CLONE_CHILD_SETTID)
3519 put_user_u32(gettid(), child_tidptr);
3520 if (flags & CLONE_PARENT_SETTID)
3521 put_user_u32(gettid(), parent_tidptr);
3522 ts = (TaskState *)env->opaque;
3523 if (flags & CLONE_SETTLS)
3524 cpu_set_tls (env, newtls);
3525 if (flags & CLONE_CHILD_CLEARTID)
3526 ts->child_tidptr = child_tidptr;
/*
 * do_fcntl(): emulate fcntl(2), converting struct flock / struct flock64
 * between target and host layouts for the locking commands, and translating
 * status-flag bitmasks for F_GETFL/F_SETFL.  Returns target errnos.
 * NOTE(review): case labels, breaks and the default path are partially
 * elided in this listing.
 * NOTE(review): the ">> 1" on l_type and on cmd in the *64 cases looks
 * suspicious (tswap16(l_type) >> 1 would mangle the lock type, and
 * "cmd >> 1" is not an obvious F_*LK64 -> F_*LK mapping) — verify against
 * the full source / upstream history before relying on this.
 */
3535 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
3538 struct target_flock *target_fl;
3539 struct flock64 fl64;
3540 struct target_flock64 *target_fl64;
/* F_GETLK: copy the guest's probe lock in, query, copy the result back. */
3544 case TARGET_F_GETLK:
3545 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3546 return -TARGET_EFAULT;
3547 fl.l_type = tswap16(target_fl->l_type);
3548 fl.l_whence = tswap16(target_fl->l_whence);
3549 fl.l_start = tswapl(target_fl->l_start);
3550 fl.l_len = tswapl(target_fl->l_len);
3551 fl.l_pid = tswapl(target_fl->l_pid);
3552 unlock_user_struct(target_fl, arg, 0);
3553 ret = get_errno(fcntl(fd, cmd, &fl));
3555 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
3556 return -TARGET_EFAULT;
3557 target_fl->l_type = tswap16(fl.l_type);
3558 target_fl->l_whence = tswap16(fl.l_whence);
3559 target_fl->l_start = tswapl(fl.l_start);
3560 target_fl->l_len = tswapl(fl.l_len);
3561 target_fl->l_pid = tswapl(fl.l_pid);
3562 unlock_user_struct(target_fl, arg, 1);
/* F_SETLK/F_SETLKW: input-only conversion. */
3566 case TARGET_F_SETLK:
3567 case TARGET_F_SETLKW:
3568 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3569 return -TARGET_EFAULT;
3570 fl.l_type = tswap16(target_fl->l_type);
3571 fl.l_whence = tswap16(target_fl->l_whence);
3572 fl.l_start = tswapl(target_fl->l_start);
3573 fl.l_len = tswapl(target_fl->l_len);
3574 fl.l_pid = tswapl(target_fl->l_pid);
3575 unlock_user_struct(target_fl, arg, 0);
3576 ret = get_errno(fcntl(fd, cmd, &fl));
/* 64-bit variants using struct flock64. */
3579 case TARGET_F_GETLK64:
3580 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3581 return -TARGET_EFAULT;
3582 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3583 fl64.l_whence = tswap16(target_fl64->l_whence);
3584 fl64.l_start = tswapl(target_fl64->l_start);
3585 fl64.l_len = tswapl(target_fl64->l_len);
3586 fl64.l_pid = tswap16(target_fl64->l_pid);
3587 unlock_user_struct(target_fl64, arg, 0);
3588 ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
3590 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
3591 return -TARGET_EFAULT;
3592 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
3593 target_fl64->l_whence = tswap16(fl64.l_whence);
3594 target_fl64->l_start = tswapl(fl64.l_start);
3595 target_fl64->l_len = tswapl(fl64.l_len);
3596 target_fl64->l_pid = tswapl(fl64.l_pid);
3597 unlock_user_struct(target_fl64, arg, 1);
3600 case TARGET_F_SETLK64:
3601 case TARGET_F_SETLKW64:
3602 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3603 return -TARGET_EFAULT;
3604 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3605 fl64.l_whence = tswap16(target_fl64->l_whence);
3606 fl64.l_start = tswapl(target_fl64->l_start);
3607 fl64.l_len = tswapl(target_fl64->l_len);
3608 fl64.l_pid = tswap16(target_fl64->l_pid);
3609 unlock_user_struct(target_fl64, arg, 0);
3610 ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
/* F_GETFL: translate host status flags back to target encoding. */
3614 ret = get_errno(fcntl(fd, cmd, arg));
3616 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
/* F_SETFL: translate target status flags to host encoding. */
3621 ret = get_errno(fcntl(fd, cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
/* Default: pass the argument through untranslated. */
3625 ret = get_errno(fcntl(fd, cmd, arg));
/*
 * Helpers for targets with legacy 16-bit uid/gid syscalls (USE_UID16):
 * high2low* truncate 32-bit host ids for returning to the guest, low2high*
 * widen guest-supplied ids, preserving the special -1 ("unchanged") value.
 * NOTE(review): the function bodies' return statements are elided in this
 * listing.
 */
3633 static inline int high2lowuid(int uid)
3641 static inline int high2lowgid(int gid)
3649 static inline int low2highuid(int uid)
/* -1 must stay -1 (e.g. for setreuid) rather than become 0xffff. */
3651 if ((int16_t)uid == -1)
3657 static inline int low2highgid(int gid)
3659 if ((int16_t)gid == -1)
3665 #endif /* USE_UID16 */
/*
 * One-time initialization of the syscall emulation layer:
 *  - registers every structure layout from syscall_types.h with the thunk
 *    code (STRUCT_SPECIAL entries use a hand-written StructEntry);
 *  - patches ioctl_entries[] whose size field was left as all-ones,
 *    substituting the real thunk-computed payload size;
 *  - builds target_to_host_errno_table[] by inverting the host->target map;
 *  - on same-arch builds, sanity-checks that target and host ioctl numbers
 *    agree.
 * NOTE(review): loop headers/braces and the exit() on the error paths are
 * elided in this listing.
 */
3667 void syscall_init(void)
3670 const argtype *arg_type;
3674 #define STRUCT(name, list...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3675 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3676 #include "syscall_types.h"
3678 #undef STRUCT_SPECIAL
3680 /* we patch the ioctl size if necessary. We rely on the fact that
3681 no ioctl has all the bits at '1' in the size field */
3683 while (ie->target_cmd != 0) {
3684 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
3685 TARGET_IOC_SIZEMASK) {
3686 arg_type = ie->arg_type;
/* Only pointer payloads have a thunk-computable size. */
3687 if (arg_type[0] != TYPE_PTR) {
3688 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
3693 size = thunk_type_size(arg_type, 0);
3694 ie->target_cmd = (ie->target_cmd &
3695 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
3696 (size << TARGET_IOC_SIZESHIFT);
3699 /* Build target_to_host_errno_table[] table from
3700 * host_to_target_errno_table[]. */
3701 for (i=0; i < ERRNO_TABLE_SIZE; i++)
3702 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
3704 /* automatic consistency check if same arch */
3705 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3706 (defined(__x86_64__) && defined(TARGET_X86_64))
3707 if (unlikely(ie->target_cmd != ie->host_cmd)) {
3708 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3709 ie->name, ie->target_cmd, ie->host_cmd);
/*
 * Combine the two registers a 32-bit ABI uses to pass a 64-bit file offset
 * into one uint64_t, respecting the guest's word order (big-endian guests
 * pass the high word first).  On 64-bit ABIs word0 already holds the whole
 * offset (the trivial return is elided in this listing).
 */
3716 #if TARGET_ABI_BITS == 32
3717 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
3719 #ifdef TARGET_WORDS_BIGENDIAN
3720 return ((uint64_t)word0 << 32) | word1;
3722 return ((uint64_t)word1 << 32) | word0;
3725 #else /* TARGET_ABI_BITS == 32 */
3726 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
3730 #endif /* TARGET_ABI_BITS != 32 */
/*
 * truncate64/ftruncate64 wrappers for 32-bit guests: reassemble the 64-bit
 * length from two syscall argument words via target_offset64().  The ARM
 * EABI check handles that ABI's alignment of 64-bit register pairs (the
 * argument-shifting branch body is elided in this listing).
 */
3732 #ifdef TARGET_NR_truncate64
3733 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
/* ARM EABI aligns the 64-bit pair to an even register — shift args. */
3739 if (((CPUARMState *)cpu_env)->eabi)
3745 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
3749 #ifdef TARGET_NR_ftruncate64
3750 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
3756 if (((CPUARMState *)cpu_env)->eabi)
3762 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/*
 * Copy a struct timespec from guest memory into host form, byte-swapping
 * both fields.  Returns -TARGET_EFAULT if the guest address is unmapped;
 * the success return (0) is elided in this listing.
 */
3766 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
3767 abi_ulong target_addr)
3769 struct target_timespec *target_ts;
3771 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
3772 return -TARGET_EFAULT;
3773 host_ts->tv_sec = tswapl(target_ts->tv_sec);
3774 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
3775 unlock_user_struct(target_ts, target_addr, 0);
/*
 * Inverse of target_to_host_timespec(): write a host struct timespec out to
 * guest memory in guest byte order.  Returns -TARGET_EFAULT on bad address;
 * the success return is elided in this listing.
 */
3779 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
3780 struct timespec *host_ts)
3782 struct target_timespec *target_ts;
3784 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
3785 return -TARGET_EFAULT;
3786 target_ts->tv_sec = tswapl(host_ts->tv_sec);
3787 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
3788 unlock_user_struct(target_ts, target_addr, 1);
/*
 * Convert a host struct stat into the guest's 64-bit stat layout at
 * target_addr.  ARM EABI guests use the distinct target_eabi_stat64 layout
 * (first branch); everyone else uses target_stat / target_stat64 depending
 * on TARGET_LONG_BITS.  __put_user handles the per-field byte swapping.
 * NOTE(review): braces, #else/#endif and return statements are elided in
 * this listing.
 */
3792 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
3793 static inline abi_long host_to_target_stat64(void *cpu_env,
3794 abi_ulong target_addr,
3795 struct stat *host_st)
3798 if (((CPUARMState *)cpu_env)->eabi) {
3799 struct target_eabi_stat64 *target_st;
3801 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3802 return -TARGET_EFAULT;
3803 memset(target_st, 0, sizeof(struct target_eabi_stat64));
3804 __put_user(host_st->st_dev, &target_st->st_dev);
3805 __put_user(host_st->st_ino, &target_st->st_ino);
3806 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* Some ABIs carry the inode in a second __st_ino field as well. */
3807 __put_user(host_st->st_ino, &target_st->__st_ino);
3809 __put_user(host_st->st_mode, &target_st->st_mode);
3810 __put_user(host_st->st_nlink, &target_st->st_nlink);
3811 __put_user(host_st->st_uid, &target_st->st_uid);
3812 __put_user(host_st->st_gid, &target_st->st_gid);
3813 __put_user(host_st->st_rdev, &target_st->st_rdev);
3814 __put_user(host_st->st_size, &target_st->st_size);
3815 __put_user(host_st->st_blksize, &target_st->st_blksize);
3816 __put_user(host_st->st_blocks, &target_st->st_blocks);
3817 __put_user(host_st->st_atime, &target_st->target_st_atime);
3818 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3819 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3820 unlock_user_struct(target_st, target_addr, 1);
/* Non-EABI path: pick the struct matching the guest's word size. */
3824 #if TARGET_LONG_BITS == 64
3825 struct target_stat *target_st;
3827 struct target_stat64 *target_st;
3830 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3831 return -TARGET_EFAULT;
3832 memset(target_st, 0, sizeof(*target_st));
3833 __put_user(host_st->st_dev, &target_st->st_dev);
3834 __put_user(host_st->st_ino, &target_st->st_ino);
3835 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3836 __put_user(host_st->st_ino, &target_st->__st_ino);
3838 __put_user(host_st->st_mode, &target_st->st_mode);
3839 __put_user(host_st->st_nlink, &target_st->st_nlink);
3840 __put_user(host_st->st_uid, &target_st->st_uid);
3841 __put_user(host_st->st_gid, &target_st->st_gid);
3842 __put_user(host_st->st_rdev, &target_st->st_rdev);
3843 /* XXX: better use of kernel struct */
3844 __put_user(host_st->st_size, &target_st->st_size);
3845 __put_user(host_st->st_blksize, &target_st->st_blksize);
3846 __put_user(host_st->st_blocks, &target_st->st_blocks);
3847 __put_user(host_st->st_atime, &target_st->target_st_atime);
3848 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3849 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3850 unlock_user_struct(target_st, target_addr, 1);
/*
 * do_futex(): forward the guest's futex(2) straight to the host futex
 * syscall on the g2h-translated address.  The optional timeout is converted
 * with target_to_host_timespec(); WAIT/CMP_REQUEUE compare values are
 * byte-swapped because the word in guest memory is in guest order.
 * NOTE(review): the switch(op) header, FUTEX_WAIT/WAKE/FD/REQUEUE case
 * labels and the pts setup branch are partially elided in this listing.
 */
3857 #if defined(USE_NPTL)
3858 /* ??? Using host futex calls even when target atomic operations
3859 are not really atomic probably breaks things. However implementing
3860 futexes locally would make futexes shared between multiple processes
3861 tricky. However they're probably useless because guest atomic
3862 operations won't work either. */
3863 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
3864 target_ulong uaddr2, int val3)
3866 struct timespec ts, *pts;
3868 /* ??? We assume FUTEX_* constants are the same on both host
3874 target_to_host_timespec(pts, timeout);
/* FUTEX_WAIT: val is compared against *uaddr, so swap to guest order. */
3878 return get_errno(sys_futex(g2h(uaddr), FUTEX_WAIT, tswap32(val),
3881 return get_errno(sys_futex(g2h(uaddr), FUTEX_WAKE, val, NULL, NULL, 0));
3883 return get_errno(sys_futex(g2h(uaddr), FUTEX_FD, val, NULL, NULL, 0));
3885 return get_errno(sys_futex(g2h(uaddr), FUTEX_REQUEUE, val,
3886 NULL, g2h(uaddr2), 0));
3887 case FUTEX_CMP_REQUEUE:
3888 return get_errno(sys_futex(g2h(uaddr), FUTEX_CMP_REQUEUE, val,
3889 NULL, g2h(uaddr2), tswap32(val3)));
3891 return -TARGET_ENOSYS;
/* Compute an integer kernel-version code from the release string.
   The version is taken from the user-supplied qemu_uname_release when
   set, otherwise from the host's sys_uname() result, and each of up to
   three dot-separated numeric components is packed into one byte of the
   result ((tmp << 8) + n), KERNEL_VERSION-style.
   NOTE(review): gap-sampled listing — the declarations of s/i/n/tmp,
   the component-separator handling, and the lines that store/return the
   static 'osversion' cache are not visible here; the static variable
   presumably memoizes the result — confirm against full source. */
3896 int get_osversion(void)
3898 static int osversion;
3899 struct new_utsname buf;
/* Prefer the release string the user forced via -r/qemu_uname_release. */
3904 if (qemu_uname_release && *qemu_uname_release) {
3905 s = qemu_uname_release;
/* Fall back to the real host uname; bail out if it fails. */
3907 if (sys_uname(&buf))
/* Parse at most three numeric components (major, minor, patch). */
3912 for (i = 0; i < 3; i++) {
3914 while (*s >= '0' && *s <= '9') {
/* Shift the previous components up and append this one as a byte. */
3919 tmp = (tmp << 8) + n;
3927 /* do_syscall() should always have a single exit point at the end so
3928 that actions, such as logging of syscall results, can be performed.
3929 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
3930 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
3931 abi_long arg2, abi_long arg3, abi_long arg4,
3932 abi_long arg5, abi_long arg6)
3940 gemu_log("syscall %d", num);
3943 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
3946 case TARGET_NR_exit:
3948 /* In old applications this may be used to implement _exit(2).
3949 However in threaded applictions it is used for thread termination,
3950 and _exit_group is used for application termination.
3951 Do thread termination if we have more then one thread. */
3952 /* FIXME: This probably breaks if a signal arrives. We should probably
3953 be disabling signals. */
3954 if (first_cpu->next_cpu) {
3961 while (p && p != (CPUState *)cpu_env) {
3962 lastp = &p->next_cpu;
3965 /* If we didn't find the CPU for this thread then something is
3969 /* Remove the CPU from the list. */
3970 *lastp = p->next_cpu;
3972 TaskState *ts = ((CPUState *)cpu_env)->opaque;
3973 if (ts->child_tidptr) {
3974 put_user_u32(0, ts->child_tidptr);
3975 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
3978 /* TODO: Free CPU state. */
3985 gdb_exit(cpu_env, arg1);
3987 ret = 0; /* avoid warning */
3989 case TARGET_NR_read:
3993 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
3995 ret = get_errno(read(arg1, p, arg3));
3996 unlock_user(p, arg2, ret);
3999 case TARGET_NR_write:
4000 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4002 ret = get_errno(write(arg1, p, arg3));
4003 unlock_user(p, arg2, 0);
4005 case TARGET_NR_open:
4006 if (!(p = lock_user_string(arg1)))
4008 ret = get_errno(open(path(p),
4009 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4011 unlock_user(p, arg1, 0);
4013 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4014 case TARGET_NR_openat:
4015 if (!(p = lock_user_string(arg2)))
4017 ret = get_errno(sys_openat(arg1,
4019 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4021 unlock_user(p, arg2, 0);
4024 case TARGET_NR_close:
4025 ret = get_errno(close(arg1));
4030 case TARGET_NR_fork:
4031 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4033 #ifdef TARGET_NR_waitpid
4034 case TARGET_NR_waitpid:
4037 ret = get_errno(waitpid(arg1, &status, arg3));
4038 if (!is_error(ret) && arg2
4039 && put_user_s32(status, arg2))
4044 #ifdef TARGET_NR_waitid
4045 case TARGET_NR_waitid:
4049 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4050 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4051 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4053 host_to_target_siginfo(p, &info);
4054 unlock_user(p, arg3, sizeof(target_siginfo_t));
4059 #ifdef TARGET_NR_creat /* not on alpha */
4060 case TARGET_NR_creat:
4061 if (!(p = lock_user_string(arg1)))
4063 ret = get_errno(creat(p, arg2));
4064 unlock_user(p, arg1, 0);
4067 case TARGET_NR_link:
4070 p = lock_user_string(arg1);
4071 p2 = lock_user_string(arg2);
4073 ret = -TARGET_EFAULT;
4075 ret = get_errno(link(p, p2));
4076 unlock_user(p2, arg2, 0);
4077 unlock_user(p, arg1, 0);
4080 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4081 case TARGET_NR_linkat:
4086 p = lock_user_string(arg2);
4087 p2 = lock_user_string(arg4);
4089 ret = -TARGET_EFAULT;
4091 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4092 unlock_user(p, arg2, 0);
4093 unlock_user(p2, arg4, 0);
4097 case TARGET_NR_unlink:
4098 if (!(p = lock_user_string(arg1)))
4100 ret = get_errno(unlink(p));
4101 unlock_user(p, arg1, 0);
4103 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4104 case TARGET_NR_unlinkat:
4105 if (!(p = lock_user_string(arg2)))
4107 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4108 unlock_user(p, arg2, 0);
4111 case TARGET_NR_execve:
4113 char **argp, **envp;
4116 abi_ulong guest_argp;
4117 abi_ulong guest_envp;
4123 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4124 if (get_user_ual(addr, gp))
4132 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4133 if (get_user_ual(addr, gp))
4140 argp = alloca((argc + 1) * sizeof(void *));
4141 envp = alloca((envc + 1) * sizeof(void *));
4143 for (gp = guest_argp, q = argp; gp;
4144 gp += sizeof(abi_ulong), q++) {
4145 if (get_user_ual(addr, gp))
4149 if (!(*q = lock_user_string(addr)))
4154 for (gp = guest_envp, q = envp; gp;
4155 gp += sizeof(abi_ulong), q++) {
4156 if (get_user_ual(addr, gp))
4160 if (!(*q = lock_user_string(addr)))
4165 if (!(p = lock_user_string(arg1)))
4167 ret = get_errno(execve(p, argp, envp));
4168 unlock_user(p, arg1, 0);
4173 ret = -TARGET_EFAULT;
4176 for (gp = guest_argp, q = argp; *q;
4177 gp += sizeof(abi_ulong), q++) {
4178 if (get_user_ual(addr, gp)
4181 unlock_user(*q, addr, 0);
4183 for (gp = guest_envp, q = envp; *q;
4184 gp += sizeof(abi_ulong), q++) {
4185 if (get_user_ual(addr, gp)
4188 unlock_user(*q, addr, 0);
4192 case TARGET_NR_chdir:
4193 if (!(p = lock_user_string(arg1)))
4195 ret = get_errno(chdir(p));
4196 unlock_user(p, arg1, 0);
4198 #ifdef TARGET_NR_time
4199 case TARGET_NR_time:
4202 ret = get_errno(time(&host_time));
4205 && put_user_sal(host_time, arg1))
4210 case TARGET_NR_mknod:
4211 if (!(p = lock_user_string(arg1)))
4213 ret = get_errno(mknod(p, arg2, arg3));
4214 unlock_user(p, arg1, 0);
4216 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4217 case TARGET_NR_mknodat:
4218 if (!(p = lock_user_string(arg2)))
4220 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4221 unlock_user(p, arg2, 0);
4224 case TARGET_NR_chmod:
4225 if (!(p = lock_user_string(arg1)))
4227 ret = get_errno(chmod(p, arg2));
4228 unlock_user(p, arg1, 0);
4230 #ifdef TARGET_NR_break
4231 case TARGET_NR_break:
4234 #ifdef TARGET_NR_oldstat
4235 case TARGET_NR_oldstat:
4238 case TARGET_NR_lseek:
4239 ret = get_errno(lseek(arg1, arg2, arg3));
4241 #ifdef TARGET_NR_getxpid
4242 case TARGET_NR_getxpid:
4244 case TARGET_NR_getpid:
4246 ret = get_errno(getpid());
4248 case TARGET_NR_mount:
4250 /* need to look at the data field */
4252 p = lock_user_string(arg1);
4253 p2 = lock_user_string(arg2);
4254 p3 = lock_user_string(arg3);
4255 if (!p || !p2 || !p3)
4256 ret = -TARGET_EFAULT;
4258 /* FIXME - arg5 should be locked, but it isn't clear how to
4259 * do that since it's not guaranteed to be a NULL-terminated
4262 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4263 unlock_user(p, arg1, 0);
4264 unlock_user(p2, arg2, 0);
4265 unlock_user(p3, arg3, 0);
4268 #ifdef TARGET_NR_umount
4269 case TARGET_NR_umount:
4270 if (!(p = lock_user_string(arg1)))
4272 ret = get_errno(umount(p));
4273 unlock_user(p, arg1, 0);
4276 #ifdef TARGET_NR_stime /* not on alpha */
4277 case TARGET_NR_stime:
4280 if (get_user_sal(host_time, arg1))
4282 ret = get_errno(stime(&host_time));
4286 case TARGET_NR_ptrace:
4288 #ifdef TARGET_NR_alarm /* not on alpha */
4289 case TARGET_NR_alarm:
4293 #ifdef TARGET_NR_oldfstat
4294 case TARGET_NR_oldfstat:
4297 #ifdef TARGET_NR_pause /* not on alpha */
4298 case TARGET_NR_pause:
4299 ret = get_errno(pause());
4302 #ifdef TARGET_NR_utime
4303 case TARGET_NR_utime:
4305 struct utimbuf tbuf, *host_tbuf;
4306 struct target_utimbuf *target_tbuf;
4308 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4310 tbuf.actime = tswapl(target_tbuf->actime);
4311 tbuf.modtime = tswapl(target_tbuf->modtime);
4312 unlock_user_struct(target_tbuf, arg2, 0);
4317 if (!(p = lock_user_string(arg1)))
4319 ret = get_errno(utime(p, host_tbuf));
4320 unlock_user(p, arg1, 0);
4324 case TARGET_NR_utimes:
4326 struct timeval *tvp, tv[2];
4328 if (copy_from_user_timeval(&tv[0], arg2)
4329 || copy_from_user_timeval(&tv[1],
4330 arg2 + sizeof(struct target_timeval)))
4336 if (!(p = lock_user_string(arg1)))
4338 ret = get_errno(utimes(p, tvp));
4339 unlock_user(p, arg1, 0);
4342 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4343 case TARGET_NR_futimesat:
4345 struct timeval *tvp, tv[2];
4347 if (copy_from_user_timeval(&tv[0], arg3)
4348 || copy_from_user_timeval(&tv[1],
4349 arg3 + sizeof(struct target_timeval)))
4355 if (!(p = lock_user_string(arg2)))
4357 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4358 unlock_user(p, arg2, 0);
4362 #ifdef TARGET_NR_stty
4363 case TARGET_NR_stty:
4366 #ifdef TARGET_NR_gtty
4367 case TARGET_NR_gtty:
4370 case TARGET_NR_access:
4371 if (!(p = lock_user_string(arg1)))
4373 ret = get_errno(access(p, arg2));
4374 unlock_user(p, arg1, 0);
4376 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4377 case TARGET_NR_faccessat:
4378 if (!(p = lock_user_string(arg2)))
4380 ret = get_errno(sys_faccessat(arg1, p, arg3, arg4));
4381 unlock_user(p, arg2, 0);
4384 #ifdef TARGET_NR_nice /* not on alpha */
4385 case TARGET_NR_nice:
4386 ret = get_errno(nice(arg1));
4389 #ifdef TARGET_NR_ftime
4390 case TARGET_NR_ftime:
4393 case TARGET_NR_sync:
4397 case TARGET_NR_kill:
4398 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
4400 case TARGET_NR_rename:
4403 p = lock_user_string(arg1);
4404 p2 = lock_user_string(arg2);
4406 ret = -TARGET_EFAULT;
4408 ret = get_errno(rename(p, p2));
4409 unlock_user(p2, arg2, 0);
4410 unlock_user(p, arg1, 0);
4413 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4414 case TARGET_NR_renameat:
4417 p = lock_user_string(arg2);
4418 p2 = lock_user_string(arg4);
4420 ret = -TARGET_EFAULT;
4422 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
4423 unlock_user(p2, arg4, 0);
4424 unlock_user(p, arg2, 0);
4428 case TARGET_NR_mkdir:
4429 if (!(p = lock_user_string(arg1)))
4431 ret = get_errno(mkdir(p, arg2));
4432 unlock_user(p, arg1, 0);
4434 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4435 case TARGET_NR_mkdirat:
4436 if (!(p = lock_user_string(arg2)))
4438 ret = get_errno(sys_mkdirat(arg1, p, arg3));
4439 unlock_user(p, arg2, 0);
4442 case TARGET_NR_rmdir:
4443 if (!(p = lock_user_string(arg1)))
4445 ret = get_errno(rmdir(p));
4446 unlock_user(p, arg1, 0);
4449 ret = get_errno(dup(arg1));
4451 case TARGET_NR_pipe:
4454 ret = get_errno(pipe(host_pipe));
4455 if (!is_error(ret)) {
4456 #if defined(TARGET_MIPS)
4457 CPUMIPSState *env = (CPUMIPSState*)cpu_env;
4458 env->active_tc.gpr[3] = host_pipe[1];
4460 #elif defined(TARGET_SH4)
4461 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
4464 if (put_user_s32(host_pipe[0], arg1)
4465 || put_user_s32(host_pipe[1], arg1 + sizeof(host_pipe[0])))
4471 case TARGET_NR_times:
4473 struct target_tms *tmsp;
4475 ret = get_errno(times(&tms));
4477 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
4480 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
4481 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
4482 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
4483 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
4486 ret = host_to_target_clock_t(ret);
4489 #ifdef TARGET_NR_prof
4490 case TARGET_NR_prof:
4493 #ifdef TARGET_NR_signal
4494 case TARGET_NR_signal:
4497 case TARGET_NR_acct:
4499 ret = get_errno(acct(NULL));
4501 if (!(p = lock_user_string(arg1)))
4503 ret = get_errno(acct(path(p)));
4504 unlock_user(p, arg1, 0);
4507 #ifdef TARGET_NR_umount2 /* not on alpha */
4508 case TARGET_NR_umount2:
4509 if (!(p = lock_user_string(arg1)))
4511 ret = get_errno(umount2(p, arg2));
4512 unlock_user(p, arg1, 0);
4515 #ifdef TARGET_NR_lock
4516 case TARGET_NR_lock:
4519 case TARGET_NR_ioctl:
4520 ret = do_ioctl(arg1, arg2, arg3);
4522 case TARGET_NR_fcntl:
4523 ret = do_fcntl(arg1, arg2, arg3);
4525 #ifdef TARGET_NR_mpx
4529 case TARGET_NR_setpgid:
4530 ret = get_errno(setpgid(arg1, arg2));
4532 #ifdef TARGET_NR_ulimit
4533 case TARGET_NR_ulimit:
4536 #ifdef TARGET_NR_oldolduname
4537 case TARGET_NR_oldolduname:
4540 case TARGET_NR_umask:
4541 ret = get_errno(umask(arg1));
4543 case TARGET_NR_chroot:
4544 if (!(p = lock_user_string(arg1)))
4546 ret = get_errno(chroot(p));
4547 unlock_user(p, arg1, 0);
4549 case TARGET_NR_ustat:
4551 case TARGET_NR_dup2:
4552 ret = get_errno(dup2(arg1, arg2));
4554 #ifdef TARGET_NR_getppid /* not on alpha */
4555 case TARGET_NR_getppid:
4556 ret = get_errno(getppid());
4559 case TARGET_NR_getpgrp:
4560 ret = get_errno(getpgrp());
4562 case TARGET_NR_setsid:
4563 ret = get_errno(setsid());
4565 #ifdef TARGET_NR_sigaction
4566 case TARGET_NR_sigaction:
4568 #if !defined(TARGET_MIPS)
4569 struct target_old_sigaction *old_act;
4570 struct target_sigaction act, oact, *pact;
4572 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4574 act._sa_handler = old_act->_sa_handler;
4575 target_siginitset(&act.sa_mask, old_act->sa_mask);
4576 act.sa_flags = old_act->sa_flags;
4577 act.sa_restorer = old_act->sa_restorer;
4578 unlock_user_struct(old_act, arg2, 0);
4583 ret = get_errno(do_sigaction(arg1, pact, &oact));
4584 if (!is_error(ret) && arg3) {
4585 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4587 old_act->_sa_handler = oact._sa_handler;
4588 old_act->sa_mask = oact.sa_mask.sig[0];
4589 old_act->sa_flags = oact.sa_flags;
4590 old_act->sa_restorer = oact.sa_restorer;
4591 unlock_user_struct(old_act, arg3, 1);
4594 struct target_sigaction act, oact, *pact, *old_act;
4597 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4599 act._sa_handler = old_act->_sa_handler;
4600 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
4601 act.sa_flags = old_act->sa_flags;
4602 unlock_user_struct(old_act, arg2, 0);
4608 ret = get_errno(do_sigaction(arg1, pact, &oact));
4610 if (!is_error(ret) && arg3) {
4611 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4613 old_act->_sa_handler = oact._sa_handler;
4614 old_act->sa_flags = oact.sa_flags;
4615 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
4616 old_act->sa_mask.sig[1] = 0;
4617 old_act->sa_mask.sig[2] = 0;
4618 old_act->sa_mask.sig[3] = 0;
4619 unlock_user_struct(old_act, arg3, 1);
4625 case TARGET_NR_rt_sigaction:
4627 struct target_sigaction *act;
4628 struct target_sigaction *oact;
4631 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
4636 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
4637 ret = -TARGET_EFAULT;
4638 goto rt_sigaction_fail;
4642 ret = get_errno(do_sigaction(arg1, act, oact));
4645 unlock_user_struct(act, arg2, 0);
4647 unlock_user_struct(oact, arg3, 1);
4650 #ifdef TARGET_NR_sgetmask /* not on alpha */
4651 case TARGET_NR_sgetmask:
4654 abi_ulong target_set;
4655 sigprocmask(0, NULL, &cur_set);
4656 host_to_target_old_sigset(&target_set, &cur_set);
4661 #ifdef TARGET_NR_ssetmask /* not on alpha */
4662 case TARGET_NR_ssetmask:
4664 sigset_t set, oset, cur_set;
4665 abi_ulong target_set = arg1;
4666 sigprocmask(0, NULL, &cur_set);
4667 target_to_host_old_sigset(&set, &target_set);
4668 sigorset(&set, &set, &cur_set);
4669 sigprocmask(SIG_SETMASK, &set, &oset);
4670 host_to_target_old_sigset(&target_set, &oset);
4675 #ifdef TARGET_NR_sigprocmask
4676 case TARGET_NR_sigprocmask:
4679 sigset_t set, oldset, *set_ptr;
4683 case TARGET_SIG_BLOCK:
4686 case TARGET_SIG_UNBLOCK:
4689 case TARGET_SIG_SETMASK:
4693 ret = -TARGET_EINVAL;
4696 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4698 target_to_host_old_sigset(&set, p);
4699 unlock_user(p, arg2, 0);
4705 ret = get_errno(sigprocmask(arg1, set_ptr, &oldset));
4706 if (!is_error(ret) && arg3) {
4707 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4709 host_to_target_old_sigset(p, &oldset);
4710 unlock_user(p, arg3, sizeof(target_sigset_t));
4715 case TARGET_NR_rt_sigprocmask:
4718 sigset_t set, oldset, *set_ptr;
4722 case TARGET_SIG_BLOCK:
4725 case TARGET_SIG_UNBLOCK:
4728 case TARGET_SIG_SETMASK:
4732 ret = -TARGET_EINVAL;
4735 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4737 target_to_host_sigset(&set, p);
4738 unlock_user(p, arg2, 0);
4744 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
4745 if (!is_error(ret) && arg3) {
4746 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4748 host_to_target_sigset(p, &oldset);
4749 unlock_user(p, arg3, sizeof(target_sigset_t));
4753 #ifdef TARGET_NR_sigpending
4754 case TARGET_NR_sigpending:
4757 ret = get_errno(sigpending(&set));
4758 if (!is_error(ret)) {
4759 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4761 host_to_target_old_sigset(p, &set);
4762 unlock_user(p, arg1, sizeof(target_sigset_t));
4767 case TARGET_NR_rt_sigpending:
4770 ret = get_errno(sigpending(&set));
4771 if (!is_error(ret)) {
4772 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4774 host_to_target_sigset(p, &set);
4775 unlock_user(p, arg1, sizeof(target_sigset_t));
4779 #ifdef TARGET_NR_sigsuspend
4780 case TARGET_NR_sigsuspend:
4783 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4785 target_to_host_old_sigset(&set, p);
4786 unlock_user(p, arg1, 0);
4787 ret = get_errno(sigsuspend(&set));
4791 case TARGET_NR_rt_sigsuspend:
4794 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4796 target_to_host_sigset(&set, p);
4797 unlock_user(p, arg1, 0);
4798 ret = get_errno(sigsuspend(&set));
4801 case TARGET_NR_rt_sigtimedwait:
4804 struct timespec uts, *puts;
4807 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4809 target_to_host_sigset(&set, p);
4810 unlock_user(p, arg1, 0);
4813 target_to_host_timespec(puts, arg3);
4817 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
4818 if (!is_error(ret) && arg2) {
4819 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
4821 host_to_target_siginfo(p, &uinfo);
4822 unlock_user(p, arg2, sizeof(target_siginfo_t));
4826 case TARGET_NR_rt_sigqueueinfo:
4829 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
4831 target_to_host_siginfo(&uinfo, p);
4832 unlock_user(p, arg1, 0);
4833 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
4836 #ifdef TARGET_NR_sigreturn
4837 case TARGET_NR_sigreturn:
4838 /* NOTE: ret is eax, so not transcoding must be done */
4839 ret = do_sigreturn(cpu_env);
4842 case TARGET_NR_rt_sigreturn:
4843 /* NOTE: ret is eax, so not transcoding must be done */
4844 ret = do_rt_sigreturn(cpu_env);
4846 case TARGET_NR_sethostname:
4847 if (!(p = lock_user_string(arg1)))
4849 ret = get_errno(sethostname(p, arg2));
4850 unlock_user(p, arg1, 0);
4852 case TARGET_NR_setrlimit:
4854 /* XXX: convert resource ? */
4855 int resource = arg1;
4856 struct target_rlimit *target_rlim;
4858 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
4860 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4861 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4862 unlock_user_struct(target_rlim, arg2, 0);
4863 ret = get_errno(setrlimit(resource, &rlim));
4866 case TARGET_NR_getrlimit:
4868 /* XXX: convert resource ? */
4869 int resource = arg1;
4870 struct target_rlimit *target_rlim;
4873 ret = get_errno(getrlimit(resource, &rlim));
4874 if (!is_error(ret)) {
4875 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
4877 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4878 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4879 unlock_user_struct(target_rlim, arg2, 1);
4883 case TARGET_NR_getrusage:
4885 struct rusage rusage;
4886 ret = get_errno(getrusage(arg1, &rusage));
4887 if (!is_error(ret)) {
4888 host_to_target_rusage(arg2, &rusage);
4892 case TARGET_NR_gettimeofday:
4895 ret = get_errno(gettimeofday(&tv, NULL));
4896 if (!is_error(ret)) {
4897 if (copy_to_user_timeval(arg1, &tv))
4902 case TARGET_NR_settimeofday:
4905 if (copy_from_user_timeval(&tv, arg1))
4907 ret = get_errno(settimeofday(&tv, NULL));
4910 #ifdef TARGET_NR_select
4911 case TARGET_NR_select:
4913 struct target_sel_arg_struct *sel;
4914 abi_ulong inp, outp, exp, tvp;
4917 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
4919 nsel = tswapl(sel->n);
4920 inp = tswapl(sel->inp);
4921 outp = tswapl(sel->outp);
4922 exp = tswapl(sel->exp);
4923 tvp = tswapl(sel->tvp);
4924 unlock_user_struct(sel, arg1, 0);
4925 ret = do_select(nsel, inp, outp, exp, tvp);
4929 case TARGET_NR_symlink:
4932 p = lock_user_string(arg1);
4933 p2 = lock_user_string(arg2);
4935 ret = -TARGET_EFAULT;
4937 ret = get_errno(symlink(p, p2));
4938 unlock_user(p2, arg2, 0);
4939 unlock_user(p, arg1, 0);
4942 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
4943 case TARGET_NR_symlinkat:
4946 p = lock_user_string(arg1);
4947 p2 = lock_user_string(arg3);
4949 ret = -TARGET_EFAULT;
4951 ret = get_errno(sys_symlinkat(p, arg2, p2));
4952 unlock_user(p2, arg3, 0);
4953 unlock_user(p, arg1, 0);
4957 #ifdef TARGET_NR_oldlstat
4958 case TARGET_NR_oldlstat:
4961 case TARGET_NR_readlink:
4964 p = lock_user_string(arg1);
4965 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
4967 ret = -TARGET_EFAULT;
4969 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
4970 char real[PATH_MAX];
4971 temp = realpath(exec_path,real);
4972 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
4973 snprintf((char *)p2, arg3, "%s", real);
4976 ret = get_errno(readlink(path(p), p2, arg3));
4978 unlock_user(p2, arg2, ret);
4979 unlock_user(p, arg1, 0);
4982 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
4983 case TARGET_NR_readlinkat:
4986 p = lock_user_string(arg2);
4987 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
4989 ret = -TARGET_EFAULT;
4991 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
4992 unlock_user(p2, arg3, ret);
4993 unlock_user(p, arg2, 0);
4997 #ifdef TARGET_NR_uselib
4998 case TARGET_NR_uselib:
5001 #ifdef TARGET_NR_swapon
5002 case TARGET_NR_swapon:
5003 if (!(p = lock_user_string(arg1)))
5005 ret = get_errno(swapon(p, arg2));
5006 unlock_user(p, arg1, 0);
5009 case TARGET_NR_reboot:
5011 #ifdef TARGET_NR_readdir
5012 case TARGET_NR_readdir:
5015 #ifdef TARGET_NR_mmap
5016 case TARGET_NR_mmap:
5017 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS)
5020 abi_ulong v1, v2, v3, v4, v5, v6;
5021 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5029 unlock_user(v, arg1, 0);
5030 ret = get_errno(target_mmap(v1, v2, v3,
5031 target_to_host_bitmask(v4, mmap_flags_tbl),
5035 ret = get_errno(target_mmap(arg1, arg2, arg3,
5036 target_to_host_bitmask(arg4, mmap_flags_tbl),
5042 #ifdef TARGET_NR_mmap2
5043 case TARGET_NR_mmap2:
5045 #define MMAP_SHIFT 12
5047 ret = get_errno(target_mmap(arg1, arg2, arg3,
5048 target_to_host_bitmask(arg4, mmap_flags_tbl),
5050 arg6 << MMAP_SHIFT));
5053 case TARGET_NR_munmap:
5054 ret = get_errno(target_munmap(arg1, arg2));
5056 case TARGET_NR_mprotect:
5057 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5059 #ifdef TARGET_NR_mremap
5060 case TARGET_NR_mremap:
5061 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5064 /* ??? msync/mlock/munlock are broken for softmmu. */
5065 #ifdef TARGET_NR_msync
5066 case TARGET_NR_msync:
5067 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5070 #ifdef TARGET_NR_mlock
5071 case TARGET_NR_mlock:
5072 ret = get_errno(mlock(g2h(arg1), arg2));
5075 #ifdef TARGET_NR_munlock
5076 case TARGET_NR_munlock:
5077 ret = get_errno(munlock(g2h(arg1), arg2));
5080 #ifdef TARGET_NR_mlockall
5081 case TARGET_NR_mlockall:
5082 ret = get_errno(mlockall(arg1));
5085 #ifdef TARGET_NR_munlockall
5086 case TARGET_NR_munlockall:
5087 ret = get_errno(munlockall());
5090 case TARGET_NR_truncate:
5091 if (!(p = lock_user_string(arg1)))
5093 ret = get_errno(truncate(p, arg2));
5094 unlock_user(p, arg1, 0);
5096 case TARGET_NR_ftruncate:
5097 ret = get_errno(ftruncate(arg1, arg2));
5099 case TARGET_NR_fchmod:
5100 ret = get_errno(fchmod(arg1, arg2));
5102 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5103 case TARGET_NR_fchmodat:
5104 if (!(p = lock_user_string(arg2)))
5106 ret = get_errno(sys_fchmodat(arg1, p, arg3, arg4));
5107 unlock_user(p, arg2, 0);
5110 case TARGET_NR_getpriority:
5111 /* libc does special remapping of the return value of
5112 * sys_getpriority() so it's just easiest to call
5113 * sys_getpriority() directly rather than through libc. */
5114 ret = sys_getpriority(arg1, arg2);
5116 case TARGET_NR_setpriority:
5117 ret = get_errno(setpriority(arg1, arg2, arg3));
5119 #ifdef TARGET_NR_profil
5120 case TARGET_NR_profil:
5123 case TARGET_NR_statfs:
5124 if (!(p = lock_user_string(arg1)))
5126 ret = get_errno(statfs(path(p), &stfs));
5127 unlock_user(p, arg1, 0);
5129 if (!is_error(ret)) {
5130 struct target_statfs *target_stfs;
5132 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5134 __put_user(stfs.f_type, &target_stfs->f_type);
5135 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5136 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5137 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5138 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5139 __put_user(stfs.f_files, &target_stfs->f_files);
5140 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5141 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5142 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5143 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5144 unlock_user_struct(target_stfs, arg2, 1);
5147 case TARGET_NR_fstatfs:
5148 ret = get_errno(fstatfs(arg1, &stfs));
5149 goto convert_statfs;
5150 #ifdef TARGET_NR_statfs64
5151 case TARGET_NR_statfs64:
5152 if (!(p = lock_user_string(arg1)))
5154 ret = get_errno(statfs(path(p), &stfs));
5155 unlock_user(p, arg1, 0);
5157 if (!is_error(ret)) {
5158 struct target_statfs64 *target_stfs;
5160 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5162 __put_user(stfs.f_type, &target_stfs->f_type);
5163 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5164 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5165 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5166 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5167 __put_user(stfs.f_files, &target_stfs->f_files);
5168 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5169 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5170 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5171 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5172 unlock_user_struct(target_stfs, arg3, 1);
5175 case TARGET_NR_fstatfs64:
5176 ret = get_errno(fstatfs(arg1, &stfs));
5177 goto convert_statfs64;
5179 #ifdef TARGET_NR_ioperm
5180 case TARGET_NR_ioperm:
5183 #ifdef TARGET_NR_socketcall
5184 case TARGET_NR_socketcall:
5185 ret = do_socketcall(arg1, arg2);
5188 #ifdef TARGET_NR_accept
5189 case TARGET_NR_accept:
5190 ret = do_accept(arg1, arg2, arg3);
5193 #ifdef TARGET_NR_bind
5194 case TARGET_NR_bind:
5195 ret = do_bind(arg1, arg2, arg3);
5198 #ifdef TARGET_NR_connect
5199 case TARGET_NR_connect:
5200 ret = do_connect(arg1, arg2, arg3);
5203 #ifdef TARGET_NR_getpeername
5204 case TARGET_NR_getpeername:
5205 ret = do_getpeername(arg1, arg2, arg3);
5208 #ifdef TARGET_NR_getsockname
5209 case TARGET_NR_getsockname:
5210 ret = do_getsockname(arg1, arg2, arg3);
5213 #ifdef TARGET_NR_getsockopt
5214 case TARGET_NR_getsockopt:
5215 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
5218 #ifdef TARGET_NR_listen
5219 case TARGET_NR_listen:
5220 ret = get_errno(listen(arg1, arg2));
5223 #ifdef TARGET_NR_recv
5224 case TARGET_NR_recv:
5225 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
5228 #ifdef TARGET_NR_recvfrom
5229 case TARGET_NR_recvfrom:
5230 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
5233 #ifdef TARGET_NR_recvmsg
5234 case TARGET_NR_recvmsg:
5235 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
5238 #ifdef TARGET_NR_send
5239 case TARGET_NR_send:
5240 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
5243 #ifdef TARGET_NR_sendmsg
5244 case TARGET_NR_sendmsg:
5245 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
5248 #ifdef TARGET_NR_sendto
5249 case TARGET_NR_sendto:
5250 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
5253 #ifdef TARGET_NR_shutdown
5254 case TARGET_NR_shutdown:
5255 ret = get_errno(shutdown(arg1, arg2));
5258 #ifdef TARGET_NR_socket
5259 case TARGET_NR_socket:
5260 ret = do_socket(arg1, arg2, arg3);
5263 #ifdef TARGET_NR_socketpair
5264 case TARGET_NR_socketpair:
5265 ret = do_socketpair(arg1, arg2, arg3, arg4);
5268 #ifdef TARGET_NR_setsockopt
5269 case TARGET_NR_setsockopt:
5270 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
5274 case TARGET_NR_syslog:
5275 if (!(p = lock_user_string(arg2)))
5277 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
5278 unlock_user(p, arg2, 0);
5281 case TARGET_NR_setitimer:
5283 struct itimerval value, ovalue, *pvalue;
5287 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
5288 || copy_from_user_timeval(&pvalue->it_value,
5289 arg2 + sizeof(struct target_timeval)))
5294 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
5295 if (!is_error(ret) && arg3) {
5296 if (copy_to_user_timeval(arg3,
5297 &ovalue.it_interval)
5298 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
5304 case TARGET_NR_getitimer:
5306 struct itimerval value;
5308 ret = get_errno(getitimer(arg1, &value));
5309 if (!is_error(ret) && arg2) {
5310 if (copy_to_user_timeval(arg2,
5312 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
5318 case TARGET_NR_stat:
5319 if (!(p = lock_user_string(arg1)))
5321 ret = get_errno(stat(path(p), &st));
5322 unlock_user(p, arg1, 0);
5324 case TARGET_NR_lstat:
5325 if (!(p = lock_user_string(arg1)))
5327 ret = get_errno(lstat(path(p), &st));
5328 unlock_user(p, arg1, 0);
5330 case TARGET_NR_fstat:
5332 ret = get_errno(fstat(arg1, &st));
5334 if (!is_error(ret)) {
5335 struct target_stat *target_st;
5337 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
5339 __put_user(st.st_dev, &target_st->st_dev);
5340 __put_user(st.st_ino, &target_st->st_ino);
5341 __put_user(st.st_mode, &target_st->st_mode);
5342 __put_user(st.st_uid, &target_st->st_uid);
5343 __put_user(st.st_gid, &target_st->st_gid);
5344 __put_user(st.st_nlink, &target_st->st_nlink);
5345 __put_user(st.st_rdev, &target_st->st_rdev);
5346 __put_user(st.st_size, &target_st->st_size);
5347 __put_user(st.st_blksize, &target_st->st_blksize);
5348 __put_user(st.st_blocks, &target_st->st_blocks);
5349 __put_user(st.st_atime, &target_st->target_st_atime);
5350 __put_user(st.st_mtime, &target_st->target_st_mtime);
5351 __put_user(st.st_ctime, &target_st->target_st_ctime);
5352 unlock_user_struct(target_st, arg2, 1);
5356 #ifdef TARGET_NR_olduname
5357 case TARGET_NR_olduname:
5360 #ifdef TARGET_NR_iopl
5361 case TARGET_NR_iopl:
5364 case TARGET_NR_vhangup:
5365 ret = get_errno(vhangup());
5367 #ifdef TARGET_NR_idle
5368 case TARGET_NR_idle:
5371 #ifdef TARGET_NR_syscall
5372 case TARGET_NR_syscall:
5373 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
5376 case TARGET_NR_wait4:
5379 abi_long status_ptr = arg2;
5380 struct rusage rusage, *rusage_ptr;
5381 abi_ulong target_rusage = arg4;
5383 rusage_ptr = &rusage;
5386 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
5387 if (!is_error(ret)) {
5389 if (put_user_s32(status, status_ptr))
5393 host_to_target_rusage(target_rusage, &rusage);
5397 #ifdef TARGET_NR_swapoff
5398 case TARGET_NR_swapoff:
5399 if (!(p = lock_user_string(arg1)))
5401 ret = get_errno(swapoff(p));
5402 unlock_user(p, arg1, 0);
5405 case TARGET_NR_sysinfo:
5407 struct target_sysinfo *target_value;
5408 struct sysinfo value;
5409 ret = get_errno(sysinfo(&value));
5410 if (!is_error(ret) && arg1)
5412 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
5414 __put_user(value.uptime, &target_value->uptime);
5415 __put_user(value.loads[0], &target_value->loads[0]);
5416 __put_user(value.loads[1], &target_value->loads[1]);
5417 __put_user(value.loads[2], &target_value->loads[2]);
5418 __put_user(value.totalram, &target_value->totalram);
5419 __put_user(value.freeram, &target_value->freeram);
5420 __put_user(value.sharedram, &target_value->sharedram);
5421 __put_user(value.bufferram, &target_value->bufferram);
5422 __put_user(value.totalswap, &target_value->totalswap);
5423 __put_user(value.freeswap, &target_value->freeswap);
5424 __put_user(value.procs, &target_value->procs);
5425 __put_user(value.totalhigh, &target_value->totalhigh);
5426 __put_user(value.freehigh, &target_value->freehigh);
5427 __put_user(value.mem_unit, &target_value->mem_unit);
5428 unlock_user_struct(target_value, arg1, 1);
5432 #ifdef TARGET_NR_ipc
5434 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
5437 #ifdef TARGET_NR_semget
5438 case TARGET_NR_semget:
5439 ret = get_errno(semget(arg1, arg2, arg3));
5442 #ifdef TARGET_NR_semop
5443 case TARGET_NR_semop:
5444 ret = get_errno(do_semop(arg1, arg2, arg3));
5447 #ifdef TARGET_NR_semctl
5448 case TARGET_NR_semctl:
5449 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
5452 #ifdef TARGET_NR_msgctl
5453 case TARGET_NR_msgctl:
5454 ret = do_msgctl(arg1, arg2, arg3);
5457 #ifdef TARGET_NR_msgget
5458 case TARGET_NR_msgget:
5459 ret = get_errno(msgget(arg1, arg2));
5462 #ifdef TARGET_NR_msgrcv
5463 case TARGET_NR_msgrcv:
5464 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
5467 #ifdef TARGET_NR_msgsnd
5468 case TARGET_NR_msgsnd:
5469 ret = do_msgsnd(arg1, arg2, arg3, arg4);
5472 #ifdef TARGET_NR_shmget
5473 case TARGET_NR_shmget:
5474 ret = get_errno(shmget(arg1, arg2, arg3));
5477 #ifdef TARGET_NR_shmctl
5478 case TARGET_NR_shmctl:
5479 ret = do_shmctl(arg1, arg2, arg3);
5482 #ifdef TARGET_NR_shmat
5483 case TARGET_NR_shmat:
5488 err = do_shmat(arg1, arg2, arg3, &_ret);
5489 ret = err ? err : _ret;
5493 #ifdef TARGET_NR_shmdt
5494 case TARGET_NR_shmdt:
5495 ret = do_shmdt(arg1);
5498 case TARGET_NR_fsync:
5499 ret = get_errno(fsync(arg1));
5501 case TARGET_NR_clone:
5502 #if defined(TARGET_SH4)
5503 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
5504 #elif defined(TARGET_CRIS)
5505 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
5507 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
5510 #ifdef __NR_exit_group
5511 /* new thread calls */
5512 case TARGET_NR_exit_group:
5516 gdb_exit(cpu_env, arg1);
5517 ret = get_errno(exit_group(arg1));
5520 case TARGET_NR_setdomainname:
5521 if (!(p = lock_user_string(arg1)))
5523 ret = get_errno(setdomainname(p, arg2));
5524 unlock_user(p, arg1, 0);
5526 case TARGET_NR_uname:
5527 /* no need to transcode because we use the linux syscall */
5529 struct new_utsname * buf;
5531 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
5533 ret = get_errno(sys_uname(buf));
5534 if (!is_error(ret)) {
5535 /* Overwrite the native machine name with whatever is being
5537 strcpy (buf->machine, UNAME_MACHINE);
5538 /* Allow the user to override the reported release. */
5539 if (qemu_uname_release && *qemu_uname_release)
5540 strcpy (buf->release, qemu_uname_release);
5542 unlock_user_struct(buf, arg1, 1);
5546 case TARGET_NR_modify_ldt:
5547 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
5549 #if !defined(TARGET_X86_64)
5550 case TARGET_NR_vm86old:
5552 case TARGET_NR_vm86:
5553 ret = do_vm86(cpu_env, arg1, arg2);
5557 case TARGET_NR_adjtimex:
5559 #ifdef TARGET_NR_create_module
5560 case TARGET_NR_create_module:
5562 case TARGET_NR_init_module:
5563 case TARGET_NR_delete_module:
5564 #ifdef TARGET_NR_get_kernel_syms
5565 case TARGET_NR_get_kernel_syms:
5568 case TARGET_NR_quotactl:
5570 case TARGET_NR_getpgid:
5571 ret = get_errno(getpgid(arg1));
5573 case TARGET_NR_fchdir:
5574 ret = get_errno(fchdir(arg1));
5576 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5577 case TARGET_NR_bdflush:
5580 #ifdef TARGET_NR_sysfs
5581 case TARGET_NR_sysfs:
5584 case TARGET_NR_personality:
5585 ret = get_errno(personality(arg1));
5587 #ifdef TARGET_NR_afs_syscall
5588 case TARGET_NR_afs_syscall:
5591 #ifdef TARGET_NR__llseek /* Not on alpha */
5592 case TARGET_NR__llseek:
5594 #if defined (__x86_64__)
5595 ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5));
5596 if (put_user_s64(ret, arg4))
5600 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
5601 if (put_user_s64(res, arg4))
5607 case TARGET_NR_getdents:
5608 #if TARGET_ABI_BITS != 32
5610 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5612 struct target_dirent *target_dirp;
5613 struct linux_dirent *dirp;
5614 abi_long count = arg3;
5616 dirp = malloc(count);
5618 ret = -TARGET_ENOMEM;
5622 ret = get_errno(sys_getdents(arg1, dirp, count));
5623 if (!is_error(ret)) {
5624 struct linux_dirent *de;
5625 struct target_dirent *tde;
5627 int reclen, treclen;
5628 int count1, tnamelen;
5632 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5636 reclen = de->d_reclen;
5637 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
5638 tde->d_reclen = tswap16(treclen);
5639 tde->d_ino = tswapl(de->d_ino);
5640 tde->d_off = tswapl(de->d_off);
5641 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
5644 /* XXX: may not be correct */
5645 pstrcpy(tde->d_name, tnamelen, de->d_name);
5646 de = (struct linux_dirent *)((char *)de + reclen);
5648 tde = (struct target_dirent *)((char *)tde + treclen);
5652 unlock_user(target_dirp, arg2, ret);
5658 struct linux_dirent *dirp;
5659 abi_long count = arg3;
5661 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5663 ret = get_errno(sys_getdents(arg1, dirp, count));
5664 if (!is_error(ret)) {
5665 struct linux_dirent *de;
5670 reclen = de->d_reclen;
5673 de->d_reclen = tswap16(reclen);
5674 tswapls(&de->d_ino);
5675 tswapls(&de->d_off);
5676 de = (struct linux_dirent *)((char *)de + reclen);
5680 unlock_user(dirp, arg2, ret);
5684 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5685 case TARGET_NR_getdents64:
5687 struct linux_dirent64 *dirp;
5688 abi_long count = arg3;
5689 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5691 ret = get_errno(sys_getdents64(arg1, dirp, count));
5692 if (!is_error(ret)) {
5693 struct linux_dirent64 *de;
5698 reclen = de->d_reclen;
5701 de->d_reclen = tswap16(reclen);
5702 tswap64s((uint64_t *)&de->d_ino);
5703 tswap64s((uint64_t *)&de->d_off);
5704 de = (struct linux_dirent64 *)((char *)de + reclen);
5708 unlock_user(dirp, arg2, ret);
5711 #endif /* TARGET_NR_getdents64 */
5712 #ifdef TARGET_NR__newselect
5713 case TARGET_NR__newselect:
5714 ret = do_select(arg1, arg2, arg3, arg4, arg5);
5717 #ifdef TARGET_NR_poll
5718 case TARGET_NR_poll:
5720 struct target_pollfd *target_pfd;
5721 unsigned int nfds = arg2;
5726 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
5729 pfd = alloca(sizeof(struct pollfd) * nfds);
5730 for(i = 0; i < nfds; i++) {
5731 pfd[i].fd = tswap32(target_pfd[i].fd);
5732 pfd[i].events = tswap16(target_pfd[i].events);
5734 ret = get_errno(poll(pfd, nfds, timeout));
5735 if (!is_error(ret)) {
5736 for(i = 0; i < nfds; i++) {
5737 target_pfd[i].revents = tswap16(pfd[i].revents);
5739 ret += nfds * (sizeof(struct target_pollfd)
5740 - sizeof(struct pollfd));
5742 unlock_user(target_pfd, arg1, ret);
5746 case TARGET_NR_flock:
5747 /* NOTE: the flock constant seems to be the same for every
5749 ret = get_errno(flock(arg1, arg2));
5751 case TARGET_NR_readv:
5756 vec = alloca(count * sizeof(struct iovec));
5757 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
5759 ret = get_errno(readv(arg1, vec, count));
5760 unlock_iovec(vec, arg2, count, 1);
5763 case TARGET_NR_writev:
5768 vec = alloca(count * sizeof(struct iovec));
5769 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
5771 ret = get_errno(writev(arg1, vec, count));
5772 unlock_iovec(vec, arg2, count, 0);
5775 case TARGET_NR_getsid:
5776 ret = get_errno(getsid(arg1));
5778 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5779 case TARGET_NR_fdatasync:
5780 ret = get_errno(fdatasync(arg1));
5783 case TARGET_NR__sysctl:
5784 /* We don't implement this, but ENOTDIR is always a safe
5786 ret = -TARGET_ENOTDIR;
5788 case TARGET_NR_sched_setparam:
5790 struct sched_param *target_schp;
5791 struct sched_param schp;
5793 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
5795 schp.sched_priority = tswap32(target_schp->sched_priority);
5796 unlock_user_struct(target_schp, arg2, 0);
5797 ret = get_errno(sched_setparam(arg1, &schp));
5800 case TARGET_NR_sched_getparam:
5802 struct sched_param *target_schp;
5803 struct sched_param schp;
5804 ret = get_errno(sched_getparam(arg1, &schp));
5805 if (!is_error(ret)) {
5806 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
5808 target_schp->sched_priority = tswap32(schp.sched_priority);
5809 unlock_user_struct(target_schp, arg2, 1);
5813 case TARGET_NR_sched_setscheduler:
5815 struct sched_param *target_schp;
5816 struct sched_param schp;
5817 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
5819 schp.sched_priority = tswap32(target_schp->sched_priority);
5820 unlock_user_struct(target_schp, arg3, 0);
5821 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
5824 case TARGET_NR_sched_getscheduler:
5825 ret = get_errno(sched_getscheduler(arg1));
5827 case TARGET_NR_sched_yield:
5828 ret = get_errno(sched_yield());
5830 case TARGET_NR_sched_get_priority_max:
5831 ret = get_errno(sched_get_priority_max(arg1));
5833 case TARGET_NR_sched_get_priority_min:
5834 ret = get_errno(sched_get_priority_min(arg1));
5836 case TARGET_NR_sched_rr_get_interval:
5839 ret = get_errno(sched_rr_get_interval(arg1, &ts));
5840 if (!is_error(ret)) {
5841 host_to_target_timespec(arg2, &ts);
5845 case TARGET_NR_nanosleep:
5847 struct timespec req, rem;
5848 target_to_host_timespec(&req, arg1);
5849 ret = get_errno(nanosleep(&req, &rem));
5850 if (is_error(ret) && arg2) {
5851 host_to_target_timespec(arg2, &rem);
5855 #ifdef TARGET_NR_query_module
5856 case TARGET_NR_query_module:
5859 #ifdef TARGET_NR_nfsservctl
5860 case TARGET_NR_nfsservctl:
5863 case TARGET_NR_prctl:
5866 case PR_GET_PDEATHSIG:
5869 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
5870 if (!is_error(ret) && arg2
5871 && put_user_ual(deathsig, arg2))
5876 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
5880 #ifdef TARGET_NR_arch_prctl
5881 case TARGET_NR_arch_prctl:
5882 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
5883 ret = do_arch_prctl(cpu_env, arg1, arg2);
5889 #ifdef TARGET_NR_pread
5890 case TARGET_NR_pread:
5892 if (((CPUARMState *)cpu_env)->eabi)
5895 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5897 ret = get_errno(pread(arg1, p, arg3, arg4));
5898 unlock_user(p, arg2, ret);
5900 case TARGET_NR_pwrite:
5902 if (((CPUARMState *)cpu_env)->eabi)
5905 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5907 ret = get_errno(pwrite(arg1, p, arg3, arg4));
5908 unlock_user(p, arg2, 0);
5911 #ifdef TARGET_NR_pread64
5912 case TARGET_NR_pread64:
5913 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5915 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
5916 unlock_user(p, arg2, ret);
5918 case TARGET_NR_pwrite64:
5919 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5921 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
5922 unlock_user(p, arg2, 0);
5925 case TARGET_NR_getcwd:
5926 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
5928 ret = get_errno(sys_getcwd1(p, arg2));
5929 unlock_user(p, arg1, ret);
5931 case TARGET_NR_capget:
5933 case TARGET_NR_capset:
5935 case TARGET_NR_sigaltstack:
5936 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
5937 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
5938 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
5943 case TARGET_NR_sendfile:
5945 #ifdef TARGET_NR_getpmsg
5946 case TARGET_NR_getpmsg:
5949 #ifdef TARGET_NR_putpmsg
5950 case TARGET_NR_putpmsg:
5953 #ifdef TARGET_NR_vfork
5954 case TARGET_NR_vfork:
5955 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
5959 #ifdef TARGET_NR_ugetrlimit
5960 case TARGET_NR_ugetrlimit:
5963 ret = get_errno(getrlimit(arg1, &rlim));
5964 if (!is_error(ret)) {
5965 struct target_rlimit *target_rlim;
5966 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5968 target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
5969 target_rlim->rlim_max = tswapl(rlim.rlim_max);
5970 unlock_user_struct(target_rlim, arg2, 1);
5975 #ifdef TARGET_NR_truncate64
5976 case TARGET_NR_truncate64:
5977 if (!(p = lock_user_string(arg1)))
5979 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
5980 unlock_user(p, arg1, 0);
5983 #ifdef TARGET_NR_ftruncate64
5984 case TARGET_NR_ftruncate64:
5985 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
5988 #ifdef TARGET_NR_stat64
5989 case TARGET_NR_stat64:
5990 if (!(p = lock_user_string(arg1)))
5992 ret = get_errno(stat(path(p), &st));
5993 unlock_user(p, arg1, 0);
5995 ret = host_to_target_stat64(cpu_env, arg2, &st);
5998 #ifdef TARGET_NR_lstat64
5999 case TARGET_NR_lstat64:
6000 if (!(p = lock_user_string(arg1)))
6002 ret = get_errno(lstat(path(p), &st));
6003 unlock_user(p, arg1, 0);
6005 ret = host_to_target_stat64(cpu_env, arg2, &st);
6008 #ifdef TARGET_NR_fstat64
6009 case TARGET_NR_fstat64:
6010 ret = get_errno(fstat(arg1, &st));
6012 ret = host_to_target_stat64(cpu_env, arg2, &st);
6015 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6016 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6017 #ifdef TARGET_NR_fstatat64
6018 case TARGET_NR_fstatat64:
6020 #ifdef TARGET_NR_newfstatat
6021 case TARGET_NR_newfstatat:
6023 if (!(p = lock_user_string(arg2)))
6025 #ifdef __NR_fstatat64
6026 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6028 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6031 ret = host_to_target_stat64(cpu_env, arg3, &st);
6035 case TARGET_NR_lchown:
6036 if (!(p = lock_user_string(arg1)))
6038 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6039 unlock_user(p, arg1, 0);
6041 case TARGET_NR_getuid:
6042 ret = get_errno(high2lowuid(getuid()));
6044 case TARGET_NR_getgid:
6045 ret = get_errno(high2lowgid(getgid()));
6047 case TARGET_NR_geteuid:
6048 ret = get_errno(high2lowuid(geteuid()));
6050 case TARGET_NR_getegid:
6051 ret = get_errno(high2lowgid(getegid()));
6053 case TARGET_NR_setreuid:
6054 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6056 case TARGET_NR_setregid:
6057 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6059 case TARGET_NR_getgroups:
6061 int gidsetsize = arg1;
6062 uint16_t *target_grouplist;
6066 grouplist = alloca(gidsetsize * sizeof(gid_t));
6067 ret = get_errno(getgroups(gidsetsize, grouplist));
6068 if (gidsetsize == 0)
6070 if (!is_error(ret)) {
6071 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6072 if (!target_grouplist)
6074 for(i = 0;i < ret; i++)
6075 target_grouplist[i] = tswap16(grouplist[i]);
6076 unlock_user(target_grouplist, arg2, gidsetsize * 2);
6080 case TARGET_NR_setgroups:
6082 int gidsetsize = arg1;
6083 uint16_t *target_grouplist;
6087 grouplist = alloca(gidsetsize * sizeof(gid_t));
6088 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
6089 if (!target_grouplist) {
6090 ret = -TARGET_EFAULT;
6093 for(i = 0;i < gidsetsize; i++)
6094 grouplist[i] = tswap16(target_grouplist[i]);
6095 unlock_user(target_grouplist, arg2, 0);
6096 ret = get_errno(setgroups(gidsetsize, grouplist));
6099 case TARGET_NR_fchown:
6100 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
6102 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6103 case TARGET_NR_fchownat:
6104 if (!(p = lock_user_string(arg2)))
6106 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
6107 unlock_user(p, arg2, 0);
6110 #ifdef TARGET_NR_setresuid
6111 case TARGET_NR_setresuid:
6112 ret = get_errno(setresuid(low2highuid(arg1),
6114 low2highuid(arg3)));
6117 #ifdef TARGET_NR_getresuid
6118 case TARGET_NR_getresuid:
6120 uid_t ruid, euid, suid;
6121 ret = get_errno(getresuid(&ruid, &euid, &suid));
6122 if (!is_error(ret)) {
6123 if (put_user_u16(high2lowuid(ruid), arg1)
6124 || put_user_u16(high2lowuid(euid), arg2)
6125 || put_user_u16(high2lowuid(suid), arg3))
6131 #ifdef TARGET_NR_getresgid
6132 case TARGET_NR_setresgid:
6133 ret = get_errno(setresgid(low2highgid(arg1),
6135 low2highgid(arg3)));
6138 #ifdef TARGET_NR_getresgid
6139 case TARGET_NR_getresgid:
6141 gid_t rgid, egid, sgid;
6142 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6143 if (!is_error(ret)) {
6144 if (put_user_u16(high2lowgid(rgid), arg1)
6145 || put_user_u16(high2lowgid(egid), arg2)
6146 || put_user_u16(high2lowgid(sgid), arg3))
6152 case TARGET_NR_chown:
6153 if (!(p = lock_user_string(arg1)))
6155 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
6156 unlock_user(p, arg1, 0);
6158 case TARGET_NR_setuid:
6159 ret = get_errno(setuid(low2highuid(arg1)));
6161 case TARGET_NR_setgid:
6162 ret = get_errno(setgid(low2highgid(arg1)));
6164 case TARGET_NR_setfsuid:
6165 ret = get_errno(setfsuid(arg1));
6167 case TARGET_NR_setfsgid:
6168 ret = get_errno(setfsgid(arg1));
6170 #endif /* USE_UID16 */
6172 #ifdef TARGET_NR_lchown32
6173 case TARGET_NR_lchown32:
6174 if (!(p = lock_user_string(arg1)))
6176 ret = get_errno(lchown(p, arg2, arg3));
6177 unlock_user(p, arg1, 0);
6180 #ifdef TARGET_NR_getuid32
6181 case TARGET_NR_getuid32:
6182 ret = get_errno(getuid());
6186 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6187 /* Alpha specific */
6188 case TARGET_NR_getxuid:
6192 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
6194 ret = get_errno(getuid());
6197 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6198 /* Alpha specific */
6199 case TARGET_NR_getxgid:
6203 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
6205 ret = get_errno(getgid());
6209 #ifdef TARGET_NR_getgid32
6210 case TARGET_NR_getgid32:
6211 ret = get_errno(getgid());
6214 #ifdef TARGET_NR_geteuid32
6215 case TARGET_NR_geteuid32:
6216 ret = get_errno(geteuid());
6219 #ifdef TARGET_NR_getegid32
6220 case TARGET_NR_getegid32:
6221 ret = get_errno(getegid());
6224 #ifdef TARGET_NR_setreuid32
6225 case TARGET_NR_setreuid32:
6226 ret = get_errno(setreuid(arg1, arg2));
6229 #ifdef TARGET_NR_setregid32
6230 case TARGET_NR_setregid32:
6231 ret = get_errno(setregid(arg1, arg2));
6234 #ifdef TARGET_NR_getgroups32
6235 case TARGET_NR_getgroups32:
6237 int gidsetsize = arg1;
6238 uint32_t *target_grouplist;
6242 grouplist = alloca(gidsetsize * sizeof(gid_t));
6243 ret = get_errno(getgroups(gidsetsize, grouplist));
6244 if (gidsetsize == 0)
6246 if (!is_error(ret)) {
6247 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
6248 if (!target_grouplist) {
6249 ret = -TARGET_EFAULT;
6252 for(i = 0;i < ret; i++)
6253 target_grouplist[i] = tswap32(grouplist[i]);
6254 unlock_user(target_grouplist, arg2, gidsetsize * 4);
6259 #ifdef TARGET_NR_setgroups32
6260 case TARGET_NR_setgroups32:
6262 int gidsetsize = arg1;
6263 uint32_t *target_grouplist;
6267 grouplist = alloca(gidsetsize * sizeof(gid_t));
6268 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
6269 if (!target_grouplist) {
6270 ret = -TARGET_EFAULT;
6273 for(i = 0;i < gidsetsize; i++)
6274 grouplist[i] = tswap32(target_grouplist[i]);
6275 unlock_user(target_grouplist, arg2, 0);
6276 ret = get_errno(setgroups(gidsetsize, grouplist));
6280 #ifdef TARGET_NR_fchown32
6281 case TARGET_NR_fchown32:
6282 ret = get_errno(fchown(arg1, arg2, arg3));
6285 #ifdef TARGET_NR_setresuid32
6286 case TARGET_NR_setresuid32:
6287 ret = get_errno(setresuid(arg1, arg2, arg3));
6290 #ifdef TARGET_NR_getresuid32
6291 case TARGET_NR_getresuid32:
6293 uid_t ruid, euid, suid;
6294 ret = get_errno(getresuid(&ruid, &euid, &suid));
6295 if (!is_error(ret)) {
6296 if (put_user_u32(ruid, arg1)
6297 || put_user_u32(euid, arg2)
6298 || put_user_u32(suid, arg3))
6304 #ifdef TARGET_NR_setresgid32
6305 case TARGET_NR_setresgid32:
6306 ret = get_errno(setresgid(arg1, arg2, arg3));
6309 #ifdef TARGET_NR_getresgid32
6310 case TARGET_NR_getresgid32:
6312 gid_t rgid, egid, sgid;
6313 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6314 if (!is_error(ret)) {
6315 if (put_user_u32(rgid, arg1)
6316 || put_user_u32(egid, arg2)
6317 || put_user_u32(sgid, arg3))
6323 #ifdef TARGET_NR_chown32
6324 case TARGET_NR_chown32:
6325 if (!(p = lock_user_string(arg1)))
6327 ret = get_errno(chown(p, arg2, arg3));
6328 unlock_user(p, arg1, 0);
6331 #ifdef TARGET_NR_setuid32
6332 case TARGET_NR_setuid32:
6333 ret = get_errno(setuid(arg1));
6336 #ifdef TARGET_NR_setgid32
6337 case TARGET_NR_setgid32:
6338 ret = get_errno(setgid(arg1));
6341 #ifdef TARGET_NR_setfsuid32
6342 case TARGET_NR_setfsuid32:
6343 ret = get_errno(setfsuid(arg1));
6346 #ifdef TARGET_NR_setfsgid32
6347 case TARGET_NR_setfsgid32:
6348 ret = get_errno(setfsgid(arg1));
6352 case TARGET_NR_pivot_root:
6354 #ifdef TARGET_NR_mincore
6355 case TARGET_NR_mincore:
6358 ret = -TARGET_EFAULT;
6359 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
6361 if (!(p = lock_user_string(arg3)))
6363 ret = get_errno(mincore(a, arg2, p));
6364 unlock_user(p, arg3, ret);
6366 unlock_user(a, arg1, 0);
6370 #ifdef TARGET_NR_arm_fadvise64_64
6371 case TARGET_NR_arm_fadvise64_64:
6374 * arm_fadvise64_64 looks like fadvise64_64 but
6375 * with different argument order
6383 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
6384 #ifdef TARGET_NR_fadvise64_64
6385 case TARGET_NR_fadvise64_64:
6387 /* This is a hint, so ignoring and returning success is ok. */
6391 #ifdef TARGET_NR_madvise
6392 case TARGET_NR_madvise:
6393 /* A straight passthrough may not be safe because qemu sometimes
5394 turns private file-backed mappings into anonymous mappings.
6395 This will break MADV_DONTNEED.
6396 This is a hint, so ignoring and returning success is ok. */
6400 #if TARGET_ABI_BITS == 32
6401 case TARGET_NR_fcntl64:
6405 struct target_flock64 *target_fl;
6407 struct target_eabi_flock64 *target_efl;
6411 case TARGET_F_GETLK64:
6414 case TARGET_F_SETLK64:
6417 case TARGET_F_SETLKW64:
6426 case TARGET_F_GETLK64:
6428 if (((CPUARMState *)cpu_env)->eabi) {
6429 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6431 fl.l_type = tswap16(target_efl->l_type);
6432 fl.l_whence = tswap16(target_efl->l_whence);
6433 fl.l_start = tswap64(target_efl->l_start);
6434 fl.l_len = tswap64(target_efl->l_len);
6435 fl.l_pid = tswapl(target_efl->l_pid);
6436 unlock_user_struct(target_efl, arg3, 0);
6440 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6442 fl.l_type = tswap16(target_fl->l_type);
6443 fl.l_whence = tswap16(target_fl->l_whence);
6444 fl.l_start = tswap64(target_fl->l_start);
6445 fl.l_len = tswap64(target_fl->l_len);
6446 fl.l_pid = tswapl(target_fl->l_pid);
6447 unlock_user_struct(target_fl, arg3, 0);
6449 ret = get_errno(fcntl(arg1, cmd, &fl));
6452 if (((CPUARMState *)cpu_env)->eabi) {
6453 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
6455 target_efl->l_type = tswap16(fl.l_type);
6456 target_efl->l_whence = tswap16(fl.l_whence);
6457 target_efl->l_start = tswap64(fl.l_start);
6458 target_efl->l_len = tswap64(fl.l_len);
6459 target_efl->l_pid = tswapl(fl.l_pid);
6460 unlock_user_struct(target_efl, arg3, 1);
6464 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
6466 target_fl->l_type = tswap16(fl.l_type);
6467 target_fl->l_whence = tswap16(fl.l_whence);
6468 target_fl->l_start = tswap64(fl.l_start);
6469 target_fl->l_len = tswap64(fl.l_len);
6470 target_fl->l_pid = tswapl(fl.l_pid);
6471 unlock_user_struct(target_fl, arg3, 1);
6476 case TARGET_F_SETLK64:
6477 case TARGET_F_SETLKW64:
6479 if (((CPUARMState *)cpu_env)->eabi) {
6480 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6482 fl.l_type = tswap16(target_efl->l_type);
6483 fl.l_whence = tswap16(target_efl->l_whence);
6484 fl.l_start = tswap64(target_efl->l_start);
6485 fl.l_len = tswap64(target_efl->l_len);
6486 fl.l_pid = tswapl(target_efl->l_pid);
6487 unlock_user_struct(target_efl, arg3, 0);
6491 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6493 fl.l_type = tswap16(target_fl->l_type);
6494 fl.l_whence = tswap16(target_fl->l_whence);
6495 fl.l_start = tswap64(target_fl->l_start);
6496 fl.l_len = tswap64(target_fl->l_len);
6497 fl.l_pid = tswapl(target_fl->l_pid);
6498 unlock_user_struct(target_fl, arg3, 0);
6500 ret = get_errno(fcntl(arg1, cmd, &fl));
6503 ret = do_fcntl(arg1, cmd, arg3);
6509 #ifdef TARGET_NR_cacheflush
6510 case TARGET_NR_cacheflush:
6511 /* self-modifying code is handled automatically, so nothing needed */
6515 #ifdef TARGET_NR_security
6516 case TARGET_NR_security:
6519 #ifdef TARGET_NR_getpagesize
6520 case TARGET_NR_getpagesize:
6521 ret = TARGET_PAGE_SIZE;
6524 case TARGET_NR_gettid:
6525 ret = get_errno(gettid());
6527 #ifdef TARGET_NR_readahead
6528 case TARGET_NR_readahead:
6529 #if TARGET_ABI_BITS == 32
6531 if (((CPUARMState *)cpu_env)->eabi)
6538 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
6540 ret = get_errno(readahead(arg1, arg2, arg3));
6544 #ifdef TARGET_NR_setxattr
6545 case TARGET_NR_setxattr:
6546 case TARGET_NR_lsetxattr:
6547 case TARGET_NR_fsetxattr:
6548 case TARGET_NR_getxattr:
6549 case TARGET_NR_lgetxattr:
6550 case TARGET_NR_fgetxattr:
6551 case TARGET_NR_listxattr:
6552 case TARGET_NR_llistxattr:
6553 case TARGET_NR_flistxattr:
6554 case TARGET_NR_removexattr:
6555 case TARGET_NR_lremovexattr:
6556 case TARGET_NR_fremovexattr:
6557 goto unimplemented_nowarn;
6559 #ifdef TARGET_NR_set_thread_area
6560 case TARGET_NR_set_thread_area:
6561 #if defined(TARGET_MIPS)
6562 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
6565 #elif defined(TARGET_CRIS)
6567 ret = -TARGET_EINVAL;
6569 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
6573 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6574 ret = do_set_thread_area(cpu_env, arg1);
6577 goto unimplemented_nowarn;
6580 #ifdef TARGET_NR_get_thread_area
6581 case TARGET_NR_get_thread_area:
6582 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6583 ret = do_get_thread_area(cpu_env, arg1);
6585 goto unimplemented_nowarn;
6588 #ifdef TARGET_NR_getdomainname
6589 case TARGET_NR_getdomainname:
6590 goto unimplemented_nowarn;
6593 #ifdef TARGET_NR_clock_gettime
6594 case TARGET_NR_clock_gettime:
6597 ret = get_errno(clock_gettime(arg1, &ts));
6598 if (!is_error(ret)) {
6599 host_to_target_timespec(arg2, &ts);
6604 #ifdef TARGET_NR_clock_getres
6605 case TARGET_NR_clock_getres:
6608 ret = get_errno(clock_getres(arg1, &ts));
6609 if (!is_error(ret)) {
6610 host_to_target_timespec(arg2, &ts);
6615 #ifdef TARGET_NR_clock_nanosleep
6616 case TARGET_NR_clock_nanosleep:
6619 target_to_host_timespec(&ts, arg3);
6620 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
6622 host_to_target_timespec(arg4, &ts);
6627 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
6628 case TARGET_NR_set_tid_address:
6629 ret = get_errno(set_tid_address((int *)g2h(arg1)));
6633 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
6634 case TARGET_NR_tkill:
6635 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
6639 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
6640 case TARGET_NR_tgkill:
6641 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
6642 target_to_host_signal(arg3)));
6646 #ifdef TARGET_NR_set_robust_list
6647 case TARGET_NR_set_robust_list:
6648 goto unimplemented_nowarn;
6651 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
6652 case TARGET_NR_utimensat:
6654 struct timespec ts[2];
6655 target_to_host_timespec(ts, arg3);
6656 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
6658 ret = get_errno(sys_utimensat(arg1, NULL, ts, arg4));
6660 if (!(p = lock_user_string(arg2))) {
6661 ret = -TARGET_EFAULT;
6664 ret = get_errno(sys_utimensat(arg1, path(p), ts, arg4));
6665 unlock_user(p, arg2, 0);
6670 #if defined(USE_NPTL)
6671 case TARGET_NR_futex:
6672 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
6675 #ifdef TARGET_NR_inotify_init
6676 case TARGET_NR_inotify_init:
6677 ret = get_errno(sys_inotify_init());
6680 #ifdef TARGET_NR_inotify_add_watch
6681 case TARGET_NR_inotify_add_watch:
6682 p = lock_user_string(arg2);
6683 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
6684 unlock_user(p, arg2, 0);
6687 #ifdef TARGET_NR_inotify_rm_watch
6688 case TARGET_NR_inotify_rm_watch:
6689 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
6693 #ifdef TARGET_NR_mq_open
6694 case TARGET_NR_mq_open:
6696 struct mq_attr posix_mq_attr;
6698 p = lock_user_string(arg1 - 1);
6700 copy_from_user_mq_attr (&posix_mq_attr, arg4);
6701 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
6702 unlock_user (p, arg1, 0);
6706 case TARGET_NR_mq_unlink:
6707 p = lock_user_string(arg1 - 1);
6708 ret = get_errno(mq_unlink(p));
6709 unlock_user (p, arg1, 0);
6712 case TARGET_NR_mq_timedsend:
6716 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6718 target_to_host_timespec(&ts, arg5);
6719 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
6720 host_to_target_timespec(arg5, &ts);
6723 ret = get_errno(mq_send(arg1, p, arg3, arg4));
6724 unlock_user (p, arg2, arg3);
6728 case TARGET_NR_mq_timedreceive:
6733 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6735 target_to_host_timespec(&ts, arg5);
6736 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
6737 host_to_target_timespec(arg5, &ts);
6740 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
6741 unlock_user (p, arg2, arg3);
6743 put_user_u32(prio, arg4);
6747 /* Not implemented for now... */
6748 /* case TARGET_NR_mq_notify: */
6751 case TARGET_NR_mq_getsetattr:
6753 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
6756 ret = mq_getattr(arg1, &posix_mq_attr_out);
6757 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
6760 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
6761 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
6770 gemu_log("qemu: Unsupported syscall: %d\n", num);
6771 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
6772 unimplemented_nowarn:
6774 ret = -TARGET_ENOSYS;
6779 gemu_log(" = %ld\n", ret);
6782 print_syscall_ret(num, ret);
6785 ret = -TARGET_EFAULT;