4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
33 #include <sys/types.h>
39 #include <sys/mount.h>
40 #include <sys/prctl.h>
41 #include <sys/resource.h>
46 #include <sys/socket.h>
49 #include <sys/times.h>
52 #include <sys/statfs.h>
54 #include <sys/sysinfo.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <qemu-common.h>
63 #define termios host_termios
64 #define winsize host_winsize
65 #define termio host_termio
66 #define sgttyb host_sgttyb /* same as target */
67 #define tchars host_tchars /* same as target */
68 #define ltchars host_ltchars /* same as target */
70 #include <linux/termios.h>
71 #include <linux/unistd.h>
72 #include <linux/utsname.h>
73 #include <linux/cdrom.h>
74 #include <linux/hdreg.h>
75 #include <linux/soundcard.h>
77 #include <linux/mtio.h>
78 #include "linux_loop.h"
81 #include "qemu-common.h"
84 #include <linux/futex.h>
85 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
86 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
88 /* XXX: Hardcode the above values. */
89 #define CLONE_NPTL_FLAGS2 0
94 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
95 || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
96 /* 16 bit uid wrappers emulation */
100 //#include <linux/msdos_fs.h>
101 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
102 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
113 #define _syscall0(type,name) \
114 static type name (void) \
116 return syscall(__NR_##name); \
119 #define _syscall1(type,name,type1,arg1) \
120 static type name (type1 arg1) \
122 return syscall(__NR_##name, arg1); \
125 #define _syscall2(type,name,type1,arg1,type2,arg2) \
126 static type name (type1 arg1,type2 arg2) \
128 return syscall(__NR_##name, arg1, arg2); \
131 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
132 static type name (type1 arg1,type2 arg2,type3 arg3) \
134 return syscall(__NR_##name, arg1, arg2, arg3); \
137 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
138 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
140 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
143 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
145 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
147 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
151 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
152 type5,arg5,type6,arg6) \
153 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
156 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
160 #define __NR_sys_exit __NR_exit
161 #define __NR_sys_uname __NR_uname
162 #define __NR_sys_faccessat __NR_faccessat
163 #define __NR_sys_fchmodat __NR_fchmodat
164 #define __NR_sys_fchownat __NR_fchownat
165 #define __NR_sys_fstatat64 __NR_fstatat64
166 #define __NR_sys_futimesat __NR_futimesat
167 #define __NR_sys_getcwd1 __NR_getcwd
168 #define __NR_sys_getdents __NR_getdents
169 #define __NR_sys_getdents64 __NR_getdents64
170 #define __NR_sys_getpriority __NR_getpriority
171 #define __NR_sys_linkat __NR_linkat
172 #define __NR_sys_mkdirat __NR_mkdirat
173 #define __NR_sys_mknodat __NR_mknodat
174 #define __NR_sys_newfstatat __NR_newfstatat
175 #define __NR_sys_openat __NR_openat
176 #define __NR_sys_readlinkat __NR_readlinkat
177 #define __NR_sys_renameat __NR_renameat
178 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
179 #define __NR_sys_symlinkat __NR_symlinkat
180 #define __NR_sys_syslog __NR_syslog
181 #define __NR_sys_tgkill __NR_tgkill
182 #define __NR_sys_tkill __NR_tkill
183 #define __NR_sys_unlinkat __NR_unlinkat
184 #define __NR_sys_utimensat __NR_utimensat
185 #define __NR_sys_futex __NR_futex
186 #define __NR_sys_inotify_init __NR_inotify_init
187 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
188 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
190 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
191 #define __NR__llseek __NR_lseek
195 _syscall0(int, gettid)
197 /* This is a replacement for the host gettid() and must return a host
199 static int gettid(void) {
203 _syscall1(int,sys_exit,int,status)
204 _syscall1(int,sys_uname,struct new_utsname *,buf)
205 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
206 _syscall4(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode,int,flags)
208 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
209 _syscall4(int,sys_fchmodat,int,dirfd,const char *,pathname,
210 mode_t,mode,int,flags)
212 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
213 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
214 uid_t,owner,gid_t,group,int,flags)
216 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
217 defined(__NR_fstatat64)
218 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
219 struct stat *,buf,int,flags)
221 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
222 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
223 const struct timeval *,times)
225 _syscall2(int,sys_getcwd1,char *,buf,size_t,size)
226 #if TARGET_ABI_BITS == 32
227 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
229 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
230 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
232 _syscall2(int, sys_getpriority, int, which, int, who);
233 #if !defined (__x86_64__)
234 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
235 loff_t *, res, uint, wh);
237 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
238 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
239 int,newdirfd,const char *,newpath,int,flags)
241 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
242 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
244 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
245 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
246 mode_t,mode,dev_t,dev)
248 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
249 defined(__NR_newfstatat)
250 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
251 struct stat *,buf,int,flags)
253 #if defined(TARGET_NR_openat) && defined(__NR_openat)
254 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
256 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
257 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
258 char *,buf,size_t,bufsize)
260 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
261 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
262 int,newdirfd,const char *,newpath)
264 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
265 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
266 _syscall3(int,sys_symlinkat,const char *,oldpath,
267 int,newdirfd,const char *,newpath)
269 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
270 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
271 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
273 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
274 _syscall2(int,sys_tkill,int,tid,int,sig)
276 #ifdef __NR_exit_group
277 _syscall1(int,exit_group,int,error_code)
279 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
280 _syscall1(int,set_tid_address,int *,tidptr)
282 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
283 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
285 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
286 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
287 const struct timespec *,tsp,int,flags)
289 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
290 _syscall0(int,sys_inotify_init)
292 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
293 _syscall3(int,sys_inotify_add_watch,int,fd,const char *,pathname,uint32_t,mask)
295 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
296 _syscall2(int,sys_inotify_rm_watch,int,fd,uint32_t,wd)
298 #if defined(USE_NPTL)
299 #if defined(TARGET_NR_futex) && defined(__NR_futex)
300 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
301 const struct timespec *,timeout,int *,uaddr2,int,val3)
305 extern int personality(int);
306 extern int flock(int, int);
307 extern int setfsuid(int);
308 extern int setfsgid(int);
309 extern int setgroups(int, gid_t *);
311 #define ERRNO_TABLE_SIZE 1200
313 /* target_to_host_errno_table[] is initialized from
314 * host_to_target_errno_table[] in syscall_init(). */
315 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
319 * This list is the union of errno values overridden in asm-<arch>/errno.h
320 * minus the errnos that are not actually generic to all archs.
322 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
323 [EIDRM] = TARGET_EIDRM,
324 [ECHRNG] = TARGET_ECHRNG,
325 [EL2NSYNC] = TARGET_EL2NSYNC,
326 [EL3HLT] = TARGET_EL3HLT,
327 [EL3RST] = TARGET_EL3RST,
328 [ELNRNG] = TARGET_ELNRNG,
329 [EUNATCH] = TARGET_EUNATCH,
330 [ENOCSI] = TARGET_ENOCSI,
331 [EL2HLT] = TARGET_EL2HLT,
332 [EDEADLK] = TARGET_EDEADLK,
333 [ENOLCK] = TARGET_ENOLCK,
334 [EBADE] = TARGET_EBADE,
335 [EBADR] = TARGET_EBADR,
336 [EXFULL] = TARGET_EXFULL,
337 [ENOANO] = TARGET_ENOANO,
338 [EBADRQC] = TARGET_EBADRQC,
339 [EBADSLT] = TARGET_EBADSLT,
340 [EBFONT] = TARGET_EBFONT,
341 [ENOSTR] = TARGET_ENOSTR,
342 [ENODATA] = TARGET_ENODATA,
343 [ETIME] = TARGET_ETIME,
344 [ENOSR] = TARGET_ENOSR,
345 [ENONET] = TARGET_ENONET,
346 [ENOPKG] = TARGET_ENOPKG,
347 [EREMOTE] = TARGET_EREMOTE,
348 [ENOLINK] = TARGET_ENOLINK,
349 [EADV] = TARGET_EADV,
350 [ESRMNT] = TARGET_ESRMNT,
351 [ECOMM] = TARGET_ECOMM,
352 [EPROTO] = TARGET_EPROTO,
353 [EDOTDOT] = TARGET_EDOTDOT,
354 [EMULTIHOP] = TARGET_EMULTIHOP,
355 [EBADMSG] = TARGET_EBADMSG,
356 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
357 [EOVERFLOW] = TARGET_EOVERFLOW,
358 [ENOTUNIQ] = TARGET_ENOTUNIQ,
359 [EBADFD] = TARGET_EBADFD,
360 [EREMCHG] = TARGET_EREMCHG,
361 [ELIBACC] = TARGET_ELIBACC,
362 [ELIBBAD] = TARGET_ELIBBAD,
363 [ELIBSCN] = TARGET_ELIBSCN,
364 [ELIBMAX] = TARGET_ELIBMAX,
365 [ELIBEXEC] = TARGET_ELIBEXEC,
366 [EILSEQ] = TARGET_EILSEQ,
367 [ENOSYS] = TARGET_ENOSYS,
368 [ELOOP] = TARGET_ELOOP,
369 [ERESTART] = TARGET_ERESTART,
370 [ESTRPIPE] = TARGET_ESTRPIPE,
371 [ENOTEMPTY] = TARGET_ENOTEMPTY,
372 [EUSERS] = TARGET_EUSERS,
373 [ENOTSOCK] = TARGET_ENOTSOCK,
374 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
375 [EMSGSIZE] = TARGET_EMSGSIZE,
376 [EPROTOTYPE] = TARGET_EPROTOTYPE,
377 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
378 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
379 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
380 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
381 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
382 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
383 [EADDRINUSE] = TARGET_EADDRINUSE,
384 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
385 [ENETDOWN] = TARGET_ENETDOWN,
386 [ENETUNREACH] = TARGET_ENETUNREACH,
387 [ENETRESET] = TARGET_ENETRESET,
388 [ECONNABORTED] = TARGET_ECONNABORTED,
389 [ECONNRESET] = TARGET_ECONNRESET,
390 [ENOBUFS] = TARGET_ENOBUFS,
391 [EISCONN] = TARGET_EISCONN,
392 [ENOTCONN] = TARGET_ENOTCONN,
393 [EUCLEAN] = TARGET_EUCLEAN,
394 [ENOTNAM] = TARGET_ENOTNAM,
395 [ENAVAIL] = TARGET_ENAVAIL,
396 [EISNAM] = TARGET_EISNAM,
397 [EREMOTEIO] = TARGET_EREMOTEIO,
398 [ESHUTDOWN] = TARGET_ESHUTDOWN,
399 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
400 [ETIMEDOUT] = TARGET_ETIMEDOUT,
401 [ECONNREFUSED] = TARGET_ECONNREFUSED,
402 [EHOSTDOWN] = TARGET_EHOSTDOWN,
403 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
404 [EALREADY] = TARGET_EALREADY,
405 [EINPROGRESS] = TARGET_EINPROGRESS,
406 [ESTALE] = TARGET_ESTALE,
407 [ECANCELED] = TARGET_ECANCELED,
408 [ENOMEDIUM] = TARGET_ENOMEDIUM,
409 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
411 [ENOKEY] = TARGET_ENOKEY,
414 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
417 [EKEYREVOKED] = TARGET_EKEYREVOKED,
420 [EKEYREJECTED] = TARGET_EKEYREJECTED,
423 [EOWNERDEAD] = TARGET_EOWNERDEAD,
425 #ifdef ENOTRECOVERABLE
426 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
430 static inline int host_to_target_errno(int err)
432 if(host_to_target_errno_table[err])
433 return host_to_target_errno_table[err];
437 static inline int target_to_host_errno(int err)
439 if (target_to_host_errno_table[err])
440 return target_to_host_errno_table[err];
/* Fold a host syscall result into the target return convention: on the
 * failure path visible below, hand back the negated, target-numbered
 * errno.  NOTE(review): the success branch is not visible in this
 * fragment; presumably non-error results are returned unchanged —
 * confirm against the full source. */
static inline abi_long get_errno(abi_long ret)
    return -host_to_target_errno(errno);
452 static inline int is_error(abi_long ret)
454 return (abi_ulong)ret >= (abi_ulong)(-4096);
/* Return the host C library's message text for a target errno value,
 * translating the number back to host convention first. */
char *target_strerror(int err)
{
    int host_err = target_to_host_errno(err);
    return strerror(host_err);
}
462 static abi_ulong target_brk;
463 static abi_ulong target_original_brk;
465 void target_set_brk(abi_ulong new_brk)
467 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
    /* Host-mapped address of the newly allocated region, or a negative
       target errno from target_mmap(). */
    abi_long mapped_addr;
    /* Never shrink below the initial break (rejection path not visible
       in this fragment). */
    if (new_brk < target_original_brk)
    /* First host page boundary at or past the current break; space up
       to here is already mapped. */
    brk_page = HOST_PAGE_ALIGN(target_brk);
    /* If the new brk is less than this, set it and we're done... */
    if (new_brk < brk_page) {
        target_brk = new_brk;
    /* We need to allocate more memory after the brk... */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
    /* MAP_FIXED extends the heap contiguously right at brk_page. */
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
    /* Only advance the recorded break if the mapping succeeded. */
    if (!is_error(mapped_addr))
        target_brk = new_brk;
/* Rebuild a host fd_set from a guest fd_set image covering n descriptors.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory cannot be
 * read.  (The FD_ZERO/FD_SET bit bookkeeping is not visible in this
 * fragment.) */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
    abi_ulong b, *target_fds;
    /* Number of abi_ulong words needed to hold n bits. */
    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 sizeof(abi_ulong) * nw,
        return -TARGET_EFAULT;
    /* Walk word by word, then bit by bit within each word. */
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
    /* Read-only access: nothing to copy back to the guest. */
    unlock_user(target_fds, target_fds_addr, 0);
/* Write a host fd_set back into a guest fd_set image covering n
 * descriptors.  Returns 0 on success or -TARGET_EFAULT if the guest
 * memory cannot be written.  (Word/bit index setup is not fully visible
 * in this fragment.) */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
    abi_ulong *target_fds;
    /* Number of abi_ulong words needed to hold n bits. */
    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 sizeof(abi_ulong) * nw,
        return -TARGET_EFAULT;
    for (i = 0; i < nw; i++) {
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* Pack each host FD_ISSET() bit into the guest word. */
            v |= ((FD_ISSET(k, fds) != 0) << j);
        __put_user(v, &target_fds[i]);
    /* Flush the full word array back to guest memory. */
    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
564 #if defined(__alpha__)
570 static inline abi_long host_to_target_clock_t(long ticks)
572 #if HOST_HZ == TARGET_HZ
575 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
579 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
580 const struct rusage *rusage)
582 struct target_rusage *target_rusage;
584 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
585 return -TARGET_EFAULT;
586 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
587 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
588 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
589 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
590 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
591 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
592 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
593 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
594 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
595 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
596 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
597 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
598 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
599 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
600 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
601 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
602 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
603 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
604 unlock_user_struct(target_rusage, target_addr, 1);
609 static inline abi_long copy_from_user_timeval(struct timeval *tv,
610 abi_ulong target_tv_addr)
612 struct target_timeval *target_tv;
614 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
615 return -TARGET_EFAULT;
617 __get_user(tv->tv_sec, &target_tv->tv_sec);
618 __get_user(tv->tv_usec, &target_tv->tv_usec);
620 unlock_user_struct(target_tv, target_tv_addr, 0);
625 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
626 const struct timeval *tv)
628 struct target_timeval *target_tv;
630 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
631 return -TARGET_EFAULT;
633 __put_user(tv->tv_sec, &target_tv->tv_sec);
634 __put_user(tv->tv_usec, &target_tv->tv_usec);
636 unlock_user_struct(target_tv, target_tv_addr, 1);
641 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
642 abi_ulong target_mq_attr_addr)
644 struct target_mq_attr *target_mq_attr;
646 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
647 target_mq_attr_addr, 1))
648 return -TARGET_EFAULT;
650 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
651 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
652 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
653 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
655 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
660 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
661 const struct mq_attr *attr)
663 struct target_mq_attr *target_mq_attr;
665 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
666 target_mq_attr_addr, 0))
667 return -TARGET_EFAULT;
669 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
670 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
671 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
672 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
674 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
    fd_set rfds, wfds, efds;
    /* NULL when the corresponding guest address is 0 (set-pointer setup
       is not visible in this fragment). */
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    /* Pull each guest fd_set into its host counterpart. */
    if (copy_from_user_fdset(&rfds, rfd_addr, n))
        return -TARGET_EFAULT;
    if (copy_from_user_fdset(&wfds, wfd_addr, n))
        return -TARGET_EFAULT;
    if (copy_from_user_fdset(&efds, efd_addr, n))
        return -TARGET_EFAULT;
    /* A zero guest address means "no timeout" (tv_ptr presumably stays
       NULL in that case — not visible here). */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
    /* On success, write the surviving descriptors (and remaining
       timeout) back to the guest. */
    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;
        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
736 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
737 abi_ulong target_addr,
740 struct target_sockaddr *target_saddr;
742 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
744 return -TARGET_EFAULT;
745 memcpy(addr, target_saddr, len);
746 addr->sa_family = tswap16(target_saddr->sa_family);
747 unlock_user(target_saddr, target_addr, 0);
752 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
753 struct sockaddr *addr,
756 struct target_sockaddr *target_saddr;
758 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
760 return -TARGET_EFAULT;
761 memcpy(target_saddr, addr, len);
762 target_saddr->sa_family = tswap16(addr->sa_family);
763 unlock_user(target_saddr, target_addr, len);
/* Convert the ancillary-data (control message) chain of a guest msghdr
 * into host format inside msgh.  SCM_RIGHTS fd arrays are swapped
 * element-wise; any other cmsg type is copied raw with a warning.
 * ??? Should this also swap msgh->name? */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    msg_controllen = tswapl(target_msgh->msg_controllen);
    /* Too small to hold even one header: nothing to convert. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
        return -TARGET_EFAULT;
    /* Walk both chains in lockstep until either runs out. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);
        /* Payload length = total cmsg length minus the (aligned) header. */
        int len = tswapl(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
        /* Track how much of the host buffer this message consumes and
           bail out (with the message dropped) on overflow. */
        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);
        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
            /* SCM_RIGHTS: the payload is an int[] of file descriptors;
               swap each one individually. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);
            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    unlock_user(target_cmsg, target_cmsg_addr, 0);
    /* Record how much host control buffer was actually filled. */
    msgh->msg_controllen = space;
/* Convert the ancillary-data (control message) chain of a host msghdr
 * back into guest format.  Mirror image of target_to_host_cmsg().
 * ??? Should this also swap msgh->name? */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    msg_controllen = tswapl(target_msgh->msg_controllen);
    /* Guest buffer too small for even one header: nothing to convert. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
        return -TARGET_EFAULT;
    /* Walk both chains in lockstep until either runs out. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);
        /* Payload length = host cmsg length minus its aligned header. */
        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        /* Track guest-buffer consumption; drop the message on overflow. */
        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
            /* SCM_RIGHTS: swap each file descriptor in the int[] payload. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);
            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    /* Copy only the bytes actually written back to the guest. */
    unlock_user(target_cmsg, target_cmsg_addr, space);
    target_msgh->msg_controllen = tswapl(space);
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
        /* TCP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;
        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        /* TCP option names match between host and target, so level and
           optname pass straight through. */
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
    case IP_ROUTER_ALERT:
    case IP_MTU_DISCOVER:
    case IP_MULTICAST_TTL:
    case IP_MULTICAST_LOOP:
        /* These IP options accept either a full int or a single byte
           from the guest; widen a byte value into 'val'. */
        if (optlen >= sizeof(uint32_t)) {
            if (get_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else if (optlen >= 1) {
            if (get_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
    case TARGET_SOL_SOCKET:
        /* Options with 'int' argument. */
        /* SOL_SOCKET option names differ per target ABI, so each
           TARGET_SO_* is mapped to the host SO_* constant. */
        case TARGET_SO_DEBUG:
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
        case TARGET_SO_ERROR:
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
        case TARGET_SO_SNDBUF:
        case TARGET_SO_RCVBUF:
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
        /* All of the above take a plain int value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;
        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
    /* Unknown level: report and fail with a target errno. */
    gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
    ret = -TARGET_ENOPROTOOPT;
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
    case TARGET_SOL_SOCKET:
        /* Structured options are not handled; only the int-valued
           fall-through cases below are supported. */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERCRED:
        case TARGET_SO_PEERNAME:
            /* These don't just return a single integer */
        /* TCP options all take an 'int' value. */
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
            return -TARGET_EINVAL;
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            /* Guest asked for a full int. */
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
            /* Guest asked for a single byte. */
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
    case IP_ROUTER_ALERT:
    case IP_MTU_DISCOVER:
    case IP_MULTICAST_TTL:
    case IP_MULTICAST_LOOP:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
            return -TARGET_EINVAL;
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        /* Narrow results fitting a byte are written as u8, everything
           else as u32, clamping the reported length to sizeof(int). */
        if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
            if (put_user_u32(len, optlen)
                || put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
            if (len > sizeof(int))
            if (put_user_u32(len, optlen)
                || put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        ret = -TARGET_ENOPROTOOPT;
    /* Unknown level/option combination. */
    gemu_log("getsockopt level=%d optname=%d not yet supported\n",
    ret = -TARGET_EOPNOTSUPP;
/*
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
    struct target_iovec *target_vec;
    /* Map the guest's iovec array itself first. */
    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapl(target_vec[i].iov_base);
        vec[i].iov_len = tswapl(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if an element has an invalid base address. */
        /* zero length pointer is ignored */
        vec[i].iov_base = NULL;
    unlock_user (target_vec, target_addr, 0);
/* Release the per-element buffers taken by lock_iovec(); when 'copy' is
 * set the data is written back to guest memory (the receive path).
 * Returns 0 for success, matching lock_iovec()'s convention. */
static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
    struct target_iovec *target_vec;
    /* Re-map the guest iovec array to recover each element's base. */
    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapl(target_vec[i].iov_base);
            /* copy==0 releases without writing back (the send path). */
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
    unlock_user (target_vec, target_addr, 0);
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
#if defined(TARGET_MIPS)
    /* MIPS numbers the SOCK_* constants differently; translate each
       target value to the host constant (switch scaffolding is not
       fully visible in this fragment). */
    case TARGET_SOCK_DGRAM:
    case TARGET_SOCK_STREAM:
    case TARGET_SOCK_RAW:
    case TARGET_SOCK_RDM:
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
    case TARGET_SOCK_PACKET:
    /* NOTE(review): this returns the *host* -EAFNOSUPPORT rather than
       -TARGET_EAFNOSUPPORT, unlike the function's stated contract. */
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* netlink socket connections are not supported */
    return get_errno(socket(domain, type, protocol));
1188 /* MAX_SOCK_ADDR from linux/net/socket.c */
1189 #define MAX_SOCK_ADDR 128
1191 /* do_bind() Must return target values and target errnos. */
1192 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1197 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1198 return -TARGET_EINVAL;
1200 addr = alloca(addrlen);
1202 target_to_host_sockaddr(addr, target_addr, addrlen);
1203 return get_errno(bind(sockfd, addr, addrlen));
1206 /* do_connect() Must return target values and target errnos. */
1207 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1212 if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1213 return -TARGET_EINVAL;
1215 addr = alloca(addrlen);
1217 target_to_host_sockaddr(addr, target_addr, addrlen);
1218 return get_errno(connect(sockfd, addr, addrlen));
/* do_sendrecvmsg() Must return target values and target errnos.
 * Shared implementation of the sendmsg/recvmsg syscalls, selected by
 * the 'send' flag. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
    struct target_msghdr *msgp;
    abi_ulong target_vec;
    /* Sending reads the guest msghdr, receiving also writes it back. */
    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        /* NOTE(review): return value of target_to_host_sockaddr is
           ignored here, as in the other socket helpers. */
        target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    /* Double the control-buffer size: host cmsg headers can be larger
       than the target's, so converted data may need more room. */
    msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);
    /* Map the guest iovec array into host iovecs. */
    count = tswapl(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapl(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    /* Convert ancillary data in the appropriate direction around the
       actual host syscall. */
    ret = target_to_host_cmsg(&msg, msgp);
    ret = get_errno(sendmsg(fd, &msg, flags));
    ret = get_errno(recvmsg(fd, &msg, flags));
    if (!is_error(ret)) {
        ret = host_to_target_cmsg(msgp, &msg);
    /* On receive, write the iovec data back to the guest. */
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
    /* A zero guest address means the caller doesn't want the peer name. */
    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;
    /* Bound the length before the (not visible here) alloca(). */
    if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
        return -TARGET_EINVAL;
    addr = alloca(addrlen);
    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        /* NOTE(review): host_to_target_sockaddr's result is ignored; a
           bad target_addr would go unreported here. */
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
1304 /* do_getpeername() Must return target values and target errnos. */
/*
 * Emulates getpeername(2): reads the guest's addrlen, performs the host
 * call into a stack buffer, then copies the sockaddr and updated length
 * back to guest memory.
 */
1305 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1306                                abi_ulong target_addrlen_addr)
1312     if (get_user_u32(addrlen, target_addrlen_addr))
1313         return -TARGET_EFAULT;
1315     if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1316         return -TARGET_EINVAL;
1318     addr = alloca(addrlen);
1320     ret = get_errno(getpeername(fd, addr, &addrlen));
1321     if (!is_error(ret)) {
1322         host_to_target_sockaddr(target_addr, addr, addrlen);
1323         if (put_user_u32(addrlen, target_addrlen_addr))
1324             ret = -TARGET_EFAULT;
1329 /* do_getsockname() Must return target values and target errnos. */
/*
 * Emulates getsockname(2); same read-validate-call-writeback pattern as
 * do_getpeername() above.
 */
1330 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1331                                abi_ulong target_addrlen_addr)
1337     if (get_user_u32(addrlen, target_addrlen_addr))
1338         return -TARGET_EFAULT;
1340     if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1341         return -TARGET_EINVAL;
1343     addr = alloca(addrlen);
1345     ret = get_errno(getsockname(fd, addr, &addrlen));
1346     if (!is_error(ret)) {
1347         host_to_target_sockaddr(target_addr, addr, addrlen);
1348         if (put_user_u32(addrlen, target_addrlen_addr))
1349             ret = -TARGET_EFAULT;
1354 /* do_socketpair() Must return target values and target errnos. */
/*
 * Emulates socketpair(2): on success the two host fds are stored as two
 * consecutive 32-bit signed ints at target_tab_addr.
 */
1355 static abi_long do_socketpair(int domain, int type, int protocol,
1356                               abi_ulong target_tab_addr)
1361     ret = get_errno(socketpair(domain, type, protocol, tab));
1362     if (!is_error(ret)) {
1363         if (put_user_s32(tab[0], target_tab_addr)
1364             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1365             ret = -TARGET_EFAULT;
1370 /* do_sendto() Must return target values and target errnos. */
/*
 * Emulates both sendto(2) (target_addr != 0) and send(2) (target_addr == 0;
 * see the elided else-branch at original line 1389). The guest message
 * buffer is locked read-only for the duration of the host call.
 */
1371 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1372                           abi_ulong target_addr, socklen_t addrlen)
1378     if (addrlen < 0 || addrlen > MAX_SOCK_ADDR)
1379         return -TARGET_EINVAL;
1381     host_msg = lock_user(VERIFY_READ, msg, len, 1);
1383         return -TARGET_EFAULT;
1385         addr = alloca(addrlen);
1386         target_to_host_sockaddr(addr, target_addr, addrlen);
1387         ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1389         ret = get_errno(send(fd, host_msg, len, flags));
1391     unlock_user(host_msg, msg, 0);
1395 /* do_recvfrom() Must return target values and target errnos. */
/*
 * Emulates recvfrom(2) (target_addr != 0) and recv(2) (target_addr == 0).
 * On success the received bytes are unlocked back to the guest with the
 * full 'len' (original line 1431); on failure nothing is copied back
 * (length 0 unlock at original line 1434).
 */
1396 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1397                             abi_ulong target_addr,
1398                             abi_ulong target_addrlen)
1405     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1407         return -TARGET_EFAULT;
1409         if (get_user_u32(addrlen, target_addrlen)) {
1410             ret = -TARGET_EFAULT;
1413         if (addrlen < 0 || addrlen > MAX_SOCK_ADDR) {
1414             ret = -TARGET_EINVAL;
1417         addr = alloca(addrlen);
1418         ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1420         addr = NULL; /* To keep compiler quiet.  */
1421         ret = get_errno(recv(fd, host_msg, len, flags));
1423     if (!is_error(ret)) {
1425             host_to_target_sockaddr(target_addr, addr, addrlen);
1426             if (put_user_u32(addrlen, target_addrlen)) {
1427                 ret = -TARGET_EFAULT;
1431         unlock_user(host_msg, msg, len);
1434         unlock_user(host_msg, msg, 0);
1439 #ifdef TARGET_NR_socketcall
1440 /* do_socketcall() Must return target values and target errnos. */
/*
 * Demultiplexer for the socketcall(2) syscall used by i386 and other
 * targets: 'num' selects the socket operation, 'vptr' points at a packed
 * array of abi_ulong arguments in guest memory. Each case reads its
 * arguments with get_user_* (returning -TARGET_EFAULT on a bad pointer)
 * and forwards to the matching do_* helper.
 */
1441 static abi_long do_socketcall(int num, abi_ulong vptr)
1444     const int n = sizeof(abi_ulong);  /* stride of one guest argument */
1449             int domain, type, protocol;
1451             if (get_user_s32(domain, vptr)
1452                 || get_user_s32(type, vptr + n)
1453                 || get_user_s32(protocol, vptr + 2 * n))
1454                 return -TARGET_EFAULT;
1456             ret = do_socket(domain, type, protocol);
1462             abi_ulong target_addr;
1465             if (get_user_s32(sockfd, vptr)
1466                 || get_user_ual(target_addr, vptr + n)
1467                 || get_user_u32(addrlen, vptr + 2 * n))
1468                 return -TARGET_EFAULT;
1470             ret = do_bind(sockfd, target_addr, addrlen);
1473     case SOCKOP_connect:
1476             abi_ulong target_addr;
1479             if (get_user_s32(sockfd, vptr)
1480                 || get_user_ual(target_addr, vptr + n)
1481                 || get_user_u32(addrlen, vptr + 2 * n))
1482                 return -TARGET_EFAULT;
1484             ret = do_connect(sockfd, target_addr, addrlen);
1489             int sockfd, backlog;
1491             if (get_user_s32(sockfd, vptr)
1492                 || get_user_s32(backlog, vptr + n))
1493                 return -TARGET_EFAULT;
1495             ret = get_errno(listen(sockfd, backlog));
1501             abi_ulong target_addr, target_addrlen;
1503             if (get_user_s32(sockfd, vptr)
1504                 || get_user_ual(target_addr, vptr + n)
1505                 || get_user_u32(target_addrlen, vptr + 2 * n))
1506                 return -TARGET_EFAULT;
1508             ret = do_accept(sockfd, target_addr, target_addrlen);
1511     case SOCKOP_getsockname:
1514             abi_ulong target_addr, target_addrlen;
1516             if (get_user_s32(sockfd, vptr)
1517                 || get_user_ual(target_addr, vptr + n)
1518                 || get_user_u32(target_addrlen, vptr + 2 * n))
1519                 return -TARGET_EFAULT;
1521             ret = do_getsockname(sockfd, target_addr, target_addrlen);
1524     case SOCKOP_getpeername:
1527             abi_ulong target_addr, target_addrlen;
1529             if (get_user_s32(sockfd, vptr)
1530                 || get_user_ual(target_addr, vptr + n)
1531                 || get_user_u32(target_addrlen, vptr + 2 * n))
1532                 return -TARGET_EFAULT;
1534             ret = do_getpeername(sockfd, target_addr, target_addrlen);
1537     case SOCKOP_socketpair:
1539             int domain, type, protocol;
1542             if (get_user_s32(domain, vptr)
1543                 || get_user_s32(type, vptr + n)
1544                 || get_user_s32(protocol, vptr + 2 * n)
1545                 || get_user_ual(tab, vptr + 3 * n))
1546                 return -TARGET_EFAULT;
1548             ret = do_socketpair(domain, type, protocol, tab);
/* send/recv: same helpers as sendto/recvfrom with a zero address. */
1558             if (get_user_s32(sockfd, vptr)
1559                 || get_user_ual(msg, vptr + n)
1560                 || get_user_ual(len, vptr + 2 * n)
1561                 || get_user_s32(flags, vptr + 3 * n))
1562                 return -TARGET_EFAULT;
1564             ret = do_sendto(sockfd, msg, len, flags, 0, 0);
1574             if (get_user_s32(sockfd, vptr)
1575                 || get_user_ual(msg, vptr + n)
1576                 || get_user_ual(len, vptr + 2 * n)
1577                 || get_user_s32(flags, vptr + 3 * n))
1578                 return -TARGET_EFAULT;
1580             ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
1592             if (get_user_s32(sockfd, vptr)
1593                 || get_user_ual(msg, vptr + n)
1594                 || get_user_ual(len, vptr + 2 * n)
1595                 || get_user_s32(flags, vptr + 3 * n)
1596                 || get_user_ual(addr, vptr + 4 * n)
1597                 || get_user_u32(addrlen, vptr + 5 * n))
1598                 return -TARGET_EFAULT;
1600             ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
1603     case SOCKOP_recvfrom:
1612             if (get_user_s32(sockfd, vptr)
1613                 || get_user_ual(msg, vptr + n)
1614                 || get_user_ual(len, vptr + 2 * n)
1615                 || get_user_s32(flags, vptr + 3 * n)
1616                 || get_user_ual(addr, vptr + 4 * n)
1617                 || get_user_u32(addrlen, vptr + 5 * n))
1618                 return -TARGET_EFAULT;
1620             ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
1623     case SOCKOP_shutdown:
1627             if (get_user_s32(sockfd, vptr)
1628                 || get_user_s32(how, vptr + n))
1629                 return -TARGET_EFAULT;
1631             ret = get_errno(shutdown(sockfd, how));
1634     case SOCKOP_sendmsg:
1635     case SOCKOP_recvmsg:
1638             abi_ulong target_msg;
1641             if (get_user_s32(fd, vptr)
1642                 || get_user_ual(target_msg, vptr + n)
1643                 || get_user_s32(flags, vptr + 2 * n))
1644                 return -TARGET_EFAULT;
1646             ret = do_sendrecvmsg(fd, target_msg, flags,
1647                                  (num == SOCKOP_sendmsg));
1650     case SOCKOP_setsockopt:
1658             if (get_user_s32(sockfd, vptr)
1659                 || get_user_s32(level, vptr + n)
1660                 || get_user_s32(optname, vptr + 2 * n)
1661                 || get_user_ual(optval, vptr + 3 * n)
1662                 || get_user_u32(optlen, vptr + 4 * n))
1663                 return -TARGET_EFAULT;
1665             ret = do_setsockopt(sockfd, level, optname, optval, optlen);
1668     case SOCKOP_getsockopt:
1676             if (get_user_s32(sockfd, vptr)
1677                 || get_user_s32(level, vptr + n)
1678                 || get_user_s32(optname, vptr + 2 * n)
1679                 || get_user_ual(optval, vptr + 3 * n)
1680                 || get_user_u32(optlen, vptr + 4 * n))
1681                 return -TARGET_EFAULT;
1683             ret = do_getsockopt(sockfd, level, optname, optval, optlen);
1687         gemu_log("Unsupported socketcall: %d\n", num);
1688         ret = -TARGET_ENOSYS;
/* Fixed-size table of guest shm attachments; a slot with start == 0 is free. */
1695 #define N_SHM_REGIONS	32
1697 static struct shm_region {
1700 } shm_regions[N_SHM_REGIONS];
/* Guest-layout mirror of the kernel's struct ipc_perm. */
1702 struct target_ipc_perm
1709     unsigned short int mode;
1710     unsigned short int __pad1;
1711     unsigned short int __seq;
1712     unsigned short int __pad2;
1713     abi_ulong __unused1;
1714     abi_ulong __unused2;
/* Guest-layout mirror of struct semid_ds (SysV semaphore set state). */
1717 struct target_semid_ds
1719   struct target_ipc_perm sem_perm;
1720   abi_ulong sem_otime;
1721   abi_ulong __unused1;
1722   abi_ulong sem_ctime;
1723   abi_ulong __unused2;
1724   abi_ulong sem_nsems;
1725   abi_ulong __unused3;
1726   abi_ulong __unused4;
/*
 * Copy the ipc_perm embedded in a guest semid_ds at target_addr into the
 * host struct, byte-swapping each field. Returns 0 or -TARGET_EFAULT.
 */
1729 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
1730                                                abi_ulong target_addr)
1732     struct target_ipc_perm *target_ip;
1733     struct target_semid_ds *target_sd;
1735     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
1736         return -TARGET_EFAULT;
1737     target_ip=&(target_sd->sem_perm);
1738     host_ip->__key = tswapl(target_ip->__key);
1739     host_ip->uid = tswapl(target_ip->uid);
1740     host_ip->gid = tswapl(target_ip->gid);
1741     host_ip->cuid = tswapl(target_ip->cuid);
1742     host_ip->cgid = tswapl(target_ip->cgid);
1743     host_ip->mode = tswapl(target_ip->mode);
1744     unlock_user_struct(target_sd, target_addr, 0);
/*
 * Inverse of target_to_host_ipc_perm(): write a host ipc_perm back into
 * the sem_perm field of the guest semid_ds at target_addr.
 */
1748 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
1749                                                struct ipc_perm *host_ip)
1751     struct target_ipc_perm *target_ip;
1752     struct target_semid_ds *target_sd;
1754     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
1755         return -TARGET_EFAULT;
1756     target_ip = &(target_sd->sem_perm);
1757     target_ip->__key = tswapl(host_ip->__key);
1758     target_ip->uid = tswapl(host_ip->uid);
1759     target_ip->gid = tswapl(host_ip->gid);
1760     target_ip->cuid = tswapl(host_ip->cuid);
1761     target_ip->cgid = tswapl(host_ip->cgid);
1762     target_ip->mode = tswapl(host_ip->mode);
1763     unlock_user_struct(target_sd, target_addr, 1);
/*
 * Convert a guest semid_ds to host form: perms via the helper above,
 * then the count/time fields with byte-swapping.
 */
1767 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
1768                                                abi_ulong target_addr)
1770     struct target_semid_ds *target_sd;
1772     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
1773         return -TARGET_EFAULT;
1774     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
1775         return -TARGET_EFAULT;
1776     host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
1777     host_sd->sem_otime = tswapl(target_sd->sem_otime);
1778     host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
1779     unlock_user_struct(target_sd, target_addr, 0);
/*
 * Inverse of target_to_host_semid_ds(): write a host semid_ds back to
 * guest memory.
 * NOTE(review): stray double semicolon at original line 1791 (harmless).
 */
1783 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
1784                                                struct semid_ds *host_sd)
1786     struct target_semid_ds *target_sd;
1788     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
1789         return -TARGET_EFAULT;
1790     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
1791         return -TARGET_EFAULT;;
1792     target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
1793     target_sd->sem_otime = tswapl(host_sd->sem_otime);
1794     target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
1795     unlock_user_struct(target_sd, target_addr, 1);
/* Guest-layout mirror of struct seminfo (semctl IPC_INFO/SEM_INFO). */
1799 struct target_seminfo {
/*
 * Copy a host seminfo into guest memory, swapping each field via
 * __put_user. Returns 0 or -TARGET_EFAULT.
 */
1812 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
1813                                               struct seminfo *host_seminfo)
1815     struct target_seminfo *target_seminfo;
1816     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
1817         return -TARGET_EFAULT;
1818     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
1819     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
1820     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
1821     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
1822     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
1823     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
1824     __put_user(host_seminfo->semume, &target_seminfo->semume);
1825     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
1826     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
1827     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
1828     unlock_user_struct(target_seminfo, target_addr, 1);
/* Host-side semctl(2) argument union (buf / array / __buf members). */
1834 	struct semid_ds *buf;
1835 	unsigned short *array;
1836 	struct seminfo *__buf;
/* Guest-side counterpart of the semun union. */
1839 union target_semun {
/*
 * Allocate a host array of semaphore values (size taken from an
 * IPC_STAT on the set) and fill it from guest memory for GETALL/SETALL.
 * Caller owns *host_array.
 * NOTE(review): malloc() result is not checked before use — confirm
 * against later hardening of this file.
 */
1846 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
1847                                                abi_ulong target_addr)
1850     unsigned short *array;
1852     struct semid_ds semid_ds;
1855     semun.buf = &semid_ds;
1857     ret = semctl(semid, 0, IPC_STAT, semun);
1859         return get_errno(ret);
1861     nsems = semid_ds.sem_nsems;
1863     *host_array = malloc(nsems*sizeof(unsigned short));
1864     array = lock_user(VERIFY_READ, target_addr,
1865                       nsems*sizeof(unsigned short), 1);
1867         return -TARGET_EFAULT;
1869     for(i=0; i<nsems; i++) {
1870         __get_user((*host_array)[i], &array[i]);
1872     unlock_user(array, target_addr, 0);
/*
 * Inverse of target_to_host_semarray(): write the host semaphore-value
 * array back to guest memory after a GETALL, sizing it with IPC_STAT.
 */
1877 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
1878                                                unsigned short **host_array)
1881     unsigned short *array;
1883     struct semid_ds semid_ds;
1886     semun.buf = &semid_ds;
1888     ret = semctl(semid, 0, IPC_STAT, semun);
1890         return get_errno(ret);
1892     nsems = semid_ds.sem_nsems;
1894     array = lock_user(VERIFY_WRITE, target_addr,
1895                       nsems*sizeof(unsigned short), 0);
1897         return -TARGET_EFAULT;
1899     for(i=0; i<nsems; i++) {
1900         __put_user((*host_array)[i], &array[i]);
1903     unlock_user(array, target_addr, 1);
/*
 * Emulate semctl(2): dispatch on cmd (elided case labels), converting the
 * semun argument in the appropriate direction around the host call.
 * Returns target errnos.
 */
1908 static inline abi_long do_semctl(int semid, int semnum, int cmd,
1909                                  union target_semun target_su)
1912     struct semid_ds dsarg;
1913     unsigned short *array;
1914     struct seminfo seminfo;
1915     abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET path: round-trip a semid_ds through guest memory. */
1924         err = target_to_host_semid_ds(&dsarg, target_su.buf);
1928         ret = get_errno(semctl(semid, semnum, cmd, arg));
1929         err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* GETVAL/SETVAL path: plain integer value, swapped both ways. */
1935         arg.val = tswapl(target_su.val);
1936         ret = get_errno(semctl(semid, semnum, cmd, arg));
1937         target_su.val = tswapl(arg.val);
/* GETALL/SETALL path: whole value array. */
1941         err = target_to_host_semarray(semid, &array, target_su.array);
1945         ret = get_errno(semctl(semid, semnum, cmd, arg));
1946         err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_INFO/SEM_INFO path. */
1952         arg.__buf = &seminfo;
1953         ret = get_errno(semctl(semid, semnum, cmd, arg));
1954         err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Commands taking no argument. */
1962         ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest-layout mirror of struct sembuf (one semop operation). */
1969 struct target_sembuf {
1970     unsigned short sem_num;
/*
 * Convert an array of nsops guest sembufs to host form.
 * NOTE(review): uses __put_user to read guest fields into host structs;
 * __get_user would be the expected direction here — the macros may be
 * symmetric in this tree, confirm.
 */
1975 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
1976                                              abi_ulong target_addr,
1979     struct target_sembuf *target_sembuf;
1982     target_sembuf = lock_user(VERIFY_READ, target_addr,
1983                               nsops*sizeof(struct target_sembuf), 1);
1985         return -TARGET_EFAULT;
1987     for(i=0; i<nsops; i++) {
1988         __put_user(target_sembuf[i].sem_num, &host_sembuf[i].sem_num);
1989         __put_user(target_sembuf[i].sem_op, &host_sembuf[i].sem_op);
1990         __put_user(target_sembuf[i].sem_flg, &host_sembuf[i].sem_flg);
1993     unlock_user(target_sembuf, target_addr, 0);
/*
 * Emulate semop(2): convert the guest sembuf array (VLA on the stack —
 * nsops is caller-controlled, so a huge value could overflow the stack;
 * NOTE(review): confirm an upper bound is enforced elsewhere) and call
 * the host semop. Returns the raw host result, not a target errno.
 */
1998 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2000     struct sembuf sops[nsops];
2002     if (target_to_host_sembuf(sops, ptr, nsops))
2003         return -TARGET_EFAULT;
2005     return semop(semid, sops, nsops);
/*
 * Guest-layout mirror of struct msqid_ds. On 32-bit ABIs each time_t
 * field carries a padding word.
 */
2008 struct target_msqid_ds
2010     struct target_ipc_perm msg_perm;
2011     abi_ulong msg_stime;
2012 #if TARGET_ABI_BITS == 32
2013     abi_ulong __unused1;
2015     abi_ulong msg_rtime;
2016 #if TARGET_ABI_BITS == 32
2017     abi_ulong __unused2;
2019     abi_ulong msg_ctime;
2020 #if TARGET_ABI_BITS == 32
2021     abi_ulong __unused3;
2023     abi_ulong __msg_cbytes;
2025     abi_ulong msg_qbytes;
2026     abi_ulong msg_lspid;
2027     abi_ulong msg_lrpid;
2028     abi_ulong __unused4;
2029     abi_ulong __unused5;
/* Convert a guest msqid_ds to host form (perms via shared helper). */
2032 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2033                                                abi_ulong target_addr)
2035     struct target_msqid_ds *target_md;
2037     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2038         return -TARGET_EFAULT;
2039     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2040         return -TARGET_EFAULT;
2041     host_md->msg_stime = tswapl(target_md->msg_stime);
2042     host_md->msg_rtime = tswapl(target_md->msg_rtime);
2043     host_md->msg_ctime = tswapl(target_md->msg_ctime);
2044     host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2045     host_md->msg_qnum = tswapl(target_md->msg_qnum);
2046     host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2047     host_md->msg_lspid = tswapl(target_md->msg_lspid);
2048     host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2049     unlock_user_struct(target_md, target_addr, 0);
/* Inverse of target_to_host_msqid_ds(): write a host msqid_ds to guest memory. */
2053 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2054                                                struct msqid_ds *host_md)
2056     struct target_msqid_ds *target_md;
2058     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2059         return -TARGET_EFAULT;
2060     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2061         return -TARGET_EFAULT;
2062     target_md->msg_stime = tswapl(host_md->msg_stime);
2063     target_md->msg_rtime = tswapl(host_md->msg_rtime);
2064     target_md->msg_ctime = tswapl(host_md->msg_ctime);
2065     target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2066     target_md->msg_qnum = tswapl(host_md->msg_qnum);
2067     target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2068     target_md->msg_lspid = tswapl(host_md->msg_lspid);
2069     target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2070     unlock_user_struct(target_md, target_addr, 1);
/* Guest-layout mirror of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
2074 struct target_msginfo {
2082     unsigned short int msgseg;
/* Copy a host msginfo into guest memory, field by field. */
2085 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2086                                               struct msginfo *host_msginfo)
2088     struct target_msginfo *target_msginfo;
2089     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2090         return -TARGET_EFAULT;
2091     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2092     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2093     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2094     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2095     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2096     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2097     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2098     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2099     unlock_user_struct(target_msginfo, target_addr, 1);
/*
 * Emulate msgctl(2): dispatch on cmd (case labels elided), converting the
 * msqid_ds or msginfo buffer around the host call. Returns target errnos.
 */
2103 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2105     struct msqid_ds dsarg;
2106     struct msginfo msginfo;
2107     abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET path. */
2115         if (target_to_host_msqid_ds(&dsarg,ptr))
2116             return -TARGET_EFAULT;
2117         ret = get_errno(msgctl(msgid, cmd, &dsarg));
2118         if (host_to_target_msqid_ds(ptr,&dsarg))
2119             return -TARGET_EFAULT;
/* IPC_RMID path: no buffer. */
2122         ret = get_errno(msgctl(msgid, cmd, NULL));
/* IPC_INFO/MSG_INFO path: kernel writes a msginfo through the cast pointer. */
2126         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2127         if (host_to_target_msginfo(ptr, &msginfo))
2128             return -TARGET_EFAULT;
/* Guest-layout mirror of struct msgbuf (mtype + flexible text). */
2135 struct target_msgbuf {
/*
 * Emulate msgsnd(2): copy mtype and msgsz bytes of payload into a
 * freshly malloc'd host msgbuf and send it.
 * NOTE(review): malloc() is unchecked and host_mb does not appear to be
 * freed in the visible lines — possible leak; confirm in the elided part.
 */
2140 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2141                                  unsigned int msgsz, int msgflg)
2143     struct target_msgbuf *target_mb;
2144     struct msgbuf *host_mb;
2147     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2148         return -TARGET_EFAULT;
2149     host_mb = malloc(msgsz+sizeof(long));
2150     host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2151     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2152     ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2154     unlock_user_struct(target_mb, msgp, 0);
/*
 * Emulate msgrcv(2): receive into a malloc'd host msgbuf, then copy the
 * mtype and 'ret' payload bytes back into the guest msgbuf.
 * NOTE(review): mtext is copied via target_mb while a separate
 * lock_user() of the mtext address is also taken — confirm both mappings
 * refer to the same guest memory in this tree.
 */
2159 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2160                                  unsigned int msgsz, abi_long msgtyp,
2163     struct target_msgbuf *target_mb;
2165     struct msgbuf *host_mb;
2168     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2169         return -TARGET_EFAULT;
2171     host_mb = malloc(msgsz+sizeof(long));
2172     ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2175         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2176         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2177         if (!target_mtext) {
2178             ret = -TARGET_EFAULT;
2181         memcpy(target_mb->mtext, host_mb->mtext, ret);
2182         unlock_user(target_mtext, target_mtext_addr, ret);
2185     target_mb->mtype = tswapl(host_mb->mtype);
2190     unlock_user_struct(target_mb, msgp, 1);
/*
 * Guest-layout mirror of struct shmid_ds; 32-bit ABIs carry padding
 * after each time field.
 */
2194 struct target_shmid_ds
2196     struct target_ipc_perm shm_perm;
2197     abi_ulong shm_segsz;
2198     abi_ulong shm_atime;
2199 #if TARGET_ABI_BITS == 32
2200     abi_ulong __unused1;
2202     abi_ulong shm_dtime;
2203 #if TARGET_ABI_BITS == 32
2204     abi_ulong __unused2;
2206     abi_ulong shm_ctime;
2207 #if TARGET_ABI_BITS == 32
2208     abi_ulong __unused3;
2212     abi_ulong shm_nattch;
2213     unsigned long int __unused4;
2214     unsigned long int __unused5;
/* Convert a guest shmid_ds to host form (perms via shared helper). */
2217 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2218                                                abi_ulong target_addr)
2220     struct target_shmid_ds *target_sd;
2222     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2223         return -TARGET_EFAULT;
2224     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2225         return -TARGET_EFAULT;
2226     __put_user(target_sd->shm_segsz, &host_sd->shm_segsz);
2227     __put_user(target_sd->shm_atime, &host_sd->shm_atime);
2228     __put_user(target_sd->shm_dtime, &host_sd->shm_dtime);
2229     __put_user(target_sd->shm_ctime, &host_sd->shm_ctime);
2230     __put_user(target_sd->shm_cpid, &host_sd->shm_cpid);
2231     __put_user(target_sd->shm_lpid, &host_sd->shm_lpid);
2232     __put_user(target_sd->shm_nattch, &host_sd->shm_nattch);
2233     unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_shmid_ds(): write a host shmid_ds to guest memory. */
2237 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2238                                                struct shmid_ds *host_sd)
2240     struct target_shmid_ds *target_sd;
2242     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2243         return -TARGET_EFAULT;
2244     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2245         return -TARGET_EFAULT;
2246     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2247     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2248     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2249     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2250     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2251     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2252     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2253     unlock_user_struct(target_sd, target_addr, 1);
/* Guest-layout mirror of struct shminfo (shmctl IPC_INFO). */
2257 struct target_shminfo {
/* Copy a host shminfo into guest memory. */
2265 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2266                                               struct shminfo *host_shminfo)
2268     struct target_shminfo *target_shminfo;
2269     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2270         return -TARGET_EFAULT;
2271     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2272     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2273     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2274     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2275     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2276     unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest-layout mirror of struct shm_info (shmctl SHM_INFO). */
2280 struct target_shm_info {
2285     abi_ulong swap_attempts;
2286     abi_ulong swap_successes;
/* Copy a host shm_info into guest memory. */
2289 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2290                                                struct shm_info *host_shm_info)
2292     struct target_shm_info *target_shm_info;
2293     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2294         return -TARGET_EFAULT;
2295     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2296     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2297     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2298     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2299     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2300     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2301     unlock_user_struct(target_shm_info, target_addr, 1);
/*
 * Emulate shmctl(2): dispatch on cmd (case labels elided), converting the
 * shmid_ds / shminfo / shm_info buffer around the host call.
 */
2305 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2307     struct shmid_ds dsarg;
2308     struct shminfo shminfo;
2309     struct shm_info shm_info;
2310     abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET path. */
2318         if (target_to_host_shmid_ds(&dsarg, buf))
2319             return -TARGET_EFAULT;
2320         ret = get_errno(shmctl(shmid, cmd, &dsarg));
2321         if (host_to_target_shmid_ds(buf, &dsarg))
2322             return -TARGET_EFAULT;
/* IPC_INFO path: kernel writes a shminfo through the cast pointer. */
2325         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2326         if (host_to_target_shminfo(buf, &shminfo))
2327             return -TARGET_EFAULT;
/* SHM_INFO path. */
2330         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2331         if (host_to_target_shm_info(buf, &shm_info))
2332             return -TARGET_EFAULT;
/* Commands taking no buffer (e.g. IPC_RMID). */
2337         ret = get_errno(shmctl(shmid, cmd, NULL));
/*
 * Emulate shmat(2). When the guest supplies no address, a free guest
 * VA range is found with mmap_find_vma() and the host mapping is forced
 * there with SHM_REMAP. On success the pages are marked valid/readable
 * (writable unless SHM_RDONLY) and the attachment is recorded in
 * shm_regions[] so do_shmdt() can clear the page flags later.
 */
2344 static inline abi_long do_shmat(int shmid, abi_ulong shmaddr, int shmflg,
2345                                 unsigned long *raddr)
2347     abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size);
2349     struct shmid_ds shm_info;
2352     /* find out the length of the shared memory segment */
2353     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2354     if (is_error(ret)) {
2355         /* can't get length, bail out */
2356         return get_errno(ret);
2362         *raddr = (unsigned long) shmat(shmid, g2h(shmaddr), shmflg);
2364         abi_ulong mmap_start;
2366         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2368         if (mmap_start == -1) {
2372             *raddr = (unsigned long) shmat(shmid, g2h(mmap_start),
2373                                            shmflg | SHM_REMAP);
2378         return get_errno(*raddr);
2381     page_set_flags(h2g(*raddr), h2g(*raddr) + shm_info.shm_segsz,
2382                    PAGE_VALID | PAGE_READ |
2383                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2385     for (i = 0; i < N_SHM_REGIONS; i++) {
2386         if (shm_regions[i].start == 0) {
2387             shm_regions[i].start = h2g(*raddr);
2388             shm_regions[i].size = shm_info.shm_segsz;
/*
 * Emulate shmdt(2): locate the matching shm_regions[] slot, free it, and
 * clear the page flags before detaching on the host.
 * NOTE(review): page_set_flags() is passed the region size as the 'end'
 * argument rather than start + size — looks like an off-by-range bug;
 * confirm against the upstream fix.
 */
2397 static inline abi_long do_shmdt(abi_ulong shmaddr)
2401     for (i = 0; i < N_SHM_REGIONS; ++i) {
2402         if (shm_regions[i].start == shmaddr) {
2403             shm_regions[i].start = 0;
2404             page_set_flags(shmaddr, shm_regions[i].size, 0);
2409     return get_errno(shmdt(g2h(shmaddr)));
2412 #ifdef TARGET_NR_ipc
2413 /* ??? This only works with linear mappings. */
2414 /* do_ipc() must return target values and target errnos. */
/*
 * Demultiplexer for the ipc(2) syscall: the low 16 bits of 'call' select
 * the SysV IPC operation, the high 16 bits carry a version used by the
 * old-style msgrcv calling convention (arguments packed in a kludge
 * struct) versus the new one (fifth argument used directly).
 */
2415 static abi_long do_ipc(unsigned int call, int first,
2416                        int second, int third,
2417                        abi_long ptr, abi_long fifth)
2422     version = call >> 16;
2427         ret = do_semop(first, ptr, second);
2431         ret = get_errno(semget(first, second, third));
2435         ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2439         ret = get_errno(msgget(first, second));
2443         ret = do_msgsnd(first, ptr, second, third);
2447         ret = do_msgctl(first, second, ptr);
/* Old-style msgrcv: ptr points at a {msgp, msgtyp} kludge struct. */
2454             struct target_ipc_kludge {
2459             if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2460                 ret = -TARGET_EFAULT;
2464             ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2466             unlock_user_struct(tmp, ptr, 0);
2470             ret = do_msgrcv(first, ptr, second, fifth, third);
2478             unsigned long raddr;
2480             ret = do_shmat(first, ptr, second, &raddr);
/* Store the attach address through the pointer passed in 'third'. */
2484             ret = put_user_ual(raddr, third);
2488             ret = -TARGET_EINVAL;
2494         ret = do_shmdt(ptr);
2498         ret = get_errno(shmget(first, second, third));
2502         ret = do_shmctl(first, second, third);
2506         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2507         ret = -TARGET_ENOSYS;
2514 /* kernel structure types definitions */
/*
 * The thunk machinery: syscall_types.h is included twice — first to
 * build an enum of STRUCT_* ids, then to emit an argtype descriptor
 * array (struct_<name>_def) for each kernel structure.
 */
2517 #define STRUCT(name, list...) STRUCT_ ## name,
2518 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2520 #include "syscall_types.h"
2523 #undef STRUCT_SPECIAL
2525 #define STRUCT(name, list...) static const argtype struct_ ## name ## _def[] = { list, TYPE_NULL };
2526 #define STRUCT_SPECIAL(name)
2527 #include "syscall_types.h"
2529 #undef STRUCT_SPECIAL
/* One table row per supported ioctl: target cmd, host cmd, name,
 * access direction, and the argument-type descriptor. */
2531 typedef struct IOCTLEntry {
2532     unsigned int target_cmd;
2533     unsigned int host_cmd;
2536     const argtype arg_type[5];
2539 #define IOC_R 0x0001
2540 #define IOC_W 0x0002
2541 #define IOC_RW (IOC_R | IOC_W)
2543 #define MAX_STRUCT_SIZE 4096
2545 static IOCTLEntry ioctl_entries[] = {
2546 #define IOCTL(cmd, access, types...) \
2547     { TARGET_ ## cmd, cmd, #cmd, access, { types } },
2552 /* ??? Implement proper locking for ioctls. */
2553 /* do_ioctl() Must return target values and target errnos. */
/*
 * Generic ioctl(2) emulation: look the target cmd up in ioctl_entries[],
 * then based on the entry's argtype either pass the argument through
 * untouched or thunk-convert a structure into buf_temp for the host call
 * (and back for IOC_R / IOC_RW).
 */
2554 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
2556     const IOCTLEntry *ie;
2557     const argtype *arg_type;
2559     uint8_t buf_temp[MAX_STRUCT_SIZE];
2565         if (ie->target_cmd == 0) {
2566             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
2567             return -TARGET_ENOSYS;
2569         if (ie->target_cmd == cmd)
2573     arg_type = ie->arg_type;
2575     gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
2577     switch(arg_type[0]) {
/* TYPE_NULL: no argument at all. */
2580         ret = get_errno(ioctl(fd, ie->host_cmd));
/* TYPE_INT: pass the integer argument straight through. */
2585         ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* TYPE_PTR: convert a structure according to the access direction. */
2589         target_size = thunk_type_size(arg_type, 0);
2590         switch(ie->access) {
2592             ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2593             if (!is_error(ret)) {
2594                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2596                     return -TARGET_EFAULT;
2597                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2598                 unlock_user(argptr, arg, target_size);
2602             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2604                 return -TARGET_EFAULT;
2605             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2606             unlock_user(argptr, arg, 0);
2607             ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* IOC_RW: convert in, call, convert back out on success. */
2611             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2613                 return -TARGET_EFAULT;
2614             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2615             unlock_user(argptr, arg, 0);
2616             ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2617             if (!is_error(ret)) {
2618                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2620                     return -TARGET_EFAULT;
2621                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2622                 unlock_user(argptr, arg, target_size);
2628         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
2629                  (long)cmd, arg_type[0]);
2630         ret = -TARGET_ENOSYS;
/* termios c_iflag: target<->host bit translation table (mask, bits pairs). */
2636 static const bitmask_transtbl iflag_tbl[] = {
2637         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
2638         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
2639         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
2640         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
2641         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
2642         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
2643         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
2644         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
2645         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
2646         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
2647         { TARGET_IXON, TARGET_IXON, IXON, IXON },
2648         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
2649         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
2650         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* termios c_oflag translation; multi-bit delay fields (NLDLY, CRDLY,
 * TABDLY, ...) get one row per possible value under the same mask. */
2654 static const bitmask_transtbl oflag_tbl[] = {
2655 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
2656 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
2657 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
2658 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
2659 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
2660 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
2661 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
2662 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
2663 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
2664 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
2665 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
2666 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
2667 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
2668 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
2669 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
2670 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
2671 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
2672 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
2673 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
2674 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
2675 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
2676 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
2677 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
2678 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* termios c_cflag translation; CBAUD and CSIZE are multi-bit fields with
 * one row per baud rate / character size. */
2682 static const bitmask_transtbl cflag_tbl[] = {
2683 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
2684 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
2685 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
2686 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
2687 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
2688 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
2689 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
2690 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
2691 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
2692 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
2693 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
2694 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
2695 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
2696 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
2697 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
2698 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
2699 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
2700 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
2701 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
2702 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
2703 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
2704 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
2705 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
2706 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
2707 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
2708 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
2709 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
2710 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
2711 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
2712 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
2713 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* c_lflag (local mode) translation table: canonical mode, echo variants,
 * signal generation and extended processing bits, target <-> host.
 * Row format: { target_mask, target_bits, host_mask, host_bits }. */
2717 static const bitmask_transtbl lflag_tbl[] = {
2718 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
2719 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
2720 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
2721 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
2722 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
2723 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
2724 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
2725 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
2726 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
2727 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
2728 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
2729 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
2730 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
2731 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
2732 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/* Thunk converter: copy a guest (target) struct termios into the host
 * layout.  Flag words go through the bitmask tables above with a 32-bit
 * byte swap; control characters are copied slot by slot because the
 * TARGET_V* indices may differ from the host V* indices.
 * NOTE(review): the "host->c_iflag =" style left-hand sides for the four
 * flag words are not visible in this excerpt — confirm against the full
 * source that each bitmask result is assigned, not discarded. */
2736 static void target_to_host_termios (void *dst, const void *src)
2738 struct host_termios *host = dst;
2739 const struct target_termios *target = src;
2742 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
2744 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
2746 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
2748 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
2749 host->c_line = target->c_line;
/* Control characters are single bytes: no swapping, only re-indexing. */
2751 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
2752 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
2753 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
2754 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
2755 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
2756 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
2757 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
2758 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
2759 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
2760 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
2761 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
2762 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
2763 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
2764 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
2765 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
2766 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
2767 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Inverse of target_to_host_termios: copy a host struct termios into the
 * guest layout, swapping flag words to target byte order and re-indexing
 * the control-character array.
 * NOTE(review): as above, the flag-word assignment left-hand sides are
 * not visible in this excerpt — verify against the full source. */
2770 static void host_to_target_termios (void *dst, const void *src)
2772 struct target_termios *target = dst;
2773 const struct host_termios *host = src;
2776 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
2778 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
2780 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
2782 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
2783 target->c_line = host->c_line;
/* Per-slot copy: host V* index -> target TARGET_V* index. */
2785 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
2786 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
2787 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
2788 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
2789 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
2790 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
2791 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
2792 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
2793 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
2794 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
2795 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
2796 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
2797 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
2798 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
2799 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
2800 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
2801 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor for struct termios: registers the two converters
 * above plus size/alignment for both directions, so the generic ioctl
 * marshalling code can translate TCGETS/TCSETS-style payloads. */
2804 static const StructEntry struct_termios_def = {
2805 .convert = { host_to_target_termios, target_to_host_termios },
2806 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
2807 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* mmap() flag translation table (target <-> host).  Each flag maps
 * one-to-one; only the numeric values may differ between ABIs. */
2810 static bitmask_transtbl mmap_flags_tbl[] = {
2811 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
2812 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
2813 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
2814 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
2815 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
2816 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
2817 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
2818 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
/* open()/fcntl() file-flag translation table.  The O_ACCMODE rows use a
 * mask wider than the value so O_RDONLY (0), O_WRONLY and O_RDWR are
 * disambiguated.  O_DIRECT is conditional: not all hosts define it. */
2822 static bitmask_transtbl fcntl_flags_tbl[] = {
2823 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
2824 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
2825 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
2826 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
2827 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
2828 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
2829 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
2830 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
2831 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
2832 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
2833 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
2834 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
2835 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
2836 #if defined(O_DIRECT)
2837 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
/* i386-only LDT emulation.  The emulated LDT lives in a guest-visible
 * buffer (ldt_table points into guest memory via g2h, see write_ldt). */
2842 #if defined(TARGET_I386)
2844 /* NOTE: there is really one LDT for all the threads */
2845 static uint8_t *ldt_table;
/* modify_ldt(func=0): copy the emulated LDT out to guest memory at ptr.
 * Copies at most bytecount bytes; returns -TARGET_EFAULT when the guest
 * buffer cannot be locked for writing.
 * NOTE(review): the size-clamp branch body and the return value on the
 * success path are elided from this excerpt. */
2847 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
2854 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
2855 if (size > bytecount)
2857 p = lock_user(VERIFY_WRITE, ptr, size, 0);
2859 return -TARGET_EFAULT;
2860 /* ??? Should this by byteswapped? */
2861 memcpy(p, ldt_table, size);
2862 unlock_user(p, ptr, size);
2866 /* XXX: add locking support */
/* modify_ldt(func=1 or 0x11): install one LDT descriptor from the guest
 * struct user_desc at ptr.  Mirrors the Linux kernel's write_ldt():
 * decodes the packed 'flags' word, validates the entry, lazily allocates
 * the emulated LDT on first use, then writes the two 32-bit descriptor
 * words.  'oldmode' selects the legacy (pre-"useable" bit) semantics. */
2867 static abi_long write_ldt(CPUX86State *env,
2868 abi_ulong ptr, unsigned long bytecount, int oldmode)
2870 struct target_modify_ldt_ldt_s ldt_info;
2871 struct target_modify_ldt_ldt_s *target_ldt_info;
2872 int seg_32bit, contents, read_exec_only, limit_in_pages;
2873 int seg_not_present, useable, lm;
2874 uint32_t *lp, entry_1, entry_2;
/* The guest must pass exactly one user_desc record. */
2876 if (bytecount != sizeof(ldt_info))
2877 return -TARGET_EINVAL;
2878 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
2879 return -TARGET_EFAULT;
2880 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
2881 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
2882 ldt_info.limit = tswap32(target_ldt_info->limit);
2883 ldt_info.flags = tswap32(target_ldt_info->flags);
2884 unlock_user_struct(target_ldt_info, ptr, 0);
2886 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
2887 return -TARGET_EINVAL;
/* Unpack the kernel user_desc flag bits (same layout as Linux). */
2888 seg_32bit = ldt_info.flags & 1;
2889 contents = (ldt_info.flags >> 1) & 3;
2890 read_exec_only = (ldt_info.flags >> 3) & 1;
2891 limit_in_pages = (ldt_info.flags >> 4) & 1;
2892 seg_not_present = (ldt_info.flags >> 5) & 1;
2893 useable = (ldt_info.flags >> 6) & 1;
2897 lm = (ldt_info.flags >> 7) & 1;
/* contents == 3 (conforming code in "expand-down" slot) is rejected
 * unless the entry is marked not-present, matching the kernel check.
 * NOTE(review): intervening lines are elided here; confirm the exact
 * condition shape against the full source. */
2899 if (contents == 3) {
2901 return -TARGET_EINVAL;
2902 if (seg_not_present == 0)
2903 return -TARGET_EINVAL;
2905 /* allocate the LDT */
/* Lazy first-use allocation: map guest-visible zeroed pages for the
 * whole emulated LDT and remember the host alias in ldt_table. */
2907 env->ldt.base = target_mmap(0,
2908 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
2909 PROT_READ|PROT_WRITE,
2910 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
2911 if (env->ldt.base == -1)
2912 return -TARGET_ENOMEM;
2913 memset(g2h(env->ldt.base), 0,
2914 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
2915 env->ldt.limit = 0xffff;
2916 ldt_table = g2h(env->ldt.base);
2919 /* NOTE: same code as Linux kernel */
2920 /* Allow LDTs to be cleared by the user. */
2921 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
2924 read_exec_only == 1 &&
2926 limit_in_pages == 0 &&
2927 seg_not_present == 1 &&
/* Build the two descriptor words: base/limit split across both,
 * access-rights bits in entry_2 (bit layout per the IA-32 manual). */
2935 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
2936 (ldt_info.limit & 0x0ffff);
2937 entry_2 = (ldt_info.base_addr & 0xff000000) |
2938 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
2939 (ldt_info.limit & 0xf0000) |
2940 ((read_exec_only ^ 1) << 9) |
2942 ((seg_not_present ^ 1) << 15) |
2944 (limit_in_pages << 23) |
/* Non-oldmode callers also get the AVL ("useable") bit. */
2948 entry_2 |= (useable << 20);
2950 /* Install the new entry ... */
2952 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
2953 lp[0] = tswap32(entry_1);
2954 lp[1] = tswap32(entry_2);
2958 /* specific and weird i386 syscalls */
/* Dispatcher for the modify_ldt(2) syscall: func 0 reads the LDT,
 * func 1 writes in legacy mode, func 0x11 writes in new mode; anything
 * else is -TARGET_ENOSYS.  (The case labels are elided in this excerpt;
 * the dispatch order below matches read, write-old, write-new.) */
2959 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
2960 unsigned long bytecount)
2966 ret = read_ldt(ptr, bytecount);
2969 ret = write_ldt(env, ptr, bytecount, 1);
2972 ret = write_ldt(env, ptr, bytecount, 0);
2975 ret = -TARGET_ENOSYS;
2981 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(2) emulation for 32-bit x86 guests: install a TLS
 * descriptor in the guest GDT.  Largely parallels write_ldt() above but
 * targets the GDT TLS slots and supports entry_number == -1, meaning
 * "pick a free slot and report it back to the guest". */
2982 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
2984 uint64_t *gdt_table = g2h(env->gdt.base);
2985 struct target_modify_ldt_ldt_s ldt_info;
2986 struct target_modify_ldt_ldt_s *target_ldt_info;
2987 int seg_32bit, contents, read_exec_only, limit_in_pages;
2988 int seg_not_present, useable, lm;
2989 uint32_t *lp, entry_1, entry_2;
2992 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
2993 if (!target_ldt_info)
2994 return -TARGET_EFAULT;
2995 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
2996 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
2997 ldt_info.limit = tswap32(target_ldt_info->limit);
2998 ldt_info.flags = tswap32(target_ldt_info->flags);
/* entry_number == -1: scan the TLS range for an empty GDT slot and
 * write the chosen index back into the guest struct. */
2999 if (ldt_info.entry_number == -1) {
3000 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3001 if (gdt_table[i] == 0) {
3002 ldt_info.entry_number = i;
3003 target_ldt_info->entry_number = tswap32(i);
3008 unlock_user_struct(target_ldt_info, ptr, 1);
3010 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3011 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3012 return -TARGET_EINVAL;
/* Unpack the packed user_desc flags (same bit layout as write_ldt). */
3013 seg_32bit = ldt_info.flags & 1;
3014 contents = (ldt_info.flags >> 1) & 3;
3015 read_exec_only = (ldt_info.flags >> 3) & 1;
3016 limit_in_pages = (ldt_info.flags >> 4) & 1;
3017 seg_not_present = (ldt_info.flags >> 5) & 1;
3018 useable = (ldt_info.flags >> 6) & 1;
3022 lm = (ldt_info.flags >> 7) & 1;
3025 if (contents == 3) {
3026 if (seg_not_present == 0)
3027 return -TARGET_EINVAL;
3030 /* NOTE: same code as Linux kernel */
3031 /* Allow LDTs to be cleared by the user. */
3032 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3033 if ((contents == 0 &&
3034 read_exec_only == 1 &&
3036 limit_in_pages == 0 &&
3037 seg_not_present == 1 &&
/* Assemble the descriptor words (see write_ldt for the bit map). */
3045 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3046 (ldt_info.limit & 0x0ffff);
3047 entry_2 = (ldt_info.base_addr & 0xff000000) |
3048 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3049 (ldt_info.limit & 0xf0000) |
3050 ((read_exec_only ^ 1) << 9) |
3052 ((seg_not_present ^ 1) << 15) |
3054 (limit_in_pages << 23) |
3059 /* Install the new entry ... */
3061 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3062 lp[0] = tswap32(entry_1);
3063 lp[1] = tswap32(entry_2);
/* get_thread_area(2) emulation: the inverse of do_set_thread_area.
 * Reads the descriptor at the guest-requested GDT TLS slot, decodes the
 * two descriptor words back into user_desc fields, and writes the result
 * to the guest struct at ptr. */
3067 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3069 struct target_modify_ldt_ldt_s *target_ldt_info;
3070 uint64_t *gdt_table = g2h(env->gdt.base);
3071 uint32_t base_addr, limit, flags;
3072 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3073 int seg_not_present, useable, lm;
3074 uint32_t *lp, entry_1, entry_2;
3076 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3077 if (!target_ldt_info)
3078 return -TARGET_EFAULT;
/* Only the TLS slot range is addressable through this syscall. */
3079 idx = tswap32(target_ldt_info->entry_number);
3080 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3081 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3082 unlock_user_struct(target_ldt_info, ptr, 1);
3083 return -TARGET_EINVAL;
3085 lp = (uint32_t *)(gdt_table + idx);
3086 entry_1 = tswap32(lp[0]);
3087 entry_2 = tswap32(lp[1]);
/* Decode the access-rights bits; the ^1 undoes the inverted encoding
 * used when the descriptor was installed. */
3089 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3090 contents = (entry_2 >> 10) & 3;
3091 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3092 seg_32bit = (entry_2 >> 22) & 1;
3093 limit_in_pages = (entry_2 >> 23) & 1;
3094 useable = (entry_2 >> 20) & 1;
3098 lm = (entry_2 >> 21) & 1;
/* Repack into the user_desc 'flags' word and reassemble base/limit. */
3100 flags = (seg_32bit << 0) | (contents << 1) |
3101 (read_exec_only << 3) | (limit_in_pages << 4) |
3102 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3103 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3104 base_addr = (entry_1 >> 16) |
3105 (entry_2 & 0xff000000) |
3106 ((entry_2 & 0xff) << 16);
3107 target_ldt_info->base_addr = tswapl(base_addr);
3108 target_ldt_info->limit = tswap32(limit);
3109 target_ldt_info->flags = tswap32(flags);
3110 unlock_user_struct(target_ldt_info, ptr, 1);
3113 #endif /* TARGET_I386 && TARGET_ABI32 */
3115 #ifndef TARGET_ABI32
/* arch_prctl(2) emulation for x86-64 guests: get/set the FS or GS
 * segment base.  SET loads a null selector then writes the base
 * directly; GET copies the base out to guest memory at addr.
 * NOTE(review): the idx selection for the "else" arms (FS cases) is on
 * lines elided from this excerpt. */
3116 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3123 case TARGET_ARCH_SET_GS:
3124 case TARGET_ARCH_SET_FS:
3125 if (code == TARGET_ARCH_SET_GS)
3129 cpu_x86_load_seg(env, idx, 0);
3130 env->segs[idx].base = addr;
3132 case TARGET_ARCH_GET_GS:
3133 case TARGET_ARCH_GET_FS:
3134 if (code == TARGET_ARCH_GET_GS)
3138 val = env->segs[idx].base;
3139 if (put_user(val, addr, abi_ulong))
3140 return -TARGET_EFAULT;
3143 ret = -TARGET_EINVAL;
3150 #endif /* defined(TARGET_I386) */
3152 #if defined(USE_NPTL)
/* NPTL clone support: new guest threads are backed by host pthreads.
 * clone_lock serialises thread creation; new_thread_info carries the
 * handshake state between do_fork() (parent) and clone_func() (child). */
3154 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
3156 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3159 pthread_mutex_t mutex;
3160 pthread_cond_t cond;
3163 abi_ulong child_tidptr;
3164 abi_ulong parent_tidptr;
/* Thread entry point for a cloned guest thread.  Publishes the child
 * TID (CLONE_CHILD_SETTID / CLONE_PARENT_SETTID), unblocks signals,
 * signals the parent via the condvar, then waits on clone_lock until
 * the parent has finished TLS setup before entering the CPU loop. */
3168 static void *clone_func(void *arg)
3170 new_thread_info *info = arg;
3175 info->tid = gettid();
3176 if (info->child_tidptr)
3177 put_user_u32(info->tid, info->child_tidptr);
3178 if (info->parent_tidptr)
3179 put_user_u32(info->tid, info->parent_tidptr);
3180 /* Enable signals. */
3181 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3182 /* Signal to the parent that we're ready. */
3183 pthread_mutex_lock(&info->mutex);
3184 pthread_cond_broadcast(&info->cond);
3185 pthread_mutex_unlock(&info->mutex);
3186 /* Wait until the parent has finshed initializing the tls state. */
3187 pthread_mutex_lock(&clone_lock);
3188 pthread_mutex_unlock(&clone_lock);
3194 /* this stack is the equivalent of the kernel stack associated with a
/* Non-NPTL fallback: raw clone() entry point running the CPU loop on a
 * small dedicated stack. */
3196 #define NEW_STACK_SIZE 8192
3198 static int clone_func(void *arg)
3200 CPUState *env = arg;
3207 /* do_fork() Must return host values and target errnos (unlike most
3208 do_*() functions). */
/* Emulate clone(2)/fork(2)/vfork(2) for the guest.
 * - vfork is downgraded to fork (CLONE_VFORK|CLONE_VM stripped).
 * - CLONE_VM (a thread): with NPTL, spawn a host pthread running a
 *   copied CPU state, with a mutex/condvar handshake so TID publication
 *   and TLS setup appear atomic to the child; without NPTL, fall back
 *   to raw clone() on a private stack.
 * - otherwise: a plain process fork; the child fixes up its own regs
 *   and TID pointers. */
3209 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3210 abi_ulong parent_tidptr, target_ulong newtls,
3211 abi_ulong child_tidptr)
3217 #if defined(USE_NPTL)
3218 unsigned int nptl_flags;
3222 /* Emulate vfork() with fork() */
3223 if (flags & CLONE_VFORK)
3224 flags &= ~(CLONE_VFORK | CLONE_VM);
3226 if (flags & CLONE_VM) {
3227 #if defined(USE_NPTL)
3228 new_thread_info info;
3229 pthread_attr_t attr;
/* Task state and the new thread's stack share one allocation. */
3231 ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
3232 init_task_state(ts);
3233 new_stack = ts->stack;
3234 /* we create a new CPU instance. */
3235 new_env = cpu_copy(env);
3236 /* Init regs that differ from the parent. */
3237 cpu_clone_regs(new_env, newsp);
3238 new_env->opaque = ts;
3239 #if defined(USE_NPTL)
/* NPTL-specific clone flags are handled here, not passed to the host. */
3241 flags &= ~CLONE_NPTL_FLAGS2;
3243 /* TODO: Implement CLONE_CHILD_CLEARTID. */
3244 if (nptl_flags & CLONE_SETTLS)
3245 cpu_set_tls (new_env, newtls);
3247 /* Grab a mutex so that thread setup appears atomic. */
3248 pthread_mutex_lock(&clone_lock);
3250 memset(&info, 0, sizeof(info));
3251 pthread_mutex_init(&info.mutex, NULL);
3252 pthread_mutex_lock(&info.mutex);
3253 pthread_cond_init(&info.cond, NULL);
3255 if (nptl_flags & CLONE_CHILD_SETTID)
3256 info.child_tidptr = child_tidptr;
3257 if (nptl_flags & CLONE_PARENT_SETTID)
3258 info.parent_tidptr = parent_tidptr;
3260 ret = pthread_attr_init(&attr);
3261 ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
3262 /* It is not safe to deliver signals until the child has finished
3263 initializing, so temporarily block all signals. */
3264 sigfillset(&sigmask);
3265 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3267 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3269 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3270 pthread_attr_destroy(&attr);
3272 /* Wait for the child to initialize. */
3273 pthread_cond_wait(&info.cond, &info.mutex);
3275 if (flags & CLONE_PARENT_SETTID)
3276 put_user_u32(ret, parent_tidptr);
3280 pthread_mutex_unlock(&info.mutex);
3281 pthread_cond_destroy(&info.cond);
3282 pthread_mutex_destroy(&info.mutex);
3283 pthread_mutex_unlock(&clone_lock);
3285 if (flags & CLONE_NPTL_FLAGS2)
3287 /* This is probably going to die very quickly, but do it anyway. */
3289 ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3291 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3295 /* if no CLONE_VM, we consider it is a fork */
3296 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3301 /* Child Process. */
3302 cpu_clone_regs(env, newsp);
3304 #if defined(USE_NPTL)
3305 /* There is a race condition here. The parent process could
3306 theoretically read the TID in the child process before the child
3307 tid is set. This would require using either ptrace
3308 (not implemented) or having *_tidptr to point at a shared memory
3309 mapping. We can't repeat the spinlock hack used above because
3310 the child process gets its own copy of the lock. */
3311 if (flags & CLONE_CHILD_SETTID)
3312 put_user_u32(gettid(), child_tidptr)
3313 if (flags & CLONE_PARENT_SETTID)
3314 put_user_u32(gettid(), parent_tidptr);
3315 ts = (TaskState *)env->opaque;
3316 if (flags & CLONE_SETTLS)
3317 cpu_set_tls (env, newtls);
3318 /* TODO: Implement CLONE_CHILD_CLEARTID. */
/* fcntl(2) emulation.  Lock commands marshal struct flock (and the
 * 64-bit flock64 variants) between guest and host layouts; flag
 * commands (F_GETFL/F_SETFL) translate O_* bits through
 * fcntl_flags_tbl; everything else is passed through.
 * NOTE(review): the 64-bit paths apply ">> 1" to both l_type and cmd —
 * this matches a historical QEMU bug that was later removed upstream;
 * verify against the full source before relying on these paths. */
3327 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
3330 struct target_flock *target_fl;
3331 struct flock64 fl64;
3332 struct target_flock64 *target_fl64;
3336 case TARGET_F_GETLK:
3337 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3338 return -TARGET_EFAULT;
3339 fl.l_type = tswap16(target_fl->l_type);
3340 fl.l_whence = tswap16(target_fl->l_whence);
3341 fl.l_start = tswapl(target_fl->l_start);
3342 fl.l_len = tswapl(target_fl->l_len);
3343 fl.l_pid = tswapl(target_fl->l_pid);
3344 unlock_user_struct(target_fl, arg, 0);
3345 ret = get_errno(fcntl(fd, cmd, &fl));
/* On success, copy the (possibly updated) lock back to the guest. */
3347 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
3348 return -TARGET_EFAULT;
3349 target_fl->l_type = tswap16(fl.l_type);
3350 target_fl->l_whence = tswap16(fl.l_whence);
3351 target_fl->l_start = tswapl(fl.l_start);
3352 target_fl->l_len = tswapl(fl.l_len);
3353 target_fl->l_pid = tswapl(fl.l_pid);
3354 unlock_user_struct(target_fl, arg, 1);
3358 case TARGET_F_SETLK:
3359 case TARGET_F_SETLKW:
3360 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3361 return -TARGET_EFAULT;
3362 fl.l_type = tswap16(target_fl->l_type);
3363 fl.l_whence = tswap16(target_fl->l_whence);
3364 fl.l_start = tswapl(target_fl->l_start);
3365 fl.l_len = tswapl(target_fl->l_len);
3366 fl.l_pid = tswapl(target_fl->l_pid);
3367 unlock_user_struct(target_fl, arg, 0);
3368 ret = get_errno(fcntl(fd, cmd, &fl));
3371 case TARGET_F_GETLK64:
3372 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3373 return -TARGET_EFAULT;
3374 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3375 fl64.l_whence = tswap16(target_fl64->l_whence);
3376 fl64.l_start = tswapl(target_fl64->l_start);
3377 fl64.l_len = tswapl(target_fl64->l_len);
3378 fl64.l_pid = tswap16(target_fl64->l_pid);
3379 unlock_user_struct(target_fl64, arg, 0);
3380 ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
3382 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
3383 return -TARGET_EFAULT;
3384 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
3385 target_fl64->l_whence = tswap16(fl64.l_whence);
3386 target_fl64->l_start = tswapl(fl64.l_start);
3387 target_fl64->l_len = tswapl(fl64.l_len);
3388 target_fl64->l_pid = tswapl(fl64.l_pid);
3389 unlock_user_struct(target_fl64, arg, 1);
3392 case TARGET_F_SETLK64:
3393 case TARGET_F_SETLKW64:
3394 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3395 return -TARGET_EFAULT;
3396 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3397 fl64.l_whence = tswap16(target_fl64->l_whence);
3398 fl64.l_start = tswapl(target_fl64->l_start);
3399 fl64.l_len = tswapl(target_fl64->l_len);
3400 fl64.l_pid = tswap16(target_fl64->l_pid);
3401 unlock_user_struct(target_fl64, arg, 0);
3402 ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
/* F_GETFL: translate host flags back to guest bit values. */
3406 ret = get_errno(fcntl(fd, cmd, arg));
3408 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
/* F_SETFL: translate guest flags to host bit values first. */
3413 ret = get_errno(fcntl(fd, cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
/* Default: pass cmd and arg straight through to the host. */
3417 ret = get_errno(fcntl(fd, cmd, arg));
/* 16-bit uid/gid syscall helpers (USE_UID16 targets): clamp 32-bit host
 * ids into the 16-bit range and widen 16-bit guest ids, preserving the
 * -1 "no change" sentinel.  The conversion bodies are elided in this
 * excerpt; only the signatures and the -1 checks are visible. */
3425 static inline int high2lowuid(int uid)
3433 static inline int high2lowgid(int gid)
3441 static inline int low2highuid(int uid)
/* (int16_t)-1 covers 0xffff passed in from a 16-bit guest ABI. */
3443 if ((int16_t)uid == -1)
3449 static inline int low2highgid(int gid)
3451 if ((int16_t)gid == -1)
3457 #endif /* USE_UID16 */
/* One-time syscall-layer initialisation:
 *  1. register all thunk struct descriptors from syscall_types.h;
 *  2. patch ioctl numbers whose encoded size field is the all-ones
 *     placeholder, substituting the real (host-computed) thunk size;
 *  3. build the reverse errno translation table;
 *  4. on same-arch builds, sanity-check that target and host ioctl
 *     numbers agree. */
3459 void syscall_init(void)
3462 const argtype *arg_type;
3466 #define STRUCT(name, list...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3467 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3468 #include "syscall_types.h"
3470 #undef STRUCT_SPECIAL
3472 /* we patch the ioctl size if necessary. We rely on the fact that
3473 no ioctl has all the bits at '1' in the size field */
3475 while (ie->target_cmd != 0) {
3476 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
3477 TARGET_IOC_SIZEMASK) {
3478 arg_type = ie->arg_type;
/* Size patching only makes sense for pointer arguments. */
3479 if (arg_type[0] != TYPE_PTR) {
3480 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
3485 size = thunk_type_size(arg_type, 0);
3486 ie->target_cmd = (ie->target_cmd &
3487 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
3488 (size << TARGET_IOC_SIZESHIFT);
3491 /* Build target_to_host_errno_table[] table from
3492 * host_to_target_errno_table[]. */
3493 for (i=0; i < ERRNO_TABLE_SIZE; i++)
3494 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
3496 /* automatic consistency check if same arch */
3497 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3498 (defined(__x86_64__) && defined(TARGET_X86_64))
3499 if (unlikely(ie->target_cmd != ie->host_cmd)) {
3500 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3501 ie->name, ie->target_cmd, ie->host_cmd);
3508 #if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset from the two registers a 32-bit guest
 * ABI splits it into; register order depends on guest endianness.
 * The 64-bit-ABI variant below is a trivial pass-through of word0. */
3509 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
3511 #ifdef TARGET_WORDS_BIGENDIAN
3512 return ((uint64_t)word0 << 32) | word1;
3514 return ((uint64_t)word1 << 32) | word0;
3517 #else /* TARGET_ABI_BITS == 32 */
3518 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
3522 #endif /* TARGET_ABI_BITS != 32 */
3524 #ifdef TARGET_NR_truncate64
/* truncate64(2): combine the split 64-bit length (arg2/arg3) via
 * target_offset64 and call the host.  The ARM-EABI branch reorders
 * arguments per that ABI's 64-bit alignment rules (body elided here). */
3525 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
3531 if (((CPUARMState *)cpu_env)->eabi)
3537 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
3541 #ifdef TARGET_NR_ftruncate64
/* ftruncate64(2): same argument reassembly as target_truncate64, but
 * operating on an open fd instead of a pathname. */
3542 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
3548 if (((CPUARMState *)cpu_env)->eabi)
3554 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Copy a guest struct timespec at target_addr into *host_ts, swapping
 * both fields to host byte order.  Returns -TARGET_EFAULT if the guest
 * address cannot be locked for reading. */
3558 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
3559 abi_ulong target_addr)
3561 struct target_timespec *target_ts;
3563 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
3564 return -TARGET_EFAULT;
3565 host_ts->tv_sec = tswapl(target_ts->tv_sec);
3566 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
3567 unlock_user_struct(target_ts, target_addr, 0);
/* Copy *host_ts out to the guest struct timespec at target_addr,
 * swapping both fields to target byte order.  Returns -TARGET_EFAULT if
 * the guest address cannot be locked for writing. */
3571 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
3572 struct timespec *host_ts)
3574 struct target_timespec *target_ts;
3576 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
3577 return -TARGET_EFAULT;
3578 target_ts->tv_sec = tswapl(host_ts->tv_sec);
3579 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
3580 unlock_user_struct(target_ts, target_addr, 1);
3584 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Marshal a host struct stat into the guest's 64-bit stat layout at
 * target_addr.  Three layouts are handled: ARM EABI's padded
 * target_eabi_stat64, plain target_stat on 64-bit-long targets, and
 * target_stat64 otherwise.  The struct is zeroed first so padding and
 * unfilled fields read back as 0.  __put_user handles field-width
 * conversion and byte swapping per field. */
3585 static inline abi_long host_to_target_stat64(void *cpu_env,
3586 abi_ulong target_addr,
3587 struct stat *host_st)
3590 if (((CPUARMState *)cpu_env)->eabi) {
3591 struct target_eabi_stat64 *target_st;
3593 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3594 return -TARGET_EFAULT;
3595 memset(target_st, 0, sizeof(struct target_eabi_stat64));
3596 __put_user(host_st->st_dev, &target_st->st_dev);
3597 __put_user(host_st->st_ino, &target_st->st_ino);
3598 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* Some ABIs duplicate the inode in a second, differently-sized field. */
3599 __put_user(host_st->st_ino, &target_st->__st_ino);
3601 __put_user(host_st->st_mode, &target_st->st_mode);
3602 __put_user(host_st->st_nlink, &target_st->st_nlink);
3603 __put_user(host_st->st_uid, &target_st->st_uid);
3604 __put_user(host_st->st_gid, &target_st->st_gid);
3605 __put_user(host_st->st_rdev, &target_st->st_rdev);
3606 __put_user(host_st->st_size, &target_st->st_size);
3607 __put_user(host_st->st_blksize, &target_st->st_blksize);
3608 __put_user(host_st->st_blocks, &target_st->st_blocks);
3609 __put_user(host_st->st_atime, &target_st->target_st_atime);
3610 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3611 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3612 unlock_user_struct(target_st, target_addr, 1);
/* Non-EABI path: pick the struct that matches the target's long size. */
3616 #if TARGET_LONG_BITS == 64
3617 struct target_stat *target_st;
3619 struct target_stat64 *target_st;
3622 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3623 return -TARGET_EFAULT;
3624 memset(target_st, 0, sizeof(*target_st));
3625 __put_user(host_st->st_dev, &target_st->st_dev);
3626 __put_user(host_st->st_ino, &target_st->st_ino);
3627 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3628 __put_user(host_st->st_ino, &target_st->__st_ino);
3630 __put_user(host_st->st_mode, &target_st->st_mode);
3631 __put_user(host_st->st_nlink, &target_st->st_nlink);
3632 __put_user(host_st->st_uid, &target_st->st_uid);
3633 __put_user(host_st->st_gid, &target_st->st_gid);
3634 __put_user(host_st->st_rdev, &target_st->st_rdev);
3635 /* XXX: better use of kernel struct */
3636 __put_user(host_st->st_size, &target_st->st_size);
3637 __put_user(host_st->st_blksize, &target_st->st_blksize);
3638 __put_user(host_st->st_blocks, &target_st->st_blocks);
3639 __put_user(host_st->st_atime, &target_st->target_st_atime);
3640 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3641 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3642 unlock_user_struct(target_st, target_addr, 1);
3649 #if defined(USE_NPTL)
3650 /* ??? Using host futex calls even when target atomic operations
3651 are not really atomic probably breaks things. However implementing
3652 futexes locally would make futexes shared between multiple processes
3653 tricky. However they're probably useless because guest atomic
3654 operations won't work either. */
/* futex(2) emulation: translate guest addresses with g2h and forward to
 * the host futex syscall.  FUTEX_WAIT converts the optional timeout via
 * target_to_host_timespec; FUTEX_WAIT's expected value and
 * FUTEX_CMP_REQUEUE's val3 are byte-swapped because they are compared
 * against guest memory.  Unhandled ops return -TARGET_ENOSYS. */
3655 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
3656 target_ulong uaddr2, int val3)
3658 struct timespec ts, *pts;
3660 /* ??? We assume FUTEX_* constants are the same on both host
3666 target_to_host_timespec(pts, timeout);
3670 return get_errno(sys_futex(g2h(uaddr), FUTEX_WAIT, tswap32(val),
3673 return get_errno(sys_futex(g2h(uaddr), FUTEX_WAKE, val, NULL, NULL, 0));
3675 return get_errno(sys_futex(g2h(uaddr), FUTEX_FD, val, NULL, NULL, 0));
3677 return get_errno(sys_futex(g2h(uaddr), FUTEX_REQUEUE, val,
3678 NULL, g2h(uaddr2), 0));
3679 case FUTEX_CMP_REQUEUE:
3680 return get_errno(sys_futex(g2h(uaddr), FUTEX_CMP_REQUEUE, val,
3681 NULL, g2h(uaddr2), tswap32(val3)));
3683 return -TARGET_ENOSYS;
/* Return the kernel version as a packed integer (one byte per
 * major/minor/patch component, built by the tmp = (tmp << 8) + n loop).
 * The version string comes from the -r/QEMU_UNAME override when set,
 * otherwise from uname(2).  The result is cached in the static
 * 'osversion' across calls. */
3688 int get_osversion(void)
3690 static int osversion;
3691 struct new_utsname buf;
3696 if (qemu_uname_release && *qemu_uname_release) {
3697 s = qemu_uname_release;
3699 if (sys_uname(&buf))
/* Parse up to three dot-separated numeric components. */
3704 for (i = 0; i < 3; i++) {
3706 while (*s >= '0' && *s <= '9') {
3711 tmp = (tmp << 8) + n;
3719 /* do_syscall() should always have a single exit point at the end so
3720 that actions, such as logging of syscall results, can be performed.
3721 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
3722 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
3723 abi_long arg2, abi_long arg3, abi_long arg4,
3724 abi_long arg5, abi_long arg6)
3732 gemu_log("syscall %d", num);
3735 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
3738 case TARGET_NR_exit:
3742 gdb_exit(cpu_env, arg1);
3743 /* XXX: should free thread stack and CPU env */
3745 ret = 0; /* avoid warning */
3747 case TARGET_NR_read:
3751 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
3753 ret = get_errno(read(arg1, p, arg3));
3754 unlock_user(p, arg2, ret);
3757 case TARGET_NR_write:
3758 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
3760 ret = get_errno(write(arg1, p, arg3));
3761 unlock_user(p, arg2, 0);
3763 case TARGET_NR_open:
3764 if (!(p = lock_user_string(arg1)))
3766 ret = get_errno(open(path(p),
3767 target_to_host_bitmask(arg2, fcntl_flags_tbl),
3769 unlock_user(p, arg1, 0);
3771 #if defined(TARGET_NR_openat) && defined(__NR_openat)
3772 case TARGET_NR_openat:
3773 if (!(p = lock_user_string(arg2)))
3775 ret = get_errno(sys_openat(arg1,
3777 target_to_host_bitmask(arg3, fcntl_flags_tbl),
3779 unlock_user(p, arg2, 0);
3782 case TARGET_NR_close:
3783 ret = get_errno(close(arg1));
3788 case TARGET_NR_fork:
3789 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
3791 #ifdef TARGET_NR_waitpid
3792 case TARGET_NR_waitpid:
3795 ret = get_errno(waitpid(arg1, &status, arg3));
3796 if (!is_error(ret) && arg2
3797 && put_user_s32(status, arg2))
3802 #ifdef TARGET_NR_waitid
3803 case TARGET_NR_waitid:
3807 ret = get_errno(waitid(arg1, arg2, &info, arg4));
3808 if (!is_error(ret) && arg3 && info.si_pid != 0) {
3809 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
3811 host_to_target_siginfo(p, &info);
3812 unlock_user(p, arg3, sizeof(target_siginfo_t));
3817 #ifdef TARGET_NR_creat /* not on alpha */
3818 case TARGET_NR_creat:
3819 if (!(p = lock_user_string(arg1)))
3821 ret = get_errno(creat(p, arg2));
3822 unlock_user(p, arg1, 0);
3825 case TARGET_NR_link:
3828 p = lock_user_string(arg1);
3829 p2 = lock_user_string(arg2);
3831 ret = -TARGET_EFAULT;
3833 ret = get_errno(link(p, p2));
3834 unlock_user(p2, arg2, 0);
3835 unlock_user(p, arg1, 0);
3838 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
3839 case TARGET_NR_linkat:
3844 p = lock_user_string(arg2);
3845 p2 = lock_user_string(arg4);
3847 ret = -TARGET_EFAULT;
3849 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
3850 unlock_user(p, arg2, 0);
3851 unlock_user(p2, arg4, 0);
3855 case TARGET_NR_unlink:
3856 if (!(p = lock_user_string(arg1)))
3858 ret = get_errno(unlink(p));
3859 unlock_user(p, arg1, 0);
3861 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
3862 case TARGET_NR_unlinkat:
3863 if (!(p = lock_user_string(arg2)))
3865 ret = get_errno(sys_unlinkat(arg1, p, arg3));
3866 unlock_user(p, arg2, 0);
3869 case TARGET_NR_execve:
3871 char **argp, **envp;
3874 abi_ulong guest_argp;
3875 abi_ulong guest_envp;
3881 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
3882 if (get_user_ual(addr, gp))
3890 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
3891 if (get_user_ual(addr, gp))
3898 argp = alloca((argc + 1) * sizeof(void *));
3899 envp = alloca((envc + 1) * sizeof(void *));
3901 for (gp = guest_argp, q = argp; gp;
3902 gp += sizeof(abi_ulong), q++) {
3903 if (get_user_ual(addr, gp))
3907 if (!(*q = lock_user_string(addr)))
3912 for (gp = guest_envp, q = envp; gp;
3913 gp += sizeof(abi_ulong), q++) {
3914 if (get_user_ual(addr, gp))
3918 if (!(*q = lock_user_string(addr)))
3923 if (!(p = lock_user_string(arg1)))
3925 ret = get_errno(execve(p, argp, envp));
3926 unlock_user(p, arg1, 0);
3931 ret = -TARGET_EFAULT;
3934 for (gp = guest_argp, q = argp; *q;
3935 gp += sizeof(abi_ulong), q++) {
3936 if (get_user_ual(addr, gp)
3939 unlock_user(*q, addr, 0);
3941 for (gp = guest_envp, q = envp; *q;
3942 gp += sizeof(abi_ulong), q++) {
3943 if (get_user_ual(addr, gp)
3946 unlock_user(*q, addr, 0);
3950 case TARGET_NR_chdir:
3951 if (!(p = lock_user_string(arg1)))
3953 ret = get_errno(chdir(p));
3954 unlock_user(p, arg1, 0);
3956 #ifdef TARGET_NR_time
3957 case TARGET_NR_time:
3960 ret = get_errno(time(&host_time));
3963 && put_user_sal(host_time, arg1))
3968 case TARGET_NR_mknod:
3969 if (!(p = lock_user_string(arg1)))
3971 ret = get_errno(mknod(p, arg2, arg3));
3972 unlock_user(p, arg1, 0);
3974 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
3975 case TARGET_NR_mknodat:
3976 if (!(p = lock_user_string(arg2)))
3978 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
3979 unlock_user(p, arg2, 0);
3982 case TARGET_NR_chmod:
3983 if (!(p = lock_user_string(arg1)))
3985 ret = get_errno(chmod(p, arg2));
3986 unlock_user(p, arg1, 0);
3988 #ifdef TARGET_NR_break
3989 case TARGET_NR_break:
3992 #ifdef TARGET_NR_oldstat
3993 case TARGET_NR_oldstat:
3996 case TARGET_NR_lseek:
3997 ret = get_errno(lseek(arg1, arg2, arg3));
3999 #ifdef TARGET_NR_getxpid
4000 case TARGET_NR_getxpid:
4002 case TARGET_NR_getpid:
4004 ret = get_errno(getpid());
4006 case TARGET_NR_mount:
4008 /* need to look at the data field */
4010 p = lock_user_string(arg1);
4011 p2 = lock_user_string(arg2);
4012 p3 = lock_user_string(arg3);
4013 if (!p || !p2 || !p3)
4014 ret = -TARGET_EFAULT;
4016 /* FIXME - arg5 should be locked, but it isn't clear how to
4017 * do that since it's not guaranteed to be a NULL-terminated
4020 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4021 unlock_user(p, arg1, 0);
4022 unlock_user(p2, arg2, 0);
4023 unlock_user(p3, arg3, 0);
4026 #ifdef TARGET_NR_umount
4027 case TARGET_NR_umount:
4028 if (!(p = lock_user_string(arg1)))
4030 ret = get_errno(umount(p));
4031 unlock_user(p, arg1, 0);
4034 #ifdef TARGET_NR_stime /* not on alpha */
4035 case TARGET_NR_stime:
4038 if (get_user_sal(host_time, arg1))
4040 ret = get_errno(stime(&host_time));
4044 case TARGET_NR_ptrace:
4046 #ifdef TARGET_NR_alarm /* not on alpha */
4047 case TARGET_NR_alarm:
4051 #ifdef TARGET_NR_oldfstat
4052 case TARGET_NR_oldfstat:
4055 #ifdef TARGET_NR_pause /* not on alpha */
4056 case TARGET_NR_pause:
4057 ret = get_errno(pause());
4060 #ifdef TARGET_NR_utime
4061 case TARGET_NR_utime:
4063 struct utimbuf tbuf, *host_tbuf;
4064 struct target_utimbuf *target_tbuf;
4066 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4068 tbuf.actime = tswapl(target_tbuf->actime);
4069 tbuf.modtime = tswapl(target_tbuf->modtime);
4070 unlock_user_struct(target_tbuf, arg2, 0);
4075 if (!(p = lock_user_string(arg1)))
4077 ret = get_errno(utime(p, host_tbuf));
4078 unlock_user(p, arg1, 0);
4082 case TARGET_NR_utimes:
4084 struct timeval *tvp, tv[2];
4086 if (copy_from_user_timeval(&tv[0], arg2)
4087 || copy_from_user_timeval(&tv[1],
4088 arg2 + sizeof(struct target_timeval)))
4094 if (!(p = lock_user_string(arg1)))
4096 ret = get_errno(utimes(p, tvp));
4097 unlock_user(p, arg1, 0);
4100 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4101 case TARGET_NR_futimesat:
4103 struct timeval *tvp, tv[2];
4105 if (copy_from_user_timeval(&tv[0], arg3)
4106 || copy_from_user_timeval(&tv[1],
4107 arg3 + sizeof(struct target_timeval)))
4113 if (!(p = lock_user_string(arg2)))
4115 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4116 unlock_user(p, arg2, 0);
4120 #ifdef TARGET_NR_stty
4121 case TARGET_NR_stty:
4124 #ifdef TARGET_NR_gtty
4125 case TARGET_NR_gtty:
4128 case TARGET_NR_access:
4129 if (!(p = lock_user_string(arg1)))
4131 ret = get_errno(access(p, arg2));
4132 unlock_user(p, arg1, 0);
4134 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4135 case TARGET_NR_faccessat:
4136 if (!(p = lock_user_string(arg2)))
4138 ret = get_errno(sys_faccessat(arg1, p, arg3, arg4));
4139 unlock_user(p, arg2, 0);
4142 #ifdef TARGET_NR_nice /* not on alpha */
4143 case TARGET_NR_nice:
4144 ret = get_errno(nice(arg1));
4147 #ifdef TARGET_NR_ftime
4148 case TARGET_NR_ftime:
4151 case TARGET_NR_sync:
4155 case TARGET_NR_kill:
4156 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
4158 case TARGET_NR_rename:
4161 p = lock_user_string(arg1);
4162 p2 = lock_user_string(arg2);
4164 ret = -TARGET_EFAULT;
4166 ret = get_errno(rename(p, p2));
4167 unlock_user(p2, arg2, 0);
4168 unlock_user(p, arg1, 0);
4171 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4172 case TARGET_NR_renameat:
4175 p = lock_user_string(arg2);
4176 p2 = lock_user_string(arg4);
4178 ret = -TARGET_EFAULT;
4180 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
4181 unlock_user(p2, arg4, 0);
4182 unlock_user(p, arg2, 0);
4186 case TARGET_NR_mkdir:
4187 if (!(p = lock_user_string(arg1)))
4189 ret = get_errno(mkdir(p, arg2));
4190 unlock_user(p, arg1, 0);
4192 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4193 case TARGET_NR_mkdirat:
4194 if (!(p = lock_user_string(arg2)))
4196 ret = get_errno(sys_mkdirat(arg1, p, arg3));
4197 unlock_user(p, arg2, 0);
4200 case TARGET_NR_rmdir:
4201 if (!(p = lock_user_string(arg1)))
4203 ret = get_errno(rmdir(p));
4204 unlock_user(p, arg1, 0);
4207 ret = get_errno(dup(arg1));
4209 case TARGET_NR_pipe:
4212 ret = get_errno(pipe(host_pipe));
4213 if (!is_error(ret)) {
4214 #if defined(TARGET_MIPS)
4215 CPUMIPSState *env = (CPUMIPSState*)cpu_env;
4216 env->active_tc.gpr[3] = host_pipe[1];
4218 #elif defined(TARGET_SH4)
4219 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
4222 if (put_user_s32(host_pipe[0], arg1)
4223 || put_user_s32(host_pipe[1], arg1 + sizeof(host_pipe[0])))
4229 case TARGET_NR_times:
4231 struct target_tms *tmsp;
4233 ret = get_errno(times(&tms));
4235 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
4238 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
4239 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
4240 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
4241 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
4244 ret = host_to_target_clock_t(ret);
4247 #ifdef TARGET_NR_prof
4248 case TARGET_NR_prof:
4251 #ifdef TARGET_NR_signal
4252 case TARGET_NR_signal:
4255 case TARGET_NR_acct:
4257 ret = get_errno(acct(NULL));
4259 if (!(p = lock_user_string(arg1)))
4261 ret = get_errno(acct(path(p)));
4262 unlock_user(p, arg1, 0);
4265 #ifdef TARGET_NR_umount2 /* not on alpha */
4266 case TARGET_NR_umount2:
4267 if (!(p = lock_user_string(arg1)))
4269 ret = get_errno(umount2(p, arg2));
4270 unlock_user(p, arg1, 0);
4273 #ifdef TARGET_NR_lock
4274 case TARGET_NR_lock:
4277 case TARGET_NR_ioctl:
4278 ret = do_ioctl(arg1, arg2, arg3);
4280 case TARGET_NR_fcntl:
4281 ret = do_fcntl(arg1, arg2, arg3);
4283 #ifdef TARGET_NR_mpx
4287 case TARGET_NR_setpgid:
4288 ret = get_errno(setpgid(arg1, arg2));
4290 #ifdef TARGET_NR_ulimit
4291 case TARGET_NR_ulimit:
4294 #ifdef TARGET_NR_oldolduname
4295 case TARGET_NR_oldolduname:
4298 case TARGET_NR_umask:
4299 ret = get_errno(umask(arg1));
4301 case TARGET_NR_chroot:
4302 if (!(p = lock_user_string(arg1)))
4304 ret = get_errno(chroot(p));
4305 unlock_user(p, arg1, 0);
4307 case TARGET_NR_ustat:
4309 case TARGET_NR_dup2:
4310 ret = get_errno(dup2(arg1, arg2));
4312 #ifdef TARGET_NR_getppid /* not on alpha */
4313 case TARGET_NR_getppid:
4314 ret = get_errno(getppid());
4317 case TARGET_NR_getpgrp:
4318 ret = get_errno(getpgrp());
4320 case TARGET_NR_setsid:
4321 ret = get_errno(setsid());
4323 #ifdef TARGET_NR_sigaction
4324 case TARGET_NR_sigaction:
4326 #if !defined(TARGET_MIPS)
4327 struct target_old_sigaction *old_act;
4328 struct target_sigaction act, oact, *pact;
4330 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4332 act._sa_handler = old_act->_sa_handler;
4333 target_siginitset(&act.sa_mask, old_act->sa_mask);
4334 act.sa_flags = old_act->sa_flags;
4335 act.sa_restorer = old_act->sa_restorer;
4336 unlock_user_struct(old_act, arg2, 0);
4341 ret = get_errno(do_sigaction(arg1, pact, &oact));
4342 if (!is_error(ret) && arg3) {
4343 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4345 old_act->_sa_handler = oact._sa_handler;
4346 old_act->sa_mask = oact.sa_mask.sig[0];
4347 old_act->sa_flags = oact.sa_flags;
4348 old_act->sa_restorer = oact.sa_restorer;
4349 unlock_user_struct(old_act, arg3, 1);
4352 struct target_sigaction act, oact, *pact, *old_act;
4355 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4357 act._sa_handler = old_act->_sa_handler;
4358 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
4359 act.sa_flags = old_act->sa_flags;
4360 unlock_user_struct(old_act, arg2, 0);
4366 ret = get_errno(do_sigaction(arg1, pact, &oact));
4368 if (!is_error(ret) && arg3) {
4369 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4371 old_act->_sa_handler = oact._sa_handler;
4372 old_act->sa_flags = oact.sa_flags;
4373 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
4374 old_act->sa_mask.sig[1] = 0;
4375 old_act->sa_mask.sig[2] = 0;
4376 old_act->sa_mask.sig[3] = 0;
4377 unlock_user_struct(old_act, arg3, 1);
4383 case TARGET_NR_rt_sigaction:
4385 struct target_sigaction *act;
4386 struct target_sigaction *oact;
4389 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
4394 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
4395 ret = -TARGET_EFAULT;
4396 goto rt_sigaction_fail;
4400 ret = get_errno(do_sigaction(arg1, act, oact));
4403 unlock_user_struct(act, arg2, 0);
4405 unlock_user_struct(oact, arg3, 1);
4408 #ifdef TARGET_NR_sgetmask /* not on alpha */
4409 case TARGET_NR_sgetmask:
4412 abi_ulong target_set;
4413 sigprocmask(0, NULL, &cur_set);
4414 host_to_target_old_sigset(&target_set, &cur_set);
4419 #ifdef TARGET_NR_ssetmask /* not on alpha */
4420 case TARGET_NR_ssetmask:
4422 sigset_t set, oset, cur_set;
4423 abi_ulong target_set = arg1;
4424 sigprocmask(0, NULL, &cur_set);
4425 target_to_host_old_sigset(&set, &target_set);
4426 sigorset(&set, &set, &cur_set);
4427 sigprocmask(SIG_SETMASK, &set, &oset);
4428 host_to_target_old_sigset(&target_set, &oset);
4433 #ifdef TARGET_NR_sigprocmask
4434 case TARGET_NR_sigprocmask:
4437 sigset_t set, oldset, *set_ptr;
4441 case TARGET_SIG_BLOCK:
4444 case TARGET_SIG_UNBLOCK:
4447 case TARGET_SIG_SETMASK:
4451 ret = -TARGET_EINVAL;
4454 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4456 target_to_host_old_sigset(&set, p);
4457 unlock_user(p, arg2, 0);
4463 ret = get_errno(sigprocmask(arg1, set_ptr, &oldset));
4464 if (!is_error(ret) && arg3) {
4465 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4467 host_to_target_old_sigset(p, &oldset);
4468 unlock_user(p, arg3, sizeof(target_sigset_t));
4473 case TARGET_NR_rt_sigprocmask:
4476 sigset_t set, oldset, *set_ptr;
4480 case TARGET_SIG_BLOCK:
4483 case TARGET_SIG_UNBLOCK:
4486 case TARGET_SIG_SETMASK:
4490 ret = -TARGET_EINVAL;
4493 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4495 target_to_host_sigset(&set, p);
4496 unlock_user(p, arg2, 0);
4502 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
4503 if (!is_error(ret) && arg3) {
4504 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4506 host_to_target_sigset(p, &oldset);
4507 unlock_user(p, arg3, sizeof(target_sigset_t));
4511 #ifdef TARGET_NR_sigpending
4512 case TARGET_NR_sigpending:
4515 ret = get_errno(sigpending(&set));
4516 if (!is_error(ret)) {
4517 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4519 host_to_target_old_sigset(p, &set);
4520 unlock_user(p, arg1, sizeof(target_sigset_t));
4525 case TARGET_NR_rt_sigpending:
4528 ret = get_errno(sigpending(&set));
4529 if (!is_error(ret)) {
4530 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4532 host_to_target_sigset(p, &set);
4533 unlock_user(p, arg1, sizeof(target_sigset_t));
4537 #ifdef TARGET_NR_sigsuspend
4538 case TARGET_NR_sigsuspend:
4541 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4543 target_to_host_old_sigset(&set, p);
4544 unlock_user(p, arg1, 0);
4545 ret = get_errno(sigsuspend(&set));
4549 case TARGET_NR_rt_sigsuspend:
4552 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4554 target_to_host_sigset(&set, p);
4555 unlock_user(p, arg1, 0);
4556 ret = get_errno(sigsuspend(&set));
4559 case TARGET_NR_rt_sigtimedwait:
4562 struct timespec uts, *puts;
4565 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4567 target_to_host_sigset(&set, p);
4568 unlock_user(p, arg1, 0);
4571 target_to_host_timespec(puts, arg3);
4575 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
4576 if (!is_error(ret) && arg2) {
4577 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
4579 host_to_target_siginfo(p, &uinfo);
4580 unlock_user(p, arg2, sizeof(target_siginfo_t));
4584 case TARGET_NR_rt_sigqueueinfo:
4587 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
4589 target_to_host_siginfo(&uinfo, p);
4590 unlock_user(p, arg1, 0);
4591 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
4594 #ifdef TARGET_NR_sigreturn
4595 case TARGET_NR_sigreturn:
4596 /* NOTE: ret is eax, so no transcoding needs to be done */
4597 ret = do_sigreturn(cpu_env);
4600 case TARGET_NR_rt_sigreturn:
4601 /* NOTE: ret is eax, so no transcoding needs to be done */
4602 ret = do_rt_sigreturn(cpu_env);
4604 case TARGET_NR_sethostname:
4605 if (!(p = lock_user_string(arg1)))
4607 ret = get_errno(sethostname(p, arg2));
4608 unlock_user(p, arg1, 0);
4610 case TARGET_NR_setrlimit:
4612 /* XXX: convert resource ? */
4613 int resource = arg1;
4614 struct target_rlimit *target_rlim;
4616 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
4618 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4619 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4620 unlock_user_struct(target_rlim, arg2, 0);
4621 ret = get_errno(setrlimit(resource, &rlim));
4624 case TARGET_NR_getrlimit:
4626 /* XXX: convert resource ? */
4627 int resource = arg1;
4628 struct target_rlimit *target_rlim;
4631 ret = get_errno(getrlimit(resource, &rlim));
4632 if (!is_error(ret)) {
4633 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
4635 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4636 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4637 unlock_user_struct(target_rlim, arg2, 1);
4641 case TARGET_NR_getrusage:
4643 struct rusage rusage;
4644 ret = get_errno(getrusage(arg1, &rusage));
4645 if (!is_error(ret)) {
4646 host_to_target_rusage(arg2, &rusage);
4650 case TARGET_NR_gettimeofday:
4653 ret = get_errno(gettimeofday(&tv, NULL));
4654 if (!is_error(ret)) {
4655 if (copy_to_user_timeval(arg1, &tv))
4660 case TARGET_NR_settimeofday:
4663 if (copy_from_user_timeval(&tv, arg1))
4665 ret = get_errno(settimeofday(&tv, NULL));
4668 #ifdef TARGET_NR_select
4669 case TARGET_NR_select:
4671 struct target_sel_arg_struct *sel;
4672 abi_ulong inp, outp, exp, tvp;
4675 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
4677 nsel = tswapl(sel->n);
4678 inp = tswapl(sel->inp);
4679 outp = tswapl(sel->outp);
4680 exp = tswapl(sel->exp);
4681 tvp = tswapl(sel->tvp);
4682 unlock_user_struct(sel, arg1, 0);
4683 ret = do_select(nsel, inp, outp, exp, tvp);
4687 case TARGET_NR_symlink:
4690 p = lock_user_string(arg1);
4691 p2 = lock_user_string(arg2);
4693 ret = -TARGET_EFAULT;
4695 ret = get_errno(symlink(p, p2));
4696 unlock_user(p2, arg2, 0);
4697 unlock_user(p, arg1, 0);
4700 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
4701 case TARGET_NR_symlinkat:
4704 p = lock_user_string(arg1);
4705 p2 = lock_user_string(arg3);
4707 ret = -TARGET_EFAULT;
4709 ret = get_errno(sys_symlinkat(p, arg2, p2));
4710 unlock_user(p2, arg3, 0);
4711 unlock_user(p, arg1, 0);
4715 #ifdef TARGET_NR_oldlstat
4716 case TARGET_NR_oldlstat:
4719 case TARGET_NR_readlink:
4722 p = lock_user_string(arg1);
4723 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
4725 ret = -TARGET_EFAULT;
4727 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0)
4728 ret = get_errno(snprintf((char *)p2, arg3, "%s", exec_path));
4730 ret = get_errno(readlink(path(p), p2, arg3));
4733 unlock_user(p2, arg2, ret);
4734 unlock_user(p, arg1, 0);
4737 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
4738 case TARGET_NR_readlinkat:
4741 p = lock_user_string(arg2);
4742 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
4744 ret = -TARGET_EFAULT;
4746 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
4747 unlock_user(p2, arg3, ret);
4748 unlock_user(p, arg2, 0);
4752 #ifdef TARGET_NR_uselib
4753 case TARGET_NR_uselib:
4756 #ifdef TARGET_NR_swapon
4757 case TARGET_NR_swapon:
4758 if (!(p = lock_user_string(arg1)))
4760 ret = get_errno(swapon(p, arg2));
4761 unlock_user(p, arg1, 0);
4764 case TARGET_NR_reboot:
4766 #ifdef TARGET_NR_readdir
4767 case TARGET_NR_readdir:
4770 #ifdef TARGET_NR_mmap
4771 case TARGET_NR_mmap:
4772 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS)
4775 abi_ulong v1, v2, v3, v4, v5, v6;
4776 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
4784 unlock_user(v, arg1, 0);
4785 ret = get_errno(target_mmap(v1, v2, v3,
4786 target_to_host_bitmask(v4, mmap_flags_tbl),
4790 ret = get_errno(target_mmap(arg1, arg2, arg3,
4791 target_to_host_bitmask(arg4, mmap_flags_tbl),
4797 #ifdef TARGET_NR_mmap2
4798 case TARGET_NR_mmap2:
4800 #define MMAP_SHIFT 12
4802 ret = get_errno(target_mmap(arg1, arg2, arg3,
4803 target_to_host_bitmask(arg4, mmap_flags_tbl),
4805 arg6 << MMAP_SHIFT));
4808 case TARGET_NR_munmap:
4809 ret = get_errno(target_munmap(arg1, arg2));
4811 case TARGET_NR_mprotect:
4812 ret = get_errno(target_mprotect(arg1, arg2, arg3));
4814 #ifdef TARGET_NR_mremap
4815 case TARGET_NR_mremap:
4816 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
4819 /* ??? msync/mlock/munlock are broken for softmmu. */
4820 #ifdef TARGET_NR_msync
4821 case TARGET_NR_msync:
4822 ret = get_errno(msync(g2h(arg1), arg2, arg3));
4825 #ifdef TARGET_NR_mlock
4826 case TARGET_NR_mlock:
4827 ret = get_errno(mlock(g2h(arg1), arg2));
4830 #ifdef TARGET_NR_munlock
4831 case TARGET_NR_munlock:
4832 ret = get_errno(munlock(g2h(arg1), arg2));
4835 #ifdef TARGET_NR_mlockall
4836 case TARGET_NR_mlockall:
4837 ret = get_errno(mlockall(arg1));
4840 #ifdef TARGET_NR_munlockall
4841 case TARGET_NR_munlockall:
4842 ret = get_errno(munlockall());
4845 case TARGET_NR_truncate:
4846 if (!(p = lock_user_string(arg1)))
4848 ret = get_errno(truncate(p, arg2));
4849 unlock_user(p, arg1, 0);
4851 case TARGET_NR_ftruncate:
4852 ret = get_errno(ftruncate(arg1, arg2));
4854 case TARGET_NR_fchmod:
4855 ret = get_errno(fchmod(arg1, arg2));
4857 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
4858 case TARGET_NR_fchmodat:
4859 if (!(p = lock_user_string(arg2)))
4861 ret = get_errno(sys_fchmodat(arg1, p, arg3, arg4));
4862 unlock_user(p, arg2, 0);
4865 case TARGET_NR_getpriority:
4866 /* libc does special remapping of the return value of
4867 * sys_getpriority() so it's just easiest to call
4868 * sys_getpriority() directly rather than through libc. */
4869 ret = sys_getpriority(arg1, arg2);
4871 case TARGET_NR_setpriority:
4872 ret = get_errno(setpriority(arg1, arg2, arg3));
4874 #ifdef TARGET_NR_profil
4875 case TARGET_NR_profil:
4878 case TARGET_NR_statfs:
4879 if (!(p = lock_user_string(arg1)))
4881 ret = get_errno(statfs(path(p), &stfs));
4882 unlock_user(p, arg1, 0);
4884 if (!is_error(ret)) {
4885 struct target_statfs *target_stfs;
4887 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
4889 __put_user(stfs.f_type, &target_stfs->f_type);
4890 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
4891 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
4892 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
4893 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
4894 __put_user(stfs.f_files, &target_stfs->f_files);
4895 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
4896 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
4897 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
4898 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
4899 unlock_user_struct(target_stfs, arg2, 1);
4902 case TARGET_NR_fstatfs:
4903 ret = get_errno(fstatfs(arg1, &stfs));
4904 goto convert_statfs;
4905 #ifdef TARGET_NR_statfs64
4906 case TARGET_NR_statfs64:
4907 if (!(p = lock_user_string(arg1)))
4909 ret = get_errno(statfs(path(p), &stfs));
4910 unlock_user(p, arg1, 0);
4912 if (!is_error(ret)) {
4913 struct target_statfs64 *target_stfs;
4915 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
4917 __put_user(stfs.f_type, &target_stfs->f_type);
4918 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
4919 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
4920 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
4921 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
4922 __put_user(stfs.f_files, &target_stfs->f_files);
4923 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
4924 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
4925 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
4926 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
4927 unlock_user_struct(target_stfs, arg3, 1);
4930 case TARGET_NR_fstatfs64:
4931 ret = get_errno(fstatfs(arg1, &stfs));
4932 goto convert_statfs64;
4934 #ifdef TARGET_NR_ioperm
4935 case TARGET_NR_ioperm:
4938 #ifdef TARGET_NR_socketcall
4939 case TARGET_NR_socketcall:
4940 ret = do_socketcall(arg1, arg2);
4943 #ifdef TARGET_NR_accept
4944 case TARGET_NR_accept:
4945 ret = do_accept(arg1, arg2, arg3);
4948 #ifdef TARGET_NR_bind
4949 case TARGET_NR_bind:
4950 ret = do_bind(arg1, arg2, arg3);
4953 #ifdef TARGET_NR_connect
4954 case TARGET_NR_connect:
4955 ret = do_connect(arg1, arg2, arg3);
4958 #ifdef TARGET_NR_getpeername
4959 case TARGET_NR_getpeername:
4960 ret = do_getpeername(arg1, arg2, arg3);
4963 #ifdef TARGET_NR_getsockname
4964 case TARGET_NR_getsockname:
4965 ret = do_getsockname(arg1, arg2, arg3);
4968 #ifdef TARGET_NR_getsockopt
4969 case TARGET_NR_getsockopt:
4970 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
4973 #ifdef TARGET_NR_listen
4974 case TARGET_NR_listen:
4975 ret = get_errno(listen(arg1, arg2));
4978 #ifdef TARGET_NR_recv
4979 case TARGET_NR_recv:
4980 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
4983 #ifdef TARGET_NR_recvfrom
4984 case TARGET_NR_recvfrom:
4985 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
4988 #ifdef TARGET_NR_recvmsg
4989 case TARGET_NR_recvmsg:
4990 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
4993 #ifdef TARGET_NR_send
4994 case TARGET_NR_send:
4995 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
4998 #ifdef TARGET_NR_sendmsg
4999 case TARGET_NR_sendmsg:
5000 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
5003 #ifdef TARGET_NR_sendto
5004 case TARGET_NR_sendto:
5005 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
5008 #ifdef TARGET_NR_shutdown
5009 case TARGET_NR_shutdown:
5010 ret = get_errno(shutdown(arg1, arg2));
5013 #ifdef TARGET_NR_socket
5014 case TARGET_NR_socket:
5015 ret = do_socket(arg1, arg2, arg3);
5018 #ifdef TARGET_NR_socketpair
5019 case TARGET_NR_socketpair:
5020 ret = do_socketpair(arg1, arg2, arg3, arg4);
5023 #ifdef TARGET_NR_setsockopt
5024 case TARGET_NR_setsockopt:
5025 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
5029 case TARGET_NR_syslog:
5030 if (!(p = lock_user_string(arg2)))
5032 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
5033 unlock_user(p, arg2, 0);
5036 case TARGET_NR_setitimer:
5038 struct itimerval value, ovalue, *pvalue;
5042 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
5043 || copy_from_user_timeval(&pvalue->it_value,
5044 arg2 + sizeof(struct target_timeval)))
5049 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
5050 if (!is_error(ret) && arg3) {
5051 if (copy_to_user_timeval(arg3,
5052 &ovalue.it_interval)
5053 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
5059 case TARGET_NR_getitimer:
5061 struct itimerval value;
5063 ret = get_errno(getitimer(arg1, &value));
5064 if (!is_error(ret) && arg2) {
5065 if (copy_to_user_timeval(arg2,
5067 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
5073 case TARGET_NR_stat:
5074 if (!(p = lock_user_string(arg1)))
5076 ret = get_errno(stat(path(p), &st));
5077 unlock_user(p, arg1, 0);
5079 case TARGET_NR_lstat:
5080 if (!(p = lock_user_string(arg1)))
5082 ret = get_errno(lstat(path(p), &st));
5083 unlock_user(p, arg1, 0);
5085 case TARGET_NR_fstat:
5087 ret = get_errno(fstat(arg1, &st));
5089 if (!is_error(ret)) {
5090 struct target_stat *target_st;
5092 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
5094 __put_user(st.st_dev, &target_st->st_dev);
5095 __put_user(st.st_ino, &target_st->st_ino);
5096 __put_user(st.st_mode, &target_st->st_mode);
5097 __put_user(st.st_uid, &target_st->st_uid);
5098 __put_user(st.st_gid, &target_st->st_gid);
5099 __put_user(st.st_nlink, &target_st->st_nlink);
5100 __put_user(st.st_rdev, &target_st->st_rdev);
5101 __put_user(st.st_size, &target_st->st_size);
5102 __put_user(st.st_blksize, &target_st->st_blksize);
5103 __put_user(st.st_blocks, &target_st->st_blocks);
5104 __put_user(st.st_atime, &target_st->target_st_atime);
5105 __put_user(st.st_mtime, &target_st->target_st_mtime);
5106 __put_user(st.st_ctime, &target_st->target_st_ctime);
5107 unlock_user_struct(target_st, arg2, 1);
5111 #ifdef TARGET_NR_olduname
5112 case TARGET_NR_olduname:
5115 #ifdef TARGET_NR_iopl
5116 case TARGET_NR_iopl:
5119 case TARGET_NR_vhangup:
5120 ret = get_errno(vhangup());
5122 #ifdef TARGET_NR_idle
5123 case TARGET_NR_idle:
5126 #ifdef TARGET_NR_syscall
5127 case TARGET_NR_syscall:
5128 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
5131 case TARGET_NR_wait4:
5134 abi_long status_ptr = arg2;
5135 struct rusage rusage, *rusage_ptr;
5136 abi_ulong target_rusage = arg4;
5138 rusage_ptr = &rusage;
5141 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
5142 if (!is_error(ret)) {
5144 if (put_user_s32(status, status_ptr))
5148 host_to_target_rusage(target_rusage, &rusage);
5152 #ifdef TARGET_NR_swapoff
5153 case TARGET_NR_swapoff:
5154 if (!(p = lock_user_string(arg1)))
5156 ret = get_errno(swapoff(p));
5157 unlock_user(p, arg1, 0);
5160 case TARGET_NR_sysinfo:
5162 struct target_sysinfo *target_value;
5163 struct sysinfo value;
5164 ret = get_errno(sysinfo(&value));
5165 if (!is_error(ret) && arg1)
5167 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
5169 __put_user(value.uptime, &target_value->uptime);
5170 __put_user(value.loads[0], &target_value->loads[0]);
5171 __put_user(value.loads[1], &target_value->loads[1]);
5172 __put_user(value.loads[2], &target_value->loads[2]);
5173 __put_user(value.totalram, &target_value->totalram);
5174 __put_user(value.freeram, &target_value->freeram);
5175 __put_user(value.sharedram, &target_value->sharedram);
5176 __put_user(value.bufferram, &target_value->bufferram);
5177 __put_user(value.totalswap, &target_value->totalswap);
5178 __put_user(value.freeswap, &target_value->freeswap);
5179 __put_user(value.procs, &target_value->procs);
5180 __put_user(value.totalhigh, &target_value->totalhigh);
5181 __put_user(value.freehigh, &target_value->freehigh);
5182 __put_user(value.mem_unit, &target_value->mem_unit);
5183 unlock_user_struct(target_value, arg1, 1);
5187 #ifdef TARGET_NR_ipc
5189 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
5192 #ifdef TARGET_NR_semget
5193 case TARGET_NR_semget:
5194 ret = get_errno(semget(arg1, arg2, arg3));
5197 #ifdef TARGET_NR_semop
5198 case TARGET_NR_semop:
5199 ret = get_errno(do_semop(arg1, arg2, arg3));
5202 #ifdef TARGET_NR_semctl
5203 case TARGET_NR_semctl:
5204 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
5207 #ifdef TARGET_NR_msgctl
5208 case TARGET_NR_msgctl:
5209 ret = do_msgctl(arg1, arg2, arg3);
5212 #ifdef TARGET_NR_msgget
5213 case TARGET_NR_msgget:
5214 ret = get_errno(msgget(arg1, arg2));
5217 #ifdef TARGET_NR_msgrcv
5218 case TARGET_NR_msgrcv:
5219 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
5222 #ifdef TARGET_NR_msgsnd
5223 case TARGET_NR_msgsnd:
5224 ret = do_msgsnd(arg1, arg2, arg3, arg4);
5227 #ifdef TARGET_NR_shmget
5228 case TARGET_NR_shmget:
5229 ret = get_errno(shmget(arg1, arg2, arg3));
5232 #ifdef TARGET_NR_shmctl
5233 case TARGET_NR_shmctl:
5234 ret = do_shmctl(arg1, arg2, arg3);
5237 #ifdef TARGET_NR_shmat
5238 case TARGET_NR_shmat:
5243 err = do_shmat(arg1, arg2, arg3, &_ret);
5244 ret = err ? err : _ret;
5248 #ifdef TARGET_NR_shmdt
5249 case TARGET_NR_shmdt:
5250 ret = do_shmdt(arg1);
5253 case TARGET_NR_fsync:
5254 ret = get_errno(fsync(arg1));
5256 case TARGET_NR_clone:
5257 #if defined(TARGET_SH4)
5258 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
5259 #elif defined(TARGET_CRIS)
5260 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
5262 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
5265 #ifdef __NR_exit_group
5266 /* new thread calls */
5267 case TARGET_NR_exit_group:
5271 gdb_exit(cpu_env, arg1);
5272 ret = get_errno(exit_group(arg1));
5275 case TARGET_NR_setdomainname:
5276 if (!(p = lock_user_string(arg1)))
5278 ret = get_errno(setdomainname(p, arg2));
5279 unlock_user(p, arg1, 0);
5281 case TARGET_NR_uname:
5282 /* no need to transcode because we use the linux syscall */
5284 struct new_utsname * buf;
5286 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
5288 ret = get_errno(sys_uname(buf));
5289 if (!is_error(ret)) {
5290 /* Overwrite the native machine name with whatever is being
5292 strcpy (buf->machine, UNAME_MACHINE);
5293 /* Allow the user to override the reported release. */
5294 if (qemu_uname_release && *qemu_uname_release)
5295 strcpy (buf->release, qemu_uname_release);
5297 unlock_user_struct(buf, arg1, 1);
5301 case TARGET_NR_modify_ldt:
5302 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
5304 #if !defined(TARGET_X86_64)
5305 case TARGET_NR_vm86old:
5307 case TARGET_NR_vm86:
5308 ret = do_vm86(cpu_env, arg1, arg2);
5312 case TARGET_NR_adjtimex:
5314 #ifdef TARGET_NR_create_module
5315 case TARGET_NR_create_module:
5317 case TARGET_NR_init_module:
5318 case TARGET_NR_delete_module:
5319 #ifdef TARGET_NR_get_kernel_syms
5320 case TARGET_NR_get_kernel_syms:
5323 case TARGET_NR_quotactl:
5325 case TARGET_NR_getpgid:
5326 ret = get_errno(getpgid(arg1));
5328 case TARGET_NR_fchdir:
5329 ret = get_errno(fchdir(arg1));
5331 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5332 case TARGET_NR_bdflush:
5335 #ifdef TARGET_NR_sysfs
5336 case TARGET_NR_sysfs:
5339 case TARGET_NR_personality:
5340 ret = get_errno(personality(arg1));
5342 #ifdef TARGET_NR_afs_syscall
5343 case TARGET_NR_afs_syscall:
5346 #ifdef TARGET_NR__llseek /* Not on alpha */
5347 case TARGET_NR__llseek:
5349 #if defined (__x86_64__)
5350 ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5));
5351 if (put_user_s64(ret, arg4))
5355 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
5356 if (put_user_s64(res, arg4))
5362 case TARGET_NR_getdents:
5363 #if TARGET_ABI_BITS != 32
5365 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5367 struct target_dirent *target_dirp;
5368 struct linux_dirent *dirp;
5369 abi_long count = arg3;
5371 dirp = malloc(count);
5373 ret = -TARGET_ENOMEM;
5377 ret = get_errno(sys_getdents(arg1, dirp, count));
5378 if (!is_error(ret)) {
5379 struct linux_dirent *de;
5380 struct target_dirent *tde;
5382 int reclen, treclen;
5383 int count1, tnamelen;
5387 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5391 reclen = de->d_reclen;
5392 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
5393 tde->d_reclen = tswap16(treclen);
5394 tde->d_ino = tswapl(de->d_ino);
5395 tde->d_off = tswapl(de->d_off);
5396 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
5399 /* XXX: may not be correct */
5400 pstrcpy(tde->d_name, tnamelen, de->d_name);
5401 de = (struct linux_dirent *)((char *)de + reclen);
5403 tde = (struct target_dirent *)((char *)tde + treclen);
5407 unlock_user(target_dirp, arg2, ret);
5413 struct linux_dirent *dirp;
5414 abi_long count = arg3;
5416 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5418 ret = get_errno(sys_getdents(arg1, dirp, count));
5419 if (!is_error(ret)) {
5420 struct linux_dirent *de;
5425 reclen = de->d_reclen;
5428 de->d_reclen = tswap16(reclen);
5429 tswapls(&de->d_ino);
5430 tswapls(&de->d_off);
5431 de = (struct linux_dirent *)((char *)de + reclen);
5435 unlock_user(dirp, arg2, ret);
5439 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5440 case TARGET_NR_getdents64:
5442 struct linux_dirent64 *dirp;
5443 abi_long count = arg3;
5444 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5446 ret = get_errno(sys_getdents64(arg1, dirp, count));
5447 if (!is_error(ret)) {
5448 struct linux_dirent64 *de;
5453 reclen = de->d_reclen;
5456 de->d_reclen = tswap16(reclen);
5457 tswap64s((uint64_t *)&de->d_ino);
5458 tswap64s((uint64_t *)&de->d_off);
5459 de = (struct linux_dirent64 *)((char *)de + reclen);
5463 unlock_user(dirp, arg2, ret);
5466 #endif /* TARGET_NR_getdents64 */
5467 #ifdef TARGET_NR__newselect
5468 case TARGET_NR__newselect:
5469 ret = do_select(arg1, arg2, arg3, arg4, arg5);
5472 #ifdef TARGET_NR_poll
5473 case TARGET_NR_poll:
5475 struct target_pollfd *target_pfd;
5476 unsigned int nfds = arg2;
5481 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
5484 pfd = alloca(sizeof(struct pollfd) * nfds);
5485 for(i = 0; i < nfds; i++) {
5486 pfd[i].fd = tswap32(target_pfd[i].fd);
5487 pfd[i].events = tswap16(target_pfd[i].events);
5489 ret = get_errno(poll(pfd, nfds, timeout));
5490 if (!is_error(ret)) {
5491 for(i = 0; i < nfds; i++) {
5492 target_pfd[i].revents = tswap16(pfd[i].revents);
5494 ret += nfds * (sizeof(struct target_pollfd)
5495 - sizeof(struct pollfd));
5497 unlock_user(target_pfd, arg1, ret);
5501 case TARGET_NR_flock:
5502 /* NOTE: the flock constant seems to be the same for every
5504 ret = get_errno(flock(arg1, arg2));
5506 case TARGET_NR_readv:
5511 vec = alloca(count * sizeof(struct iovec));
5512 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
5514 ret = get_errno(readv(arg1, vec, count));
5515 unlock_iovec(vec, arg2, count, 1);
5518 case TARGET_NR_writev:
5523 vec = alloca(count * sizeof(struct iovec));
5524 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
5526 ret = get_errno(writev(arg1, vec, count));
5527 unlock_iovec(vec, arg2, count, 0);
5530 case TARGET_NR_getsid:
5531 ret = get_errno(getsid(arg1));
5533 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5534 case TARGET_NR_fdatasync:
5535 ret = get_errno(fdatasync(arg1));
5538 case TARGET_NR__sysctl:
5539 /* We don't implement this, but ENOTDIR is always a safe
5541 ret = -TARGET_ENOTDIR;
5543 case TARGET_NR_sched_setparam:
5545 struct sched_param *target_schp;
5546 struct sched_param schp;
5548 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
5550 schp.sched_priority = tswap32(target_schp->sched_priority);
5551 unlock_user_struct(target_schp, arg2, 0);
5552 ret = get_errno(sched_setparam(arg1, &schp));
5555 case TARGET_NR_sched_getparam:
5557 struct sched_param *target_schp;
5558 struct sched_param schp;
5559 ret = get_errno(sched_getparam(arg1, &schp));
5560 if (!is_error(ret)) {
5561 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
5563 target_schp->sched_priority = tswap32(schp.sched_priority);
5564 unlock_user_struct(target_schp, arg2, 1);
5568 case TARGET_NR_sched_setscheduler:
5570 struct sched_param *target_schp;
5571 struct sched_param schp;
5572 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
5574 schp.sched_priority = tswap32(target_schp->sched_priority);
5575 unlock_user_struct(target_schp, arg3, 0);
5576 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
5579 case TARGET_NR_sched_getscheduler:
5580 ret = get_errno(sched_getscheduler(arg1));
5582 case TARGET_NR_sched_yield:
5583 ret = get_errno(sched_yield());
5585 case TARGET_NR_sched_get_priority_max:
5586 ret = get_errno(sched_get_priority_max(arg1));
5588 case TARGET_NR_sched_get_priority_min:
5589 ret = get_errno(sched_get_priority_min(arg1));
5591 case TARGET_NR_sched_rr_get_interval:
5594 ret = get_errno(sched_rr_get_interval(arg1, &ts));
5595 if (!is_error(ret)) {
5596 host_to_target_timespec(arg2, &ts);
5600 case TARGET_NR_nanosleep:
5602 struct timespec req, rem;
5603 target_to_host_timespec(&req, arg1);
5604 ret = get_errno(nanosleep(&req, &rem));
5605 if (is_error(ret) && arg2) {
5606 host_to_target_timespec(arg2, &rem);
5610 #ifdef TARGET_NR_query_module
5611 case TARGET_NR_query_module:
5614 #ifdef TARGET_NR_nfsservctl
5615 case TARGET_NR_nfsservctl:
5618 case TARGET_NR_prctl:
5621 case PR_GET_PDEATHSIG:
5624 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
5625 if (!is_error(ret) && arg2
5626 && put_user_ual(deathsig, arg2))
5631 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
5635 #ifdef TARGET_NR_arch_prctl
5636 case TARGET_NR_arch_prctl:
5637 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
5638 ret = do_arch_prctl(cpu_env, arg1, arg2);
5644 #ifdef TARGET_NR_pread
5645 case TARGET_NR_pread:
5647 if (((CPUARMState *)cpu_env)->eabi)
5650 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5652 ret = get_errno(pread(arg1, p, arg3, arg4));
5653 unlock_user(p, arg2, ret);
5655 case TARGET_NR_pwrite:
5657 if (((CPUARMState *)cpu_env)->eabi)
5660 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5662 ret = get_errno(pwrite(arg1, p, arg3, arg4));
5663 unlock_user(p, arg2, 0);
5666 #ifdef TARGET_NR_pread64
5667 case TARGET_NR_pread64:
5668 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5670 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
5671 unlock_user(p, arg2, ret);
5673 case TARGET_NR_pwrite64:
5674 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5676 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
5677 unlock_user(p, arg2, 0);
5680 case TARGET_NR_getcwd:
5681 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
5683 ret = get_errno(sys_getcwd1(p, arg2));
5684 unlock_user(p, arg1, ret);
5686 case TARGET_NR_capget:
5688 case TARGET_NR_capset:
5690 case TARGET_NR_sigaltstack:
5691 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
5692 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
5693 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
5698 case TARGET_NR_sendfile:
5700 #ifdef TARGET_NR_getpmsg
5701 case TARGET_NR_getpmsg:
5704 #ifdef TARGET_NR_putpmsg
5705 case TARGET_NR_putpmsg:
5708 #ifdef TARGET_NR_vfork
5709 case TARGET_NR_vfork:
5710 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
5714 #ifdef TARGET_NR_ugetrlimit
5715 case TARGET_NR_ugetrlimit:
5718 ret = get_errno(getrlimit(arg1, &rlim));
5719 if (!is_error(ret)) {
5720 struct target_rlimit *target_rlim;
5721 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5723 target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
5724 target_rlim->rlim_max = tswapl(rlim.rlim_max);
5725 unlock_user_struct(target_rlim, arg2, 1);
5730 #ifdef TARGET_NR_truncate64
5731 case TARGET_NR_truncate64:
5732 if (!(p = lock_user_string(arg1)))
5734 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
5735 unlock_user(p, arg1, 0);
5738 #ifdef TARGET_NR_ftruncate64
5739 case TARGET_NR_ftruncate64:
5740 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
5743 #ifdef TARGET_NR_stat64
5744 case TARGET_NR_stat64:
5745 if (!(p = lock_user_string(arg1)))
5747 ret = get_errno(stat(path(p), &st));
5748 unlock_user(p, arg1, 0);
5750 ret = host_to_target_stat64(cpu_env, arg2, &st);
5753 #ifdef TARGET_NR_lstat64
5754 case TARGET_NR_lstat64:
5755 if (!(p = lock_user_string(arg1)))
5757 ret = get_errno(lstat(path(p), &st));
5758 unlock_user(p, arg1, 0);
5760 ret = host_to_target_stat64(cpu_env, arg2, &st);
5763 #ifdef TARGET_NR_fstat64
5764 case TARGET_NR_fstat64:
5765 ret = get_errno(fstat(arg1, &st));
5767 ret = host_to_target_stat64(cpu_env, arg2, &st);
5770 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
5771 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
5772 #ifdef TARGET_NR_fstatat64
5773 case TARGET_NR_fstatat64:
5775 #ifdef TARGET_NR_newfstatat
5776 case TARGET_NR_newfstatat:
5778 if (!(p = lock_user_string(arg2)))
5780 #ifdef __NR_fstatat64
5781 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
5783 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
5786 ret = host_to_target_stat64(cpu_env, arg3, &st);
5790 case TARGET_NR_lchown:
5791 if (!(p = lock_user_string(arg1)))
5793 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
5794 unlock_user(p, arg1, 0);
5796 case TARGET_NR_getuid:
5797 ret = get_errno(high2lowuid(getuid()));
5799 case TARGET_NR_getgid:
5800 ret = get_errno(high2lowgid(getgid()));
5802 case TARGET_NR_geteuid:
5803 ret = get_errno(high2lowuid(geteuid()));
5805 case TARGET_NR_getegid:
5806 ret = get_errno(high2lowgid(getegid()));
5808 case TARGET_NR_setreuid:
5809 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
5811 case TARGET_NR_setregid:
5812 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
5814 case TARGET_NR_getgroups:
5816 int gidsetsize = arg1;
5817 uint16_t *target_grouplist;
5821 grouplist = alloca(gidsetsize * sizeof(gid_t));
5822 ret = get_errno(getgroups(gidsetsize, grouplist));
5823 if (gidsetsize == 0)
5825 if (!is_error(ret)) {
5826 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
5827 if (!target_grouplist)
5829 for(i = 0;i < ret; i++)
5830 target_grouplist[i] = tswap16(grouplist[i]);
5831 unlock_user(target_grouplist, arg2, gidsetsize * 2);
5835 case TARGET_NR_setgroups:
5837 int gidsetsize = arg1;
5838 uint16_t *target_grouplist;
5842 grouplist = alloca(gidsetsize * sizeof(gid_t));
5843 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
5844 if (!target_grouplist) {
5845 ret = -TARGET_EFAULT;
5848 for(i = 0;i < gidsetsize; i++)
5849 grouplist[i] = tswap16(target_grouplist[i]);
5850 unlock_user(target_grouplist, arg2, 0);
5851 ret = get_errno(setgroups(gidsetsize, grouplist));
5854 case TARGET_NR_fchown:
5855 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
5857 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
5858 case TARGET_NR_fchownat:
5859 if (!(p = lock_user_string(arg2)))
5861 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
5862 unlock_user(p, arg2, 0);
5865 #ifdef TARGET_NR_setresuid
5866 case TARGET_NR_setresuid:
5867 ret = get_errno(setresuid(low2highuid(arg1),
5869 low2highuid(arg3)));
5872 #ifdef TARGET_NR_getresuid
5873 case TARGET_NR_getresuid:
5875 uid_t ruid, euid, suid;
5876 ret = get_errno(getresuid(&ruid, &euid, &suid));
5877 if (!is_error(ret)) {
5878 if (put_user_u16(high2lowuid(ruid), arg1)
5879 || put_user_u16(high2lowuid(euid), arg2)
5880 || put_user_u16(high2lowuid(suid), arg3))
5886 #ifdef TARGET_NR_getresgid
5887 case TARGET_NR_setresgid:
5888 ret = get_errno(setresgid(low2highgid(arg1),
5890 low2highgid(arg3)));
5893 #ifdef TARGET_NR_getresgid
5894 case TARGET_NR_getresgid:
5896 gid_t rgid, egid, sgid;
5897 ret = get_errno(getresgid(&rgid, &egid, &sgid));
5898 if (!is_error(ret)) {
5899 if (put_user_u16(high2lowgid(rgid), arg1)
5900 || put_user_u16(high2lowgid(egid), arg2)
5901 || put_user_u16(high2lowgid(sgid), arg3))
5907 case TARGET_NR_chown:
5908 if (!(p = lock_user_string(arg1)))
5910 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
5911 unlock_user(p, arg1, 0);
5913 case TARGET_NR_setuid:
5914 ret = get_errno(setuid(low2highuid(arg1)));
5916 case TARGET_NR_setgid:
5917 ret = get_errno(setgid(low2highgid(arg1)));
5919 case TARGET_NR_setfsuid:
5920 ret = get_errno(setfsuid(arg1));
5922 case TARGET_NR_setfsgid:
5923 ret = get_errno(setfsgid(arg1));
5925 #endif /* USE_UID16 */
5927 #ifdef TARGET_NR_lchown32
5928 case TARGET_NR_lchown32:
5929 if (!(p = lock_user_string(arg1)))
5931 ret = get_errno(lchown(p, arg2, arg3));
5932 unlock_user(p, arg1, 0);
5935 #ifdef TARGET_NR_getuid32
5936 case TARGET_NR_getuid32:
5937 ret = get_errno(getuid());
5941 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
5942 /* Alpha specific */
5943 case TARGET_NR_getxuid:
5947 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
5949 ret = get_errno(getuid());
5952 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
5953 /* Alpha specific */
5954 case TARGET_NR_getxgid:
5958 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
5960 ret = get_errno(getgid());
5964 #ifdef TARGET_NR_getgid32
5965 case TARGET_NR_getgid32:
5966 ret = get_errno(getgid());
5969 #ifdef TARGET_NR_geteuid32
5970 case TARGET_NR_geteuid32:
5971 ret = get_errno(geteuid());
5974 #ifdef TARGET_NR_getegid32
5975 case TARGET_NR_getegid32:
5976 ret = get_errno(getegid());
5979 #ifdef TARGET_NR_setreuid32
5980 case TARGET_NR_setreuid32:
5981 ret = get_errno(setreuid(arg1, arg2));
5984 #ifdef TARGET_NR_setregid32
5985 case TARGET_NR_setregid32:
5986 ret = get_errno(setregid(arg1, arg2));
5989 #ifdef TARGET_NR_getgroups32
5990 case TARGET_NR_getgroups32:
5992 int gidsetsize = arg1;
5993 uint32_t *target_grouplist;
5997 grouplist = alloca(gidsetsize * sizeof(gid_t));
5998 ret = get_errno(getgroups(gidsetsize, grouplist));
5999 if (gidsetsize == 0)
6001 if (!is_error(ret)) {
6002 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
6003 if (!target_grouplist) {
6004 ret = -TARGET_EFAULT;
6007 for(i = 0;i < ret; i++)
6008 target_grouplist[i] = tswap32(grouplist[i]);
6009 unlock_user(target_grouplist, arg2, gidsetsize * 4);
6014 #ifdef TARGET_NR_setgroups32
6015 case TARGET_NR_setgroups32:
6017 int gidsetsize = arg1;
6018 uint32_t *target_grouplist;
6022 grouplist = alloca(gidsetsize * sizeof(gid_t));
6023 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
6024 if (!target_grouplist) {
6025 ret = -TARGET_EFAULT;
6028 for(i = 0;i < gidsetsize; i++)
6029 grouplist[i] = tswap32(target_grouplist[i]);
6030 unlock_user(target_grouplist, arg2, 0);
6031 ret = get_errno(setgroups(gidsetsize, grouplist));
6035 #ifdef TARGET_NR_fchown32
6036 case TARGET_NR_fchown32:
6037 ret = get_errno(fchown(arg1, arg2, arg3));
6040 #ifdef TARGET_NR_setresuid32
6041 case TARGET_NR_setresuid32:
6042 ret = get_errno(setresuid(arg1, arg2, arg3));
6045 #ifdef TARGET_NR_getresuid32
6046 case TARGET_NR_getresuid32:
6048 uid_t ruid, euid, suid;
6049 ret = get_errno(getresuid(&ruid, &euid, &suid));
6050 if (!is_error(ret)) {
6051 if (put_user_u32(ruid, arg1)
6052 || put_user_u32(euid, arg2)
6053 || put_user_u32(suid, arg3))
6059 #ifdef TARGET_NR_setresgid32
6060 case TARGET_NR_setresgid32:
6061 ret = get_errno(setresgid(arg1, arg2, arg3));
6064 #ifdef TARGET_NR_getresgid32
6065 case TARGET_NR_getresgid32:
6067 gid_t rgid, egid, sgid;
6068 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6069 if (!is_error(ret)) {
6070 if (put_user_u32(rgid, arg1)
6071 || put_user_u32(egid, arg2)
6072 || put_user_u32(sgid, arg3))
6078 #ifdef TARGET_NR_chown32
6079 case TARGET_NR_chown32:
6080 if (!(p = lock_user_string(arg1)))
6082 ret = get_errno(chown(p, arg2, arg3));
6083 unlock_user(p, arg1, 0);
6086 #ifdef TARGET_NR_setuid32
6087 case TARGET_NR_setuid32:
6088 ret = get_errno(setuid(arg1));
6091 #ifdef TARGET_NR_setgid32
6092 case TARGET_NR_setgid32:
6093 ret = get_errno(setgid(arg1));
6096 #ifdef TARGET_NR_setfsuid32
6097 case TARGET_NR_setfsuid32:
6098 ret = get_errno(setfsuid(arg1));
6101 #ifdef TARGET_NR_setfsgid32
6102 case TARGET_NR_setfsgid32:
6103 ret = get_errno(setfsgid(arg1));
6107 case TARGET_NR_pivot_root:
6109 #ifdef TARGET_NR_mincore
6110 case TARGET_NR_mincore:
6113 ret = -TARGET_EFAULT;
6114 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
6116 if (!(p = lock_user_string(arg3)))
6118 ret = get_errno(mincore(a, arg2, p));
6119 unlock_user(p, arg3, ret);
6121 unlock_user(a, arg1, 0);
6125 #ifdef TARGET_NR_arm_fadvise64_64
6126 case TARGET_NR_arm_fadvise64_64:
6129 * arm_fadvise64_64 looks like fadvise64_64 but
6130 * with different argument order
6138 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
6139 #ifdef TARGET_NR_fadvise64_64
6140 case TARGET_NR_fadvise64_64:
6142 /* This is a hint, so ignoring and returning success is ok. */
6146 #ifdef TARGET_NR_madvise
6147 case TARGET_NR_madvise:
6148 /* A straight passthrough may not be safe because qemu sometimes
6149 turns private file-backed mappings into anonymous mappings.
6150 This will break MADV_DONTNEED.
6151 This is a hint, so ignoring and returning success is ok. */
6155 #if TARGET_ABI_BITS == 32
6156 case TARGET_NR_fcntl64:
6160 struct target_flock64 *target_fl;
6162 struct target_eabi_flock64 *target_efl;
6166 case TARGET_F_GETLK64:
6169 case TARGET_F_SETLK64:
6172 case TARGET_F_SETLKW64:
6181 case TARGET_F_GETLK64:
6183 if (((CPUARMState *)cpu_env)->eabi) {
6184 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6186 fl.l_type = tswap16(target_efl->l_type);
6187 fl.l_whence = tswap16(target_efl->l_whence);
6188 fl.l_start = tswap64(target_efl->l_start);
6189 fl.l_len = tswap64(target_efl->l_len);
6190 fl.l_pid = tswapl(target_efl->l_pid);
6191 unlock_user_struct(target_efl, arg3, 0);
6195 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6197 fl.l_type = tswap16(target_fl->l_type);
6198 fl.l_whence = tswap16(target_fl->l_whence);
6199 fl.l_start = tswap64(target_fl->l_start);
6200 fl.l_len = tswap64(target_fl->l_len);
6201 fl.l_pid = tswapl(target_fl->l_pid);
6202 unlock_user_struct(target_fl, arg3, 0);
6204 ret = get_errno(fcntl(arg1, cmd, &fl));
6207 if (((CPUARMState *)cpu_env)->eabi) {
6208 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
6210 target_efl->l_type = tswap16(fl.l_type);
6211 target_efl->l_whence = tswap16(fl.l_whence);
6212 target_efl->l_start = tswap64(fl.l_start);
6213 target_efl->l_len = tswap64(fl.l_len);
6214 target_efl->l_pid = tswapl(fl.l_pid);
6215 unlock_user_struct(target_efl, arg3, 1);
6219 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
6221 target_fl->l_type = tswap16(fl.l_type);
6222 target_fl->l_whence = tswap16(fl.l_whence);
6223 target_fl->l_start = tswap64(fl.l_start);
6224 target_fl->l_len = tswap64(fl.l_len);
6225 target_fl->l_pid = tswapl(fl.l_pid);
6226 unlock_user_struct(target_fl, arg3, 1);
6231 case TARGET_F_SETLK64:
6232 case TARGET_F_SETLKW64:
6234 if (((CPUARMState *)cpu_env)->eabi) {
6235 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6237 fl.l_type = tswap16(target_efl->l_type);
6238 fl.l_whence = tswap16(target_efl->l_whence);
6239 fl.l_start = tswap64(target_efl->l_start);
6240 fl.l_len = tswap64(target_efl->l_len);
6241 fl.l_pid = tswapl(target_efl->l_pid);
6242 unlock_user_struct(target_efl, arg3, 0);
6246 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6248 fl.l_type = tswap16(target_fl->l_type);
6249 fl.l_whence = tswap16(target_fl->l_whence);
6250 fl.l_start = tswap64(target_fl->l_start);
6251 fl.l_len = tswap64(target_fl->l_len);
6252 fl.l_pid = tswapl(target_fl->l_pid);
6253 unlock_user_struct(target_fl, arg3, 0);
6255 ret = get_errno(fcntl(arg1, cmd, &fl));
6258 ret = do_fcntl(arg1, cmd, arg3);
6264 #ifdef TARGET_NR_cacheflush
6265 case TARGET_NR_cacheflush:
6266 /* self-modifying code is handled automatically, so nothing needed */
6270 #ifdef TARGET_NR_security
6271 case TARGET_NR_security:
6274 #ifdef TARGET_NR_getpagesize
6275 case TARGET_NR_getpagesize:
6276 ret = TARGET_PAGE_SIZE;
6279 case TARGET_NR_gettid:
6280 ret = get_errno(gettid());
6282 #ifdef TARGET_NR_readahead
6283 case TARGET_NR_readahead:
6284 #if TARGET_ABI_BITS == 32
6286 if (((CPUARMState *)cpu_env)->eabi)
6293 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
6295 ret = get_errno(readahead(arg1, arg2, arg3));
6299 #ifdef TARGET_NR_setxattr
6300 case TARGET_NR_setxattr:
6301 case TARGET_NR_lsetxattr:
6302 case TARGET_NR_fsetxattr:
6303 case TARGET_NR_getxattr:
6304 case TARGET_NR_lgetxattr:
6305 case TARGET_NR_fgetxattr:
6306 case TARGET_NR_listxattr:
6307 case TARGET_NR_llistxattr:
6308 case TARGET_NR_flistxattr:
6309 case TARGET_NR_removexattr:
6310 case TARGET_NR_lremovexattr:
6311 case TARGET_NR_fremovexattr:
6312 goto unimplemented_nowarn;
6314 #ifdef TARGET_NR_set_thread_area
6315 case TARGET_NR_set_thread_area:
6316 #if defined(TARGET_MIPS)
6317 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
6320 #elif defined(TARGET_CRIS)
6322 ret = -TARGET_EINVAL;
6324 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
6328 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6329 ret = do_set_thread_area(cpu_env, arg1);
6332 goto unimplemented_nowarn;
6335 #ifdef TARGET_NR_get_thread_area
6336 case TARGET_NR_get_thread_area:
6337 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6338 ret = do_get_thread_area(cpu_env, arg1);
6340 goto unimplemented_nowarn;
6343 #ifdef TARGET_NR_getdomainname
6344 case TARGET_NR_getdomainname:
6345 goto unimplemented_nowarn;
6348 #ifdef TARGET_NR_clock_gettime
6349 case TARGET_NR_clock_gettime:
6352 ret = get_errno(clock_gettime(arg1, &ts));
6353 if (!is_error(ret)) {
6354 host_to_target_timespec(arg2, &ts);
6359 #ifdef TARGET_NR_clock_getres
6360 case TARGET_NR_clock_getres:
6363 ret = get_errno(clock_getres(arg1, &ts));
6364 if (!is_error(ret)) {
6365 host_to_target_timespec(arg2, &ts);
6370 #ifdef TARGET_NR_clock_nanosleep
6371 case TARGET_NR_clock_nanosleep:
6374 target_to_host_timespec(&ts, arg3);
6375 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
6377 host_to_target_timespec(arg4, &ts);
6382 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
6383 case TARGET_NR_set_tid_address:
6384 ret = get_errno(set_tid_address((int *)g2h(arg1)));
6388 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
6389 case TARGET_NR_tkill:
6390 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
6394 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
6395 case TARGET_NR_tgkill:
6396 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
6397 target_to_host_signal(arg3)));
6401 #ifdef TARGET_NR_set_robust_list
6402 case TARGET_NR_set_robust_list:
6403 goto unimplemented_nowarn;
6406 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
6407 case TARGET_NR_utimensat:
6409 struct timespec ts[2];
6410 target_to_host_timespec(ts, arg3);
6411 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
6413 ret = get_errno(sys_utimensat(arg1, NULL, ts, arg4));
6415 if (!(p = lock_user_string(arg2))) {
6416 ret = -TARGET_EFAULT;
6419 ret = get_errno(sys_utimensat(arg1, path(p), ts, arg4));
6420 unlock_user(p, arg2, 0);
6425 #if defined(USE_NPTL)
6426 case TARGET_NR_futex:
6427 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
6430 #ifdef TARGET_NR_inotify_init
6431 case TARGET_NR_inotify_init:
6432 ret = get_errno(sys_inotify_init());
6435 #ifdef TARGET_NR_inotify_add_watch
6436 case TARGET_NR_inotify_add_watch:
6437 p = lock_user_string(arg2);
6438 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
6439 unlock_user(p, arg2, 0);
6442 #ifdef TARGET_NR_inotify_rm_watch
6443 case TARGET_NR_inotify_rm_watch:
6444 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
6448 #ifdef TARGET_NR_mq_open
6449 case TARGET_NR_mq_open:
6451 struct mq_attr posix_mq_attr;
6453 p = lock_user_string(arg1 - 1);
6455 copy_from_user_mq_attr (&posix_mq_attr, arg4);
6456 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
6457 unlock_user (p, arg1, 0);
6461 case TARGET_NR_mq_unlink:
6462 p = lock_user_string(arg1 - 1);
6463 ret = get_errno(mq_unlink(p));
6464 unlock_user (p, arg1, 0);
6467 case TARGET_NR_mq_timedsend:
6471 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6473 target_to_host_timespec(&ts, arg5);
6474 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
6475 host_to_target_timespec(arg5, &ts);
6478 ret = get_errno(mq_send(arg1, p, arg3, arg4));
6479 unlock_user (p, arg2, arg3);
6483 case TARGET_NR_mq_timedreceive:
6488 p = lock_user (VERIFY_READ, arg2, arg3, 1);
6490 target_to_host_timespec(&ts, arg5);
6491 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
6492 host_to_target_timespec(arg5, &ts);
6495 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
6496 unlock_user (p, arg2, arg3);
6498 put_user_u32(prio, arg4);
6502 /* Not implemented for now... */
6503 /* case TARGET_NR_mq_notify: */
6506 case TARGET_NR_mq_getsetattr:
6508 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
6511 ret = mq_getattr(arg1, &posix_mq_attr_out);
6512 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
6515 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
6516 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
6525 gemu_log("qemu: Unsupported syscall: %d\n", num);
6526 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
6527 unimplemented_nowarn:
6529 ret = -TARGET_ENOSYS;
6534 gemu_log(" = %ld\n", ret);
6537 print_syscall_ret(num, ret);
6540 ret = -TARGET_EFAULT;