/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator. */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
    int saved_T0, saved_T1, saved_T2;
    CPUState *saved_env;
#ifdef __sparc__
    int saved_i7, tmp_T0;
#endif
    int code_gen_size, ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb, **ptb;
    target_ulong cs_base, pc;
    uint8_t *tc_ptr;
    unsigned int flags;

    /* first we save global registers */
    saved_env = env;
    env = env1;
    saved_T0 = T0;
    saved_T1 = T1;
    saved_T2 = T2;
#ifdef __sparc__
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
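    /* note: CC_OP/CC_SRC implement lazy condition code evaluation: the
       arithmetic flags are only recomputed from CC_SRC when actually
       needed, and DF is kept as +1/-1 (the string-op direction) rather
       than as eflags bit 10, so it can be added directly to ESI/EDI. */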
#elif defined(TARGET_ARM)
    {
        unsigned int psr;
        psr = env->cpsr;
        env->CF = (psr >> 29) & 1;
        env->NZF = (psr & 0xc0000000) ^ 0x40000000;
        env->VF = (psr << 3) & 0x80000000;
        env->QF = (psr >> 27) & 1;
        env->cpsr = psr & ~CACHED_CPSR_BITS;
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_pc, 0);
#endif
                }
                env->exception_index = -1;
            }

            T0 = 0; /* force lookup of first TB */
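            /* T0 also carries the TB chaining state: a block that exits
               leaves its own TranslationBlock pointer in T0, with the low
               2 bits naming the jump slot it left through, so the loop can
               patch that slot below (see tb_add_jump); zero disables
               patching. */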
            for(;;) {
#ifdef __sparc__
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
                    /* if hardware interrupt pending, we execute it */
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                        env->exception_index = EXCP_EXTERNAL;
                        env->error_code = 0;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                    } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
                        env->exception_index = EXCP_DECR;
                        env->error_code = 0;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env->interrupt_index, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
#if defined(TARGET_I386)
                    /* restore flags in standard format */
                    env->regs[R_EAX] = EAX;
                    env->regs[R_EBX] = EBX;
                    env->regs[R_ECX] = ECX;
                    env->regs[R_EDX] = EDX;
                    env->regs[R_ESI] = ESI;
                    env->regs[R_EDI] = EDI;
                    env->regs[R_EBP] = EBP;
                    env->regs[R_ESP] = ESP;
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    env->cpsr = compute_cpsr();
                    cpu_dump_state(env, logfile, fprintf, 0);
                    env->cpsr &= ~CACHED_CPSR_BITS;
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                /* we record a subset of the CPU state. It will
                   always be the same before a given translated block
                   is executed. */
#if defined(TARGET_I386)
                flags = env->hflags;
                flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                cs_base = env->segs[R_CS].base;
                pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
                flags = 0;
                cs_base = 0;
                pc = env->regs[15];
#elif defined(TARGET_SPARC)
                flags = 0;
                cs_base = env->npc;
                pc = env->pc;
#elif defined(TARGET_PPC)
                flags = 0;
                cs_base = 0;
                pc = env->nip;
#else
#error unsupported CPU
#endif
                tb = tb_find(&ptb, pc, cs_base,
                             flags);
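                /* tb_find presumably only probes the virtual-pc hash
                   table (tb_hash); on a miss we search the physical hash
                   below and, failing that, translate a fresh block. */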
                if (!tb) {
                    TranslationBlock **ptb1;
                    unsigned int h;
                    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

                    spin_lock(&tb_lock);

                    tb_invalidated_flag = 0;

                    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

                    /* find translated block using physical mappings */
                    phys_pc = get_phys_addr_code(env, pc);
                    phys_page1 = phys_pc & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    h = tb_phys_hash_func(phys_pc);
                    ptb1 = &tb_phys_hash[h];
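                    /* hashing on the physical PC lets different virtual
                       mappings of the same code share one translation and
                       lets self-modifying code be tracked per physical
                       page. */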
                    for(;;) {
                        tb = *ptb1;
                        if (!tb)
                            goto not_found;
                        if (tb->pc == pc &&
                            tb->page_addr[0] == phys_page1 &&
                            tb->cs_base == cs_base &&
                            tb->flags == flags) {
                            /* check next page if needed */
                            if (tb->page_addr[1] != -1) {
                                virt_page2 = (pc & TARGET_PAGE_MASK) +
                                    TARGET_PAGE_SIZE;
                                phys_page2 = get_phys_addr_code(env, virt_page2);
                                if (tb->page_addr[1] == phys_page2)
                                    goto found;
                            } else {
                                goto found;
                            }
                        }
                        ptb1 = &tb->phys_hash_next;
                    }
                not_found:
                    /* if no translated code available, then translate it now */
                    tb = tb_alloc(pc);
                    if (!tb) {
                        /* flush must be done */
                        tb_flush(env);
                        /* cannot fail at this point */
                        tb = tb_alloc(pc);
                        /* don't forget to invalidate previous TB info */
                        ptb = &tb_hash[tb_hash_func(pc)];
                        T0 = 0;
                    }
                    tc_ptr = code_gen_ptr;
                    tb->tc_ptr = tc_ptr;
                    tb->cs_base = cs_base;
                    tb->flags = flags;
                    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
                    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
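                    /* align-up: adding (CODE_GEN_ALIGN - 1) and masking
                       the low bits rounds code_gen_ptr to the next
                       CODE_GEN_ALIGN boundary (power-of-two trick). */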
                    /* check next page if needed */
                    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
                        phys_page2 = get_phys_addr_code(env, virt_page2);
                    }
                    tb_link_phys(tb, phys_pc, phys_page2);
                found:
                    if (tb_invalidated_flag) {
                        /* as some TB could have been invalidated because
                           of memory exceptions while generating the code, we
                           must recompute the hash index here */
                        ptb = &tb_hash[tb_hash_func(pc)];
                        while (*ptb != NULL)
                            ptb = &(*ptb)->hash_next;
                        T0 = 0;
                    }
                    /* we add the TB in the virtual pc hash table */
                    *ptb = tb;
                    tb->hash_next = NULL;
                    tb_link(tb);
                    spin_unlock(&tb_lock);
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#ifdef __sparc__
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. */
                if (T0 != 0
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                    && (tb->cflags & CF_CODE_COPY) ==
                       (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                    ) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
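                    /* T0 & ~3 is the TB that just exited and T0 & 3 the
                       jump slot it left through; that slot is patched to
                       branch straight to 'tb' next time. */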
#if defined(USE_CODE_COPY)
                    /* propagates the FP use info */
                    ((TranslationBlock *)(T0 & ~3))->cflags |=
                        (tb->cflags & CF_FP_USED);
#endif
                    spin_unlock(&tb_lock);
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
{
    if (!(tb->cflags & CF_CODE_COPY)) {
        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
            save_native_fp_state(env);
        }
        gen_func();
    } else {
        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
            restore_native_fp_state(env);
        }
        /* we work with native eflags */
        CC_SRC = cc_table[CC_OP].compute_all();
        CC_OP = CC_OP_EFLAGS;
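        /* the copied code runs with the real host eflags and addresses
           CPUState through %fs, hence the spill/reload of every guest
           register around the indirect jump below.  Operand map: %0-%7
           guest regs, %8 cc_src, %9 tmp0 (T0), %10 gen_func, %11 df,
           %12 saved_esp; 0x400 is the eflags DF bit and 0x8d5 the
           O/S/Z/A/P/C mask. */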
        asm(".globl exec_loop\n"
            "\n"
            "debug1:\n"
            "    pushl %%ebp\n"
            "    fs movl %10, %9\n"
            "    fs movl %11, %%eax\n"
            "    andl $0x400, %%eax\n"
            "    fs orl %8, %%eax\n"
            "    pushl %%eax\n"
            "    popf\n"
            "    fs movl %%esp, %12\n"
            "    fs movl %0, %%eax\n"
            "    fs movl %1, %%ecx\n"
            "    fs movl %2, %%edx\n"
            "    fs movl %3, %%ebx\n"
            "    fs movl %4, %%esp\n"
            "    fs movl %5, %%ebp\n"
            "    fs movl %6, %%esi\n"
            "    fs movl %7, %%edi\n"
            "    fs jmp *%9\n"
            "exec_loop:\n"
            "    fs movl %%esp, %4\n"
            "    fs movl %12, %%esp\n"
            "    fs movl %%eax, %0\n"
            "    fs movl %%ecx, %1\n"
            "    fs movl %%edx, %2\n"
            "    fs movl %%ebx, %3\n"
            "    fs movl %%ebp, %5\n"
            "    fs movl %%esi, %6\n"
            "    fs movl %%edi, %7\n"
            "    pushf\n"
            "    popl %%eax\n"
            "    movl %%eax, %%ecx\n"
            "    andl $0x400, %%ecx\n"
            "    shrl $9, %%ecx\n"
            "    andl $0x8d5, %%eax\n"
            "    fs movl %%eax, %8\n"
            "    movl $1, %%eax\n"
            "    subl %%ecx, %%eax\n"
            "    fs movl %%eax, %11\n"
            "    fs movl %9, %%ebx\n" /* get T0 value */
            "    popl %%ebp\n"
            : /* no outputs */
            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
              "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
              "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
              "a" (gen_func),
              "m" (*(uint8_t *)offsetof(CPUState, df)),
              "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
            : "%ecx", "%edx"
            );
    }
}
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
            }
        }
    } /* for(;;) */

#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
    /* restore global registers */
#elif defined(TARGET_ARM)
    env->cpsr = compute_cpsr();
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#else
#error unsupported target CPU
#endif
#ifdef __sparc__
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
    T0 = saved_T0;
    T1 = saved_T1;
    T2 = saved_T2;
    env = saved_env;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_fsave((target_ulong)ptr, data32);
    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_frstor((target_ulong)ptr, data32);
    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored. */
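/* judging from the call sites, the handler returns 1 when the fault was
   handled (resolved, or converted into a guest exception) and 0 when the
   default host signal handling should apply. */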
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
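
    /* cpu_x86_handle_mmu_fault return convention, as used below: < 0
       means the access was not an MMU fault at all, 0 means the fault
       was handled internally, 1 means a guest page fault must be
       raised, and anything else switches this block to the soft MMU. */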
    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    return 0;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    return 0;
}
752 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
753 int is_write, sigset_t *old_set,
756 TranslationBlock *tb;
761 env = cpu_single_env; /* XXX: find a correct solution for multithread */
763 #if defined(DEBUG_SIGNAL)
764 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
765 pc, address, is_write, *(unsigned long *)old_set);
767 /* XXX: locking issue */
768 if (is_write && page_unprotect(address, pc, puc)) {
772 /* see if it is an MMU fault */
773 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
775 return 0; /* not an MMU fault */
777 return 1; /* the MMU fault was handled without causing real CPU fault */
779 /* now we have a real cpu fault */
782 /* the PC is inside the translated code. It means that we have
783 a virtual CPU fault */
784 cpu_restore_state(tb, env, pc, puc);
788 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
789 env->nip, env->error_code, tb);
791 /* we restore the process signal mask as the sigreturn should
792 do it (XXX: use sigsetjmp) */
793 sigprocmask(SIG_SETMASK, old_set, NULL);
794 do_raise_exception_err(env->exception_index, env->error_code);
796 /* activate soft MMU for this block */
797 cpu_resume_from_signal(env, puc);
799 /* never comes here */
803 #error unsupported target CPU

#if defined(__i386__)

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
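/* the layout assumed above: the FP registers sit right after the 48
   32-bit GPR/special-purpose slots of uc_mcontext.regs (hence the
   48*4 byte offset), and FPSCR follows the 32 double-precision
   registers (32*2 words). */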
891 /* Exception Registers access */
892 # define DAR_sig(context) REG_sig(dar, context)
893 # define DSISR_sig(context) REG_sig(dsisr, context)
894 # define TRAP_sig(context) REG_sig(trap, context)

#ifdef __APPLE__
# include <sys/ucontext.h>

typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
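    /* the Alpha opcode field is insn<31:26>; the cases below are the
       integer and FP store opcodes, so everything else is treated as a
       read access */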
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
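        /* op = 3 selects the load/store format; op3 in insn<24:19>
           distinguishes the store opcodes listed below from loads */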
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */