4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
31 #define ENABLE_ARCH_5J 0
32 #define ENABLE_ARCH_6 1
33 #define ENABLE_ARCH_6T2 1
35 #define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
37 /* internal defines */
38 typedef struct DisasContext {
41 /* Nonzero if this instruction has been conditionally skipped. */
43 /* The label that will be jumped to when the instruction is skipped. */
45 struct TranslationBlock *tb;
46 int singlestep_enabled;
48 #if !defined(CONFIG_USER_ONLY)
53 #if defined(CONFIG_USER_ONLY)
56 #define IS_USER(s) (s->user)
59 #define DISAS_JUMP_NEXT 4
61 #ifdef USE_DIRECT_JUMP
64 #define TBPARAM(x) (long)(x)
67 /* XXX: move that elsewhere */
68 static uint16_t *gen_opc_ptr;
69 static uint32_t *gen_opparam_ptr;
74 #define DEF(s, n, copy_size) INDEX_op_ ## s,
82 static GenOpFunc1 *gen_test_cc[14] = {
99 const uint8_t table_logic_cc[16] = {
118 static GenOpFunc1 *gen_shift_T1_im[4] = {
125 static GenOpFunc *gen_shift_T1_0[4] = {
132 static GenOpFunc1 *gen_shift_T2_im[4] = {
139 static GenOpFunc *gen_shift_T2_0[4] = {
146 static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
147 gen_op_shll_T1_im_cc,
148 gen_op_shrl_T1_im_cc,
149 gen_op_sarl_T1_im_cc,
150 gen_op_rorl_T1_im_cc,
153 static GenOpFunc *gen_shift_T1_0_cc[4] = {
160 static GenOpFunc *gen_shift_T1_T0[4] = {
167 static GenOpFunc *gen_shift_T1_T0_cc[4] = {
168 gen_op_shll_T1_T0_cc,
169 gen_op_shrl_T1_T0_cc,
170 gen_op_sarl_T1_T0_cc,
171 gen_op_rorl_T1_T0_cc,
174 static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
231 static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
270 static GenOpFunc1 *gen_op_movl_TN_im[3] = {
276 static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
277 gen_op_shll_T0_im_thumb,
278 gen_op_shrl_T0_im_thumb,
279 gen_op_sarl_T0_im_thumb,
282 static inline void gen_bx(DisasContext *s)
284 s->is_jmp = DISAS_UPDATE;
289 #if defined(CONFIG_USER_ONLY)
290 #define gen_ldst(name, s) gen_op_##name##_raw()
292 #define gen_ldst(name, s) do { \
294 gen_op_##name##_user(); \
296 gen_op_##name##_kernel(); \
300 static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
305 /* normally, since we updated PC, we need only to add one insn */
307 val = (long)s->pc + 2;
309 val = (long)s->pc + 4;
310 gen_op_movl_TN_im[t](val);
312 gen_op_movl_TN_reg[t][reg]();
316 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
318 gen_movl_TN_reg(s, reg, 0);
321 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
323 gen_movl_TN_reg(s, reg, 1);
326 static inline void gen_movl_T2_reg(DisasContext *s, int reg)
328 gen_movl_TN_reg(s, reg, 2);
331 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
333 gen_op_movl_reg_TN[t][reg]();
335 s->is_jmp = DISAS_JUMP;
339 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
341 gen_movl_reg_TN(s, reg, 0);
344 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
346 gen_movl_reg_TN(s, reg, 1);
349 /* Force a TB lookup after an instruction that changes the CPU state. */
350 static inline void gen_lookup_tb(DisasContext *s)
352 gen_op_movl_T0_im(s->pc);
353 gen_movl_reg_T0(s, 15);
354 s->is_jmp = DISAS_UPDATE;
357 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
359 int val, rm, shift, shiftop;
361 if (!(insn & (1 << 25))) {
364 if (!(insn & (1 << 23)))
367 gen_op_addl_T1_im(val);
371 shift = (insn >> 7) & 0x1f;
372 gen_movl_T2_reg(s, rm);
373 shiftop = (insn >> 5) & 3;
375 gen_shift_T2_im[shiftop](shift);
376 } else if (shiftop != 0) {
377 gen_shift_T2_0[shiftop]();
379 if (!(insn & (1 << 23)))
386 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn)
390 if (insn & (1 << 22)) {
392 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
393 if (!(insn & (1 << 23)))
396 gen_op_addl_T1_im(val);
400 gen_movl_T2_reg(s, rm);
401 if (!(insn & (1 << 23)))
408 #define VFP_OP(name) \
409 static inline void gen_vfp_##name(int dp) \
412 gen_op_vfp_##name##d(); \
414 gen_op_vfp_##name##s(); \
436 static inline void gen_vfp_ld(DisasContext *s, int dp)
439 gen_ldst(vfp_ldd, s);
441 gen_ldst(vfp_lds, s);
444 static inline void gen_vfp_st(DisasContext *s, int dp)
447 gen_ldst(vfp_std, s);
449 gen_ldst(vfp_sts, s);
453 vfp_reg_offset (int dp, int reg)
456 return offsetof(CPUARMState, vfp.regs[reg]);
458 return offsetof(CPUARMState, vfp.regs[reg >> 1])
459 + offsetof(CPU_DoubleU, l.upper);
461 return offsetof(CPUARMState, vfp.regs[reg >> 1])
462 + offsetof(CPU_DoubleU, l.lower);
465 static inline void gen_mov_F0_vreg(int dp, int reg)
468 gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
470 gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
473 static inline void gen_mov_F1_vreg(int dp, int reg)
476 gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
478 gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
481 static inline void gen_mov_vreg_F0(int dp, int reg)
484 gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
486 gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
489 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
490 instruction is not defined. */
491 static int disas_cp15_insn(DisasContext *s, uint32_t insn)
495 /* ??? Some cp15 registers are accessible from userspace. */
499 rd = (insn >> 12) & 0xf;
500 if (insn & (1 << 20)) {
501 gen_op_movl_T0_cp15(insn);
502 /* If the destination register is r15 then the condition codes are set. */
504 gen_movl_reg_T0(s, rd);
506 gen_movl_T0_reg(s, rd);
507 gen_op_movl_cp15_T0(insn);
513 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
514 (i.e. an undefined instruction). */
515 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
517 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
520 dp = ((insn & 0xf00) == 0xb00);
521 switch ((insn >> 24) & 0xf) {
523 if (insn & (1 << 4)) {
524 /* single register transfer */
525 if ((insn & 0x6f) != 0x00)
527 rd = (insn >> 12) & 0xf;
531 rn = (insn >> 16) & 0xf;
532 /* Get the existing value even for arm->vfp moves because
533 we only set half the register. */
534 gen_mov_F0_vreg(1, rn);
536 if (insn & (1 << 20)) {
538 if (insn & (1 << 21))
539 gen_movl_reg_T1(s, rd);
541 gen_movl_reg_T0(s, rd);
544 if (insn & (1 << 21))
545 gen_movl_T1_reg(s, rd);
547 gen_movl_T0_reg(s, rd);
549 gen_mov_vreg_F0(dp, rn);
552 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
553 if (insn & (1 << 20)) {
555 if (insn & (1 << 21)) {
556 /* system register */
563 gen_op_vfp_movl_T0_fpscr_flags();
565 gen_op_vfp_movl_T0_fpscr();
571 gen_mov_F0_vreg(0, rn);
575 /* Set the 4 flag bits in the CPSR. */
576 gen_op_movl_cpsr_T0(0xf0000000);
578 gen_movl_reg_T0(s, rd);
581 gen_movl_T0_reg(s, rd);
582 if (insn & (1 << 21)) {
583 /* system register */
586 /* Writes are ignored. */
589 gen_op_vfp_movl_fpscr_T0();
590 /* This could change vector settings, so jump to
591 the next instruction. */
599 gen_mov_vreg_F0(0, rn);
604 /* data processing */
605 /* The opcode is in bits 23, 21, 20 and 6. */
606 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
610 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
612 /* rn is register number */
615 rn = (insn >> 16) & 0xf;
618 if (op == 15 && (rn == 15 || rn > 17)) {
619 /* Integer or single precision destination. */
620 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
622 if (insn & (1 << 22))
624 rd = (insn >> 12) & 0xf;
627 if (op == 15 && (rn == 16 || rn == 17)) {
628 /* Integer source. */
629 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
636 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
637 if (op == 15 && rn == 15) {
638 /* Double precision destination. */
639 if (insn & (1 << 22))
641 rd = (insn >> 12) & 0xf;
643 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
644 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
647 veclen = env->vfp.vec_len;
648 if (op == 15 && rn > 3)
651 /* Shut up compiler warnings. */
662 /* Figure out what type of vector operation this is. */
663 if ((rd & bank_mask) == 0) {
668 delta_d = (env->vfp.vec_stride >> 1) + 1;
670 delta_d = env->vfp.vec_stride + 1;
672 if ((rm & bank_mask) == 0) {
673 /* mixed scalar/vector */
682 /* Load the initial operands. */
688 gen_mov_F0_vreg(0, rm);
693 gen_mov_F0_vreg(dp, rd);
694 gen_mov_F1_vreg(dp, rm);
698 /* Compare with zero */
699 gen_mov_F0_vreg(dp, rd);
703 /* One source operand. */
704 gen_mov_F0_vreg(dp, rm);
707 /* Two source operands. */
708 gen_mov_F0_vreg(dp, rn);
709 gen_mov_F1_vreg(dp, rm);
713 /* Perform the calculation. */
715 case 0: /* mac: fd + (fn * fm) */
717 gen_mov_F1_vreg(dp, rd);
720 case 1: /* nmac: fd - (fn * fm) */
723 gen_mov_F1_vreg(dp, rd);
726 case 2: /* msc: -fd + (fn * fm) */
728 gen_mov_F1_vreg(dp, rd);
731 case 3: /* nmsc: -fd - (fn * fm) */
733 gen_mov_F1_vreg(dp, rd);
737 case 4: /* mul: fn * fm */
740 case 5: /* nmul: -(fn * fm) */
744 case 6: /* add: fn + fm */
747 case 7: /* sub: fn - fm */
750 case 8: /* div: fn / fm */
753 case 15: /* extension space */
780 case 15: /* single<->double conversion */
795 case 25: /* ftouiz */
801 case 27: /* ftosiz */
804 default: /* undefined */
805 printf ("rn:%d\n", rn);
809 default: /* undefined */
810 printf ("op:%d\n", op);
814 /* Write back the result. */
815 if (op == 15 && (rn >= 8 && rn <= 11))
816 ; /* Comparison, do nothing. */
817 else if (op == 15 && rn > 17)
818 /* Integer result. */
819 gen_mov_vreg_F0(0, rd);
820 else if (op == 15 && rn == 15)
822 gen_mov_vreg_F0(!dp, rd);
824 gen_mov_vreg_F0(dp, rd);
826 /* break out of the loop if we have finished */
830 if (op == 15 && delta_m == 0) {
831 /* single source one-many */
833 rd = ((rd + delta_d) & (bank_mask - 1))
835 gen_mov_vreg_F0(dp, rd);
839 /* Setup the next operands. */
841 rd = ((rd + delta_d) & (bank_mask - 1))
845 /* One source operand. */
846 rm = ((rm + delta_m) & (bank_mask - 1))
848 gen_mov_F0_vreg(dp, rm);
850 /* Two source operands. */
851 rn = ((rn + delta_d) & (bank_mask - 1))
853 gen_mov_F0_vreg(dp, rn);
855 rm = ((rm + delta_m) & (bank_mask - 1))
857 gen_mov_F1_vreg(dp, rm);
865 if (dp && (insn & (1 << 22))) {
866 /* two-register transfer */
867 rn = (insn >> 16) & 0xf;
868 rd = (insn >> 12) & 0xf;
874 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
876 if (insn & (1 << 20)) {
879 gen_mov_F0_vreg(1, rm);
881 gen_movl_reg_T0(s, rd);
882 gen_movl_reg_T1(s, rn);
884 gen_mov_F0_vreg(0, rm);
886 gen_movl_reg_T0(s, rn);
887 gen_mov_F0_vreg(0, rm + 1);
889 gen_movl_reg_T0(s, rd);
894 gen_movl_T0_reg(s, rd);
895 gen_movl_T1_reg(s, rn);
897 gen_mov_vreg_F0(1, rm);
899 gen_movl_T0_reg(s, rn);
901 gen_mov_vreg_F0(0, rm);
902 gen_movl_T0_reg(s, rd);
904 gen_mov_vreg_F0(0, rm + 1);
909 rn = (insn >> 16) & 0xf;
911 rd = (insn >> 12) & 0xf;
913 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
914 gen_movl_T1_reg(s, rn);
915 if ((insn & 0x01200000) == 0x01000000) {
916 /* Single load/store */
917 offset = (insn & 0xff) << 2;
918 if ((insn & (1 << 23)) == 0)
920 gen_op_addl_T1_im(offset);
921 if (insn & (1 << 20)) {
923 gen_mov_vreg_F0(dp, rd);
925 gen_mov_F0_vreg(dp, rd);
929 /* load/store multiple */
931 n = (insn >> 1) & 0x7f;
935 if (insn & (1 << 24)) /* pre-decrement */
936 gen_op_addl_T1_im(-((insn & 0xff) << 2));
942 for (i = 0; i < n; i++) {
943 if (insn & (1 << 20)) {
946 gen_mov_vreg_F0(dp, rd + i);
949 gen_mov_F0_vreg(dp, rd + i);
952 gen_op_addl_T1_im(offset);
954 if (insn & (1 << 21)) {
956 if (insn & (1 << 24))
957 offset = -offset * n;
958 else if (dp && (insn & 1))
964 gen_op_addl_T1_im(offset);
965 gen_movl_reg_T1(s, rn);
971 /* Should never happen. */
977 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
979 TranslationBlock *tb;
982 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
984 gen_op_goto_tb0(TBPARAM(tb));
986 gen_op_goto_tb1(TBPARAM(tb));
987 gen_op_movl_T0_im(dest);
988 gen_op_movl_r15_T0();
989 gen_op_movl_T0_im((long)tb + n);
992 gen_op_movl_T0_im(dest);
993 gen_op_movl_r15_T0();
999 static inline void gen_jmp (DisasContext *s, uint32_t dest)
1001 if (__builtin_expect(s->singlestep_enabled, 0)) {
1002 /* An indirect jump so that we still trigger the debug exception. */
1005 gen_op_movl_T0_im(dest);
1008 gen_goto_tb(s, 0, dest);
1009 s->is_jmp = DISAS_TB_JUMP;
1013 static inline void gen_mulxy(int x, int y)
1016 gen_op_sarl_T0_im(16);
1020 gen_op_sarl_T1_im(16);
1026 /* Return the mask of PSR bits set by a MSR instruction. */
1027 static uint32_t msr_mask(DisasContext *s, int flags) {
1031 if (flags & (1 << 0))
1033 if (flags & (1 << 1))
1035 if (flags & (1 << 2))
1037 if (flags & (1 << 3))
1039 /* Mask out undefined bits and state bits. */
1041 /* Mask out privileged bits. */
1047 /* Returns nonzero if access to the PSR is not permitted. */
1048 static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
1051 /* ??? This is also undefined in system mode. */
1054 gen_op_movl_spsr_T0(mask);
1056 gen_op_movl_cpsr_T0(mask);
1062 static void gen_exception_return(DisasContext *s)
1064 gen_op_movl_reg_TN[0][15]();
1065 gen_op_movl_T0_spsr();
1066 gen_op_movl_cpsr_T0(0xffffffff);
1067 s->is_jmp = DISAS_UPDATE;
1070 static void disas_arm_insn(CPUState * env, DisasContext *s)
1072 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
1074 insn = ldl_code(s->pc);
1079 /* Unconditional instructions. */
1080 if ((insn & 0x0d70f000) == 0x0550f000)
1082 else if ((insn & 0x0e000000) == 0x0a000000) {
1083 /* branch link and change to thumb (blx <offset>) */
1086 val = (uint32_t)s->pc;
1087 gen_op_movl_T0_im(val);
1088 gen_movl_reg_T0(s, 14);
1089 /* Sign-extend the 24-bit offset */
1090 offset = (((int32_t)insn) << 8) >> 8;
1091 /* offset * 4 + bit24 * 2 + (thumb bit) */
1092 val += (offset << 2) | ((insn >> 23) & 2) | 1;
1093 /* pipeline offset */
1095 gen_op_movl_T0_im(val);
1098 } else if ((insn & 0x0fe00000) == 0x0c400000) {
1099 /* Coprocessor double register transfer. */
1100 } else if ((insn & 0x0f000010) == 0x0e000010) {
1101 /* Additional coprocessor register transfer. */
1102 } else if ((insn & 0x0ff10010) == 0x01000000) {
1103 /* cps (privileged) */
1104 } else if ((insn & 0x0ffffdff) == 0x01010000) {
1106 if (insn & (1 << 9)) {
1107 /* BE8 mode not implemented. */
1115 /* if not always execute, we generate a conditional jump to
1117 s->condlabel = gen_new_label();
1118 gen_test_cc[cond ^ 1](s->condlabel);
1120 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
1121 //s->is_jmp = DISAS_JUMP_NEXT;
1123 if ((insn & 0x0f900000) == 0x03000000) {
1124 if ((insn & 0x0fb0f000) != 0x0320f000)
1126 /* CPSR = immediate */
1128 shift = ((insn >> 8) & 0xf) * 2;
1130 val = (val >> shift) | (val << (32 - shift));
1131 gen_op_movl_T0_im(val);
1132 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf),
1133 (insn & (1 << 22)) != 0))
1135 } else if ((insn & 0x0f900000) == 0x01000000
1136 && (insn & 0x00000090) != 0x00000090) {
1137 /* miscellaneous instructions */
1138 op1 = (insn >> 21) & 3;
1139 sh = (insn >> 4) & 0xf;
1142 case 0x0: /* move program status register */
1145 gen_movl_T0_reg(s, rm);
1146 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf),
1151 rd = (insn >> 12) & 0xf;
1155 gen_op_movl_T0_spsr();
1157 gen_op_movl_T0_cpsr();
1159 gen_movl_reg_T0(s, rd);
1164 /* branch/exchange thumb (bx). */
1165 gen_movl_T0_reg(s, rm);
1167 } else if (op1 == 3) {
1169 rd = (insn >> 12) & 0xf;
1170 gen_movl_T0_reg(s, rm);
1172 gen_movl_reg_T0(s, rd);
1180 /* Trivial implementation equivalent to bx. */
1181 gen_movl_T0_reg(s, rm);
1191 /* branch link/exchange thumb (blx) */
1192 val = (uint32_t)s->pc;
1193 gen_op_movl_T0_im(val);
1194 gen_movl_reg_T0(s, 14);
1195 gen_movl_T0_reg(s, rm);
1198 case 0x5: /* saturating add/subtract */
1199 rd = (insn >> 12) & 0xf;
1200 rn = (insn >> 16) & 0xf;
1201 gen_movl_T0_reg(s, rm);
1202 gen_movl_T1_reg(s, rn);
1204 gen_op_double_T1_saturate();
1206 gen_op_subl_T0_T1_saturate();
1208 gen_op_addl_T0_T1_saturate();
1209 gen_movl_reg_T0(s, rd);
1211 case 0x8: /* signed multiply */
1215 rs = (insn >> 8) & 0xf;
1216 rn = (insn >> 12) & 0xf;
1217 rd = (insn >> 16) & 0xf;
1219 /* (32 * 16) >> 16 */
1220 gen_movl_T0_reg(s, rm);
1221 gen_movl_T1_reg(s, rs);
1223 gen_op_sarl_T1_im(16);
1226 gen_op_imulw_T0_T1();
1227 if ((sh & 2) == 0) {
1228 gen_movl_T1_reg(s, rn);
1229 gen_op_addl_T0_T1_setq();
1231 gen_movl_reg_T0(s, rd);
1234 gen_movl_T0_reg(s, rm);
1235 gen_movl_T1_reg(s, rs);
1236 gen_mulxy(sh & 2, sh & 4);
1238 gen_op_signbit_T1_T0();
1239 gen_op_addq_T0_T1(rn, rd);
1240 gen_movl_reg_T0(s, rn);
1241 gen_movl_reg_T1(s, rd);
1244 gen_movl_T1_reg(s, rn);
1245 gen_op_addl_T0_T1_setq();
1247 gen_movl_reg_T0(s, rd);
1254 } else if (((insn & 0x0e000000) == 0 &&
1255 (insn & 0x00000090) != 0x90) ||
1256 ((insn & 0x0e000000) == (1 << 25))) {
1257 int set_cc, logic_cc, shiftop;
1259 op1 = (insn >> 21) & 0xf;
1260 set_cc = (insn >> 20) & 1;
1261 logic_cc = table_logic_cc[op1] & set_cc;
1263 /* data processing instruction */
1264 if (insn & (1 << 25)) {
1265 /* immediate operand */
1267 shift = ((insn >> 8) & 0xf) * 2;
1269 val = (val >> shift) | (val << (32 - shift));
1270 gen_op_movl_T1_im(val);
1271 if (logic_cc && shift)
1276 gen_movl_T1_reg(s, rm);
1277 shiftop = (insn >> 5) & 3;
1278 if (!(insn & (1 << 4))) {
1279 shift = (insn >> 7) & 0x1f;
1282 gen_shift_T1_im_cc[shiftop](shift);
1284 gen_shift_T1_im[shiftop](shift);
1286 } else if (shiftop != 0) {
1288 gen_shift_T1_0_cc[shiftop]();
1290 gen_shift_T1_0[shiftop]();
1294 rs = (insn >> 8) & 0xf;
1295 gen_movl_T0_reg(s, rs);
1297 gen_shift_T1_T0_cc[shiftop]();
1299 gen_shift_T1_T0[shiftop]();
1303 if (op1 != 0x0f && op1 != 0x0d) {
1304 rn = (insn >> 16) & 0xf;
1305 gen_movl_T0_reg(s, rn);
1307 rd = (insn >> 12) & 0xf;
1310 gen_op_andl_T0_T1();
1311 gen_movl_reg_T0(s, rd);
1313 gen_op_logic_T0_cc();
1316 gen_op_xorl_T0_T1();
1317 gen_movl_reg_T0(s, rd);
1319 gen_op_logic_T0_cc();
1322 if (set_cc && rd == 15) {
1323 /* SUBS r15, ... is used for exception return. */
1326 gen_op_subl_T0_T1_cc();
1327 gen_exception_return(s);
1330 gen_op_subl_T0_T1_cc();
1332 gen_op_subl_T0_T1();
1333 gen_movl_reg_T0(s, rd);
1338 gen_op_rsbl_T0_T1_cc();
1340 gen_op_rsbl_T0_T1();
1341 gen_movl_reg_T0(s, rd);
1345 gen_op_addl_T0_T1_cc();
1347 gen_op_addl_T0_T1();
1348 gen_movl_reg_T0(s, rd);
1352 gen_op_adcl_T0_T1_cc();
1354 gen_op_adcl_T0_T1();
1355 gen_movl_reg_T0(s, rd);
1359 gen_op_sbcl_T0_T1_cc();
1361 gen_op_sbcl_T0_T1();
1362 gen_movl_reg_T0(s, rd);
1366 gen_op_rscl_T0_T1_cc();
1368 gen_op_rscl_T0_T1();
1369 gen_movl_reg_T0(s, rd);
1373 gen_op_andl_T0_T1();
1374 gen_op_logic_T0_cc();
1379 gen_op_xorl_T0_T1();
1380 gen_op_logic_T0_cc();
1385 gen_op_subl_T0_T1_cc();
1390 gen_op_addl_T0_T1_cc();
1395 gen_movl_reg_T0(s, rd);
1397 gen_op_logic_T0_cc();
1400 if (logic_cc && rd == 15) {
1401 /* MOVS r15, ... is used for exception return. */
1404 gen_op_movl_T0_T1();
1405 gen_exception_return(s);
1407 gen_movl_reg_T1(s, rd);
1409 gen_op_logic_T1_cc();
1413 gen_op_bicl_T0_T1();
1414 gen_movl_reg_T0(s, rd);
1416 gen_op_logic_T0_cc();
1421 gen_movl_reg_T1(s, rd);
1423 gen_op_logic_T1_cc();
1427 /* other instructions */
1428 op1 = (insn >> 24) & 0xf;
1432 /* multiplies, extra load/stores */
1433 sh = (insn >> 5) & 3;
1436 rd = (insn >> 16) & 0xf;
1437 rn = (insn >> 12) & 0xf;
1438 rs = (insn >> 8) & 0xf;
1440 if (((insn >> 22) & 3) == 0) {
1442 gen_movl_T0_reg(s, rs);
1443 gen_movl_T1_reg(s, rm);
1445 if (insn & (1 << 21)) {
1446 gen_movl_T1_reg(s, rn);
1447 gen_op_addl_T0_T1();
1449 if (insn & (1 << 20))
1450 gen_op_logic_T0_cc();
1451 gen_movl_reg_T0(s, rd);
1454 gen_movl_T0_reg(s, rs);
1455 gen_movl_T1_reg(s, rm);
1456 if (insn & (1 << 22))
1457 gen_op_imull_T0_T1();
1459 gen_op_mull_T0_T1();
1460 if (insn & (1 << 21)) /* mult accumulate */
1461 gen_op_addq_T0_T1(rn, rd);
1462 if (!(insn & (1 << 23))) { /* double accumulate */
1464 gen_op_addq_lo_T0_T1(rn);
1465 gen_op_addq_lo_T0_T1(rd);
1467 if (insn & (1 << 20))
1469 gen_movl_reg_T0(s, rn);
1470 gen_movl_reg_T1(s, rd);
1473 rn = (insn >> 16) & 0xf;
1474 rd = (insn >> 12) & 0xf;
1475 if (insn & (1 << 23)) {
1476 /* load/store exclusive */
1479 /* SWP instruction */
1482 gen_movl_T0_reg(s, rm);
1483 gen_movl_T1_reg(s, rn);
1484 if (insn & (1 << 22)) {
1489 gen_movl_reg_T0(s, rd);
1493 /* Misc load/store */
1494 rn = (insn >> 16) & 0xf;
1495 rd = (insn >> 12) & 0xf;
1496 gen_movl_T1_reg(s, rn);
1497 if (insn & (1 << 24))
1498 gen_add_datah_offset(s, insn);
1499 if (insn & (1 << 20)) {
1513 gen_movl_reg_T0(s, rd);
1514 } else if (sh & 2) {
1518 gen_movl_T0_reg(s, rd);
1520 gen_op_addl_T1_im(4);
1521 gen_movl_T0_reg(s, rd + 1);
1523 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1524 gen_op_addl_T1_im(-4);
1528 gen_movl_reg_T0(s, rd);
1529 gen_op_addl_T1_im(4);
1531 gen_movl_reg_T0(s, rd + 1);
1532 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1533 gen_op_addl_T1_im(-4);
1537 gen_movl_T0_reg(s, rd);
1540 if (!(insn & (1 << 24))) {
1541 gen_add_datah_offset(s, insn);
1542 gen_movl_reg_T1(s, rn);
1543 } else if (insn & (1 << 21)) {
1544 gen_movl_reg_T1(s, rn);
1552 /* load/store byte/word */
1553 rn = (insn >> 16) & 0xf;
1554 rd = (insn >> 12) & 0xf;
1555 gen_movl_T1_reg(s, rn);
1556 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
1557 if (insn & (1 << 24))
1558 gen_add_data_offset(s, insn);
1559 if (insn & (1 << 20)) {
1561 #if defined(CONFIG_USER_ONLY)
1562 if (insn & (1 << 22))
1567 if (insn & (1 << 22)) {
1571 gen_op_ldub_kernel();
1576 gen_op_ldl_kernel();
1582 gen_movl_reg_T0(s, rd);
1585 gen_movl_T0_reg(s, rd);
1586 #if defined(CONFIG_USER_ONLY)
1587 if (insn & (1 << 22))
1592 if (insn & (1 << 22)) {
1596 gen_op_stb_kernel();
1601 gen_op_stl_kernel();
1605 if (!(insn & (1 << 24))) {
1606 gen_add_data_offset(s, insn);
1607 gen_movl_reg_T1(s, rn);
1608 } else if (insn & (1 << 21))
1609 gen_movl_reg_T1(s, rn); {
1616 /* load/store multiple words */
1617 /* XXX: store correct base if write back */
1619 if (insn & (1 << 22)) {
1621 goto illegal_op; /* only usable in supervisor mode */
1623 if ((insn & (1 << 15)) == 0)
1626 rn = (insn >> 16) & 0xf;
1627 gen_movl_T1_reg(s, rn);
1629 /* compute total size */
1632 if (insn & (1 << i))
1635 /* XXX: test invalid n == 0 case ? */
1636 if (insn & (1 << 23)) {
1637 if (insn & (1 << 24)) {
1639 gen_op_addl_T1_im(4);
1641 /* post increment */
1644 if (insn & (1 << 24)) {
1646 gen_op_addl_T1_im(-(n * 4));
1648 /* post decrement */
1650 gen_op_addl_T1_im(-((n - 1) * 4));
1655 if (insn & (1 << i)) {
1656 if (insn & (1 << 20)) {
1662 gen_op_movl_user_T0(i);
1664 gen_movl_reg_T0(s, i);
1669 /* special case: r15 = PC + 12 */
1670 val = (long)s->pc + 8;
1671 gen_op_movl_TN_im[0](val);
1673 gen_op_movl_T0_user(i);
1675 gen_movl_T0_reg(s, i);
1680 /* no need to add after the last transfer */
1682 gen_op_addl_T1_im(4);
1685 if (insn & (1 << 21)) {
1687 if (insn & (1 << 23)) {
1688 if (insn & (1 << 24)) {
1691 /* post increment */
1692 gen_op_addl_T1_im(4);
1695 if (insn & (1 << 24)) {
1698 gen_op_addl_T1_im(-((n - 1) * 4));
1700 /* post decrement */
1701 gen_op_addl_T1_im(-(n * 4));
1704 gen_movl_reg_T1(s, rn);
1706 if ((insn & (1 << 22)) && !user) {
1707 /* Restore CPSR from SPSR. */
1708 gen_op_movl_T0_spsr();
1709 gen_op_movl_cpsr_T0(0xffffffff);
1710 s->is_jmp = DISAS_UPDATE;
1719 /* branch (and link) */
1720 val = (int32_t)s->pc;
1721 if (insn & (1 << 24)) {
1722 gen_op_movl_T0_im(val);
1723 gen_op_movl_reg_TN[0][14]();
1725 offset = (((int32_t)insn << 8) >> 8);
1726 val += (offset << 2) + 4;
1734 op1 = (insn >> 8) & 0xf;
1738 if (disas_vfp_insn (env, s, insn))
1742 if (disas_cp15_insn (s, insn))
1746 /* unknown coprocessor. */
1752 gen_op_movl_T0_im((long)s->pc);
1753 gen_op_movl_reg_TN[0][15]();
1755 s->is_jmp = DISAS_JUMP;
1759 gen_op_movl_T0_im((long)s->pc - 4);
1760 gen_op_movl_reg_TN[0][15]();
1761 gen_op_undef_insn();
1762 s->is_jmp = DISAS_JUMP;
1768 static void disas_thumb_insn(DisasContext *s)
1770 uint32_t val, insn, op, rm, rn, rd, shift, cond;
1774 insn = lduw_code(s->pc);
1777 switch (insn >> 12) {
1780 op = (insn >> 11) & 3;
1783 rn = (insn >> 3) & 7;
1784 gen_movl_T0_reg(s, rn);
1785 if (insn & (1 << 10)) {
1787 gen_op_movl_T1_im((insn >> 6) & 7);
1790 rm = (insn >> 6) & 7;
1791 gen_movl_T1_reg(s, rm);
1793 if (insn & (1 << 9))
1794 gen_op_subl_T0_T1_cc();
1796 gen_op_addl_T0_T1_cc();
1797 gen_movl_reg_T0(s, rd);
1799 /* shift immediate */
1800 rm = (insn >> 3) & 7;
1801 shift = (insn >> 6) & 0x1f;
1802 gen_movl_T0_reg(s, rm);
1803 gen_shift_T0_im_thumb[op](shift);
1804 gen_movl_reg_T0(s, rd);
1808 /* arithmetic large immediate */
1809 op = (insn >> 11) & 3;
1810 rd = (insn >> 8) & 0x7;
1812 gen_op_movl_T0_im(insn & 0xff);
1814 gen_movl_T0_reg(s, rd);
1815 gen_op_movl_T1_im(insn & 0xff);
1819 gen_op_logic_T0_cc();
1822 gen_op_subl_T0_T1_cc();
1825 gen_op_addl_T0_T1_cc();
1828 gen_op_subl_T0_T1_cc();
1832 gen_movl_reg_T0(s, rd);
1835 if (insn & (1 << 11)) {
1836 rd = (insn >> 8) & 7;
1837 /* load pc-relative. Bit 1 of PC is ignored. */
1838 val = s->pc + 2 + ((insn & 0xff) * 4);
1839 val &= ~(uint32_t)2;
1840 gen_op_movl_T1_im(val);
1842 gen_movl_reg_T0(s, rd);
1845 if (insn & (1 << 10)) {
1846 /* data processing extended or blx */
1847 rd = (insn & 7) | ((insn >> 4) & 8);
1848 rm = (insn >> 3) & 0xf;
1849 op = (insn >> 8) & 3;
1852 gen_movl_T0_reg(s, rd);
1853 gen_movl_T1_reg(s, rm);
1854 gen_op_addl_T0_T1();
1855 gen_movl_reg_T0(s, rd);
1858 gen_movl_T0_reg(s, rd);
1859 gen_movl_T1_reg(s, rm);
1860 gen_op_subl_T0_T1_cc();
1862 case 2: /* mov/cpy */
1863 gen_movl_T0_reg(s, rm);
1864 gen_movl_reg_T0(s, rd);
1866 case 3:/* branch [and link] exchange thumb register */
1867 if (insn & (1 << 7)) {
1868 val = (uint32_t)s->pc | 1;
1869 gen_op_movl_T1_im(val);
1870 gen_movl_reg_T1(s, 14);
1872 gen_movl_T0_reg(s, rm);
1879 /* data processing register */
1881 rm = (insn >> 3) & 7;
1882 op = (insn >> 6) & 0xf;
1883 if (op == 2 || op == 3 || op == 4 || op == 7) {
1884 /* the shift/rotate ops want the operands backwards */
1893 if (op == 9) /* neg */
1894 gen_op_movl_T0_im(0);
1895 else if (op != 0xf) /* mvn doesn't read its first operand */
1896 gen_movl_T0_reg(s, rd);
1898 gen_movl_T1_reg(s, rm);
1901 gen_op_andl_T0_T1();
1902 gen_op_logic_T0_cc();
1905 gen_op_xorl_T0_T1();
1906 gen_op_logic_T0_cc();
1909 gen_op_shll_T1_T0_cc();
1912 gen_op_shrl_T1_T0_cc();
1915 gen_op_sarl_T1_T0_cc();
1918 gen_op_adcl_T0_T1_cc();
1921 gen_op_sbcl_T0_T1_cc();
1924 gen_op_rorl_T1_T0_cc();
1927 gen_op_andl_T0_T1();
1928 gen_op_logic_T0_cc();
1932 gen_op_subl_T0_T1_cc();
1935 gen_op_subl_T0_T1_cc();
1939 gen_op_addl_T0_T1_cc();
1944 gen_op_logic_T0_cc();
1947 gen_op_mull_T0_T1();
1948 gen_op_logic_T0_cc();
1951 gen_op_bicl_T0_T1();
1952 gen_op_logic_T0_cc();
1956 gen_op_logic_T1_cc();
1963 gen_movl_reg_T1(s, rm);
1965 gen_movl_reg_T0(s, rd);
1970 /* load/store register offset. */
1972 rn = (insn >> 3) & 7;
1973 rm = (insn >> 6) & 7;
1974 op = (insn >> 9) & 7;
1975 gen_movl_T1_reg(s, rn);
1976 gen_movl_T2_reg(s, rm);
1977 gen_op_addl_T1_T2();
1979 if (op < 3) /* store */
1980 gen_movl_T0_reg(s, rd);
2008 if (op >= 3) /* load */
2009 gen_movl_reg_T0(s, rd);
2013 /* load/store word immediate offset */
2015 rn = (insn >> 3) & 7;
2016 gen_movl_T1_reg(s, rn);
2017 val = (insn >> 4) & 0x7c;
2018 gen_op_movl_T2_im(val);
2019 gen_op_addl_T1_T2();
2021 if (insn & (1 << 11)) {
2024 gen_movl_reg_T0(s, rd);
2027 gen_movl_T0_reg(s, rd);
2033 /* load/store byte immediate offset */
2035 rn = (insn >> 3) & 7;
2036 gen_movl_T1_reg(s, rn);
2037 val = (insn >> 6) & 0x1f;
2038 gen_op_movl_T2_im(val);
2039 gen_op_addl_T1_T2();
2041 if (insn & (1 << 11)) {
2044 gen_movl_reg_T0(s, rd);
2047 gen_movl_T0_reg(s, rd);
2053 /* load/store halfword immediate offset */
2055 rn = (insn >> 3) & 7;
2056 gen_movl_T1_reg(s, rn);
2057 val = (insn >> 5) & 0x3e;
2058 gen_op_movl_T2_im(val);
2059 gen_op_addl_T1_T2();
2061 if (insn & (1 << 11)) {
2064 gen_movl_reg_T0(s, rd);
2067 gen_movl_T0_reg(s, rd);
2073 /* load/store from stack */
2074 rd = (insn >> 8) & 7;
2075 gen_movl_T1_reg(s, 13);
2076 val = (insn & 0xff) * 4;
2077 gen_op_movl_T2_im(val);
2078 gen_op_addl_T1_T2();
2080 if (insn & (1 << 11)) {
2083 gen_movl_reg_T0(s, rd);
2086 gen_movl_T0_reg(s, rd);
2092 /* add to high reg */
2093 rd = (insn >> 8) & 7;
2094 if (insn & (1 << 11)) {
2096 gen_movl_T0_reg(s, 13);
2098 /* PC. bit 1 is ignored. */
2099 gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
2101 val = (insn & 0xff) * 4;
2102 gen_op_movl_T1_im(val);
2103 gen_op_addl_T0_T1();
2104 gen_movl_reg_T0(s, rd);
2109 op = (insn >> 8) & 0xf;
2112 /* adjust stack pointer */
2113 gen_movl_T1_reg(s, 13);
2114 val = (insn & 0x7f) * 4;
2115 if (insn & (1 << 7))
2116 val = -(int32_t)val;
2117 gen_op_movl_T2_im(val);
2118 gen_op_addl_T1_T2();
2119 gen_movl_reg_T1(s, 13);
2122 case 4: case 5: case 0xc: case 0xd:
2124 gen_movl_T1_reg(s, 13);
2125 if (insn & (1 << 8))
2129 for (i = 0; i < 8; i++) {
2130 if (insn & (1 << i))
2133 if ((insn & (1 << 11)) == 0) {
2134 gen_op_movl_T2_im(-offset);
2135 gen_op_addl_T1_T2();
2137 gen_op_movl_T2_im(4);
2138 for (i = 0; i < 8; i++) {
2139 if (insn & (1 << i)) {
2140 if (insn & (1 << 11)) {
2143 gen_movl_reg_T0(s, i);
2146 gen_movl_T0_reg(s, i);
2149 /* advance to the next address. */
2150 gen_op_addl_T1_T2();
2153 if (insn & (1 << 8)) {
2154 if (insn & (1 << 11)) {
2157 /* don't set the pc until the rest of the instruction
2161 gen_movl_T0_reg(s, 14);
2164 gen_op_addl_T1_T2();
2166 if ((insn & (1 << 11)) == 0) {
2167 gen_op_movl_T2_im(-offset);
2168 gen_op_addl_T1_T2();
2170 /* write back the new stack pointer */
2171 gen_movl_reg_T1(s, 13);
2172 /* set the new PC value */
2173 if ((insn & 0x0900) == 0x0900)
2183 /* load/store multiple */
2184 rn = (insn >> 8) & 0x7;
2185 gen_movl_T1_reg(s, rn);
2186 gen_op_movl_T2_im(4);
2187 for (i = 0; i < 8; i++) {
2188 if (insn & (1 << i)) {
2189 if (insn & (1 << 11)) {
2192 gen_movl_reg_T0(s, i);
2195 gen_movl_T0_reg(s, i);
2198 /* advance to the next address */
2199 gen_op_addl_T1_T2();
2202 /* Base register writeback. */
2203 if ((insn & (1 << rn)) == 0)
2204 gen_movl_reg_T1(s, rn);
2208 /* conditional branch or swi */
2209 cond = (insn >> 8) & 0xf;
2215 gen_op_movl_T0_im((long)s->pc | 1);
2216 /* Don't set r15. */
2217 gen_op_movl_reg_TN[0][15]();
2219 s->is_jmp = DISAS_JUMP;
2222 /* generate a conditional jump to next instruction */
2223 s->condlabel = gen_new_label();
2224 gen_test_cc[cond ^ 1](s->condlabel);
2226 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
2227 //s->is_jmp = DISAS_JUMP_NEXT;
2228 gen_movl_T1_reg(s, 15);
2230 /* jump to the offset */
2231 val = (uint32_t)s->pc + 2;
2232 offset = ((int32_t)insn << 24) >> 24;
2238 /* unconditional branch */
2239 if (insn & (1 << 11))
2240 goto undef; /* Second half of a blx */
2241 val = (uint32_t)s->pc;
2242 offset = ((int32_t)insn << 21) >> 21;
2243 val += (offset << 1) + 2;
2248 /* branch and link [and switch to arm] */
2249 offset = ((int32_t)insn << 21) >> 10;
2250 insn = lduw_code(s->pc);
2251 offset |= insn & 0x7ff;
2253 val = (uint32_t)s->pc + 2;
2254 gen_op_movl_T1_im(val | 1);
2255 gen_movl_reg_T1(s, 14);
2258 if (insn & (1 << 12)) {
2263 val &= ~(uint32_t)2;
2264 gen_op_movl_T0_im(val);
2270 gen_op_movl_T0_im((long)s->pc - 2);
2271 gen_op_movl_reg_TN[0][15]();
2272 gen_op_undef_insn();
2273 s->is_jmp = DISAS_JUMP;
2276 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
2277 basic block 'tb'. If search_pc is TRUE, also generate PC
2278 information for each intermediate instruction. */
2279 static inline int gen_intermediate_code_internal(CPUState *env,
2280 TranslationBlock *tb,
/* Per-block translation state lives on the stack; dc is the working pointer. */
2283 DisasContext dc1, *dc = &dc1;
2284 uint16_t *gen_opc_end;
2286 target_ulong pc_start;
2287 uint32_t next_page_start;
2289 /* generate intermediate code */
/* Reset the micro-op and parameter output buffers for this block. */
2294 gen_opc_ptr = gen_opc_buf;
2295 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2296 gen_opparam_ptr = gen_opparam_buf;
2298 dc->is_jmp = DISAS_NEXT;
2300 dc->singlestep_enabled = env->singlestep_enabled;
2302 dc->thumb = env->thumb;
2303 #if !defined(CONFIG_USER_ONLY)
/* System emulation only: record whether the guest is currently in USR
   mode so memory accesses can use the unprivileged variants.  */
2304 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
/* Translation must never cross a guest page boundary (see loop condition
   below) so prefetch aborts are raised at the correct PC.  */
2306 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
/* If a breakpoint matches the current PC, materialise the PC in r15 and
   end the block with a jump so the debug exception can be taken.  */
2310 if (env->nb_breakpoints > 0) {
2311 for(j = 0; j < env->nb_breakpoints; j++) {
2312 if (env->breakpoints[j] == dc->pc) {
2313 gen_op_movl_T0_im((long)dc->pc);
2314 gen_op_movl_reg_TN[0][15]();
2316 dc->is_jmp = DISAS_JUMP;
/* search_pc bookkeeping: record, for each emitted micro-op, the guest PC
   of the instruction that produced it.  */
2322 j = gen_opc_ptr - gen_opc_buf;
2326 gen_opc_instr_start[lj++] = 0;
2328 gen_opc_pc[lj] = dc->pc;
2329 gen_opc_instr_start[lj] = 1;
/* Decode one guest instruction in the current instruction set (Thumb
   vs ARM).  */
2333 disas_thumb_insn(dc);
2335 disas_arm_insn(env, dc);
/* If the instruction was conditionally skipped and did not end the
   block, bind the skip label so the next instruction follows it.  */
2337 if (dc->condjmp && !dc->is_jmp) {
2338 gen_set_label(dc->condlabel);
2341 /* Translation stops when a conditional branch is encountered.
2342 * Otherwise the subsequent code could get translated several times.
2343 * Also stop translation when a page boundary is reached. This
2344 * ensures prefetch aborts occur at the right place. */
2345 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
2346 !env->singlestep_enabled &&
2347 dc->pc < next_page_start);
2348 /* At this stage dc->condjmp will only be set when the skipped
2349 * instruction was a conditional branch, and the PC has already been
2351 if (__builtin_expect(env->singlestep_enabled, 0)) {
2352 /* Make sure the pc is updated, and raise a debug exception. */
2355 gen_set_label(dc->condlabel);
2357 if (dc->condjmp || !dc->is_jmp) {
2358 gen_op_movl_T0_im((long)dc->pc);
2359 gen_op_movl_reg_TN[0][15]();
/* Emit the block epilogue according to how translation ended. */
2364 switch(dc->is_jmp) {
2366 gen_goto_tb(dc, 1, dc->pc);
2371 /* indicate that the hash table must be used to find the next TB */
2376 /* nothing more to generate */
/* Pending conditional skip: the fall-through path resumes at dc->pc. */
2380 gen_set_label(dc->condlabel);
2381 gen_goto_tb(dc, 1, dc->pc);
2385 *gen_opc_ptr = INDEX_op_end;
/* Optional debug logging of the guest code and the generated ops. */
2388 if (loglevel & CPU_LOG_TB_IN_ASM) {
2389 fprintf(logfile, "----------------\n");
2390 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
2391 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2392 fprintf(logfile, "\n");
2393 if (loglevel & (CPU_LOG_TB_OP)) {
2394 fprintf(logfile, "OP:\n");
2395 dump_ops(gen_opc_buf, gen_opparam_buf);
2396 fprintf(logfile, "\n");
/* search_pc: zero-pad instr_start so the table covers the trailing
   micro-ops emitted after the last instruction.  */
2401 j = gen_opc_ptr - gen_opc_buf;
2404 gen_opc_instr_start[lj++] = 0;
2407 tb->size = dc->pc - pc_start;
/* Translate one basic block; no PC-search information is generated. */
2412 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2414 return gen_intermediate_code_internal(env, tb, 0);
/* Translate one basic block and also record per-micro-op guest PC
   information (search_pc = 1), used to recover the PC on exceptions. */
2417 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2419 return gen_intermediate_code_internal(env, tb, 1);
2422 void cpu_reset(CPUARMState *env)
2424 #if defined (CONFIG_USER_ONLY)
2425 /* SVC mode with interrupts disabled. */
2426 env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
2428 env->uncached_cpsr = ARM_CPU_MODE_USR;
/* Allocate and zero-initialise a fresh CPU state structure.  Ownership
   passes to the caller — presumably released via cpu_arm_close(), whose
   body is not visible here (TODO confirm).  */
2433 CPUARMState *cpu_arm_init(void)
2437 env = qemu_mallocz(sizeof(CPUARMState));
/* Tear down a CPU state previously created by cpu_arm_init(). */
2446 void cpu_arm_close(CPUARMState *env)
/* Printable names indexed by the low 4 bits of the CPSR mode field (see
   cpu_dump_state: cpu_mode_names[psr & 0xf]); "???" marks encodings with
   no architected mode.  */
2451 static const char *cpu_mode_names[16] = {
2452 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
2453 "???", "???", "???", "und", "???", "???", "???", "sys"
/* Dump the integer registers, PSR flags and VFP registers to 'f' via
   cpu_fprintf, for debugging.  */
2455 void cpu_dump_state(CPUState *env, FILE *f,
2456 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
/* Print r0-r15, four per output line. */
2468 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2470 cpu_fprintf(f, "\n");
2472 cpu_fprintf(f, " ");
2474 psr = cpsr_read(env);
/* NOTE(review): the format string carries one more conversion (%x) than
   the arguments visible in this view supply — verify the argument count
   against the elided lines; a mismatch is undefined behavior.  */
2475 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d %x\n",
2477 psr & (1 << 31) ? 'N' : '-',
2478 psr & (1 << 30) ? 'Z' : '-',
2479 psr & (1 << 29) ? 'C' : '-',
2480 psr & (1 << 28) ? 'V' : '-',
/* 'T' = Thumb state, 'A' = ARM state. */
2481 psr & CPSR_T ? 'T' : 'A',
2482 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
/* VFP dump: each double d<i> is also shown as singles s<2i>/s<2i+1>. */
2484 for (i = 0; i < 16; i++) {
2485 d.d = env->vfp.regs[i];
/* NOTE(review): the odd-numbered single below prints s0 a second time —
   it almost certainly should print s1 (the upper half of the double).
   Confirm against the elided s0/s1 assignments and fix.  */
2488 cpu_fprintf(f, "s%02d=%08x(%8f) s%02d=%08x(%8f) d%02d=%08x%08x(%8f)\n",
2489 i * 2, (int)s0.i, s0.s,
2490 i * 2 + 1, (int)s0.i, s0.s,
2491 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
2494 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.fpscr);