4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
31 #define ENABLE_ARCH_5J 0
32 #define ENABLE_ARCH_6 1
33 #define ENABLE_ARCH_6T2 1
35 #define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
37 /* internal defines */
38 typedef struct DisasContext {
41 /* Nonzero if this instruction has been conditionally skipped. */
43 /* The label that will be jumped to when the instruction is skipped. */
45 struct TranslationBlock *tb;
46 int singlestep_enabled;
49 #if !defined(CONFIG_USER_ONLY)
54 #if defined(CONFIG_USER_ONLY)
57 #define IS_USER(s) (s->user)
60 #define DISAS_JUMP_NEXT 4
62 #ifdef USE_DIRECT_JUMP
65 #define TBPARAM(x) (long)(x)
68 /* XXX: move that elsewhere */
69 static uint16_t *gen_opc_ptr;
70 static uint32_t *gen_opparam_ptr;
75 #define DEF(s, n, copy_size) INDEX_op_ ## s,
83 static GenOpFunc1 *gen_test_cc[14] = {
100 const uint8_t table_logic_cc[16] = {
119 static GenOpFunc1 *gen_shift_T1_im[4] = {
126 static GenOpFunc *gen_shift_T1_0[4] = {
133 static GenOpFunc1 *gen_shift_T2_im[4] = {
140 static GenOpFunc *gen_shift_T2_0[4] = {
147 static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
148 gen_op_shll_T1_im_cc,
149 gen_op_shrl_T1_im_cc,
150 gen_op_sarl_T1_im_cc,
151 gen_op_rorl_T1_im_cc,
154 static GenOpFunc *gen_shift_T1_0_cc[4] = {
161 static GenOpFunc *gen_shift_T1_T0[4] = {
168 static GenOpFunc *gen_shift_T1_T0_cc[4] = {
169 gen_op_shll_T1_T0_cc,
170 gen_op_shrl_T1_T0_cc,
171 gen_op_sarl_T1_T0_cc,
172 gen_op_rorl_T1_T0_cc,
175 static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
232 static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
271 static GenOpFunc1 *gen_op_movl_TN_im[3] = {
277 static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
278 gen_op_shll_T0_im_thumb,
279 gen_op_shrl_T0_im_thumb,
280 gen_op_sarl_T0_im_thumb,
283 static inline void gen_bx(DisasContext *s)
285 s->is_jmp = DISAS_UPDATE;
290 #if defined(CONFIG_USER_ONLY)
291 #define gen_ldst(name, s) gen_op_##name##_raw()
293 #define gen_ldst(name, s) do { \
296 gen_op_##name##_user(); \
298 gen_op_##name##_kernel(); \
302 static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
307 /* normaly, since we updated PC, we need only to add one insn */
309 val = (long)s->pc + 2;
311 val = (long)s->pc + 4;
312 gen_op_movl_TN_im[t](val);
314 gen_op_movl_TN_reg[t][reg]();
318 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
320 gen_movl_TN_reg(s, reg, 0);
323 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
325 gen_movl_TN_reg(s, reg, 1);
328 static inline void gen_movl_T2_reg(DisasContext *s, int reg)
330 gen_movl_TN_reg(s, reg, 2);
333 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
335 gen_op_movl_reg_TN[t][reg]();
337 s->is_jmp = DISAS_JUMP;
341 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
343 gen_movl_reg_TN(s, reg, 0);
346 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
348 gen_movl_reg_TN(s, reg, 1);
351 /* Force a TB lookup after an instruction that changes the CPU state. */
352 static inline void gen_lookup_tb(DisasContext *s)
354 gen_op_movl_T0_im(s->pc);
355 gen_movl_reg_T0(s, 15);
356 s->is_jmp = DISAS_UPDATE;
359 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
361 int val, rm, shift, shiftop;
363 if (!(insn & (1 << 25))) {
366 if (!(insn & (1 << 23)))
369 gen_op_addl_T1_im(val);
373 shift = (insn >> 7) & 0x1f;
374 gen_movl_T2_reg(s, rm);
375 shiftop = (insn >> 5) & 3;
377 gen_shift_T2_im[shiftop](shift);
378 } else if (shiftop != 0) {
379 gen_shift_T2_0[shiftop]();
381 if (!(insn & (1 << 23)))
388 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
393 if (insn & (1 << 22)) {
395 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
396 if (!(insn & (1 << 23)))
400 gen_op_addl_T1_im(val);
404 gen_op_addl_T1_im(extra);
406 gen_movl_T2_reg(s, rm);
407 if (!(insn & (1 << 23)))
414 #define VFP_OP(name) \
415 static inline void gen_vfp_##name(int dp) \
418 gen_op_vfp_##name##d(); \
420 gen_op_vfp_##name##s(); \
442 static inline void gen_vfp_ld(DisasContext *s, int dp)
445 gen_ldst(vfp_ldd, s);
447 gen_ldst(vfp_lds, s);
450 static inline void gen_vfp_st(DisasContext *s, int dp)
453 gen_ldst(vfp_std, s);
455 gen_ldst(vfp_sts, s);
459 vfp_reg_offset (int dp, int reg)
462 return offsetof(CPUARMState, vfp.regs[reg]);
464 return offsetof(CPUARMState, vfp.regs[reg >> 1])
465 + offsetof(CPU_DoubleU, l.upper);
467 return offsetof(CPUARMState, vfp.regs[reg >> 1])
468 + offsetof(CPU_DoubleU, l.lower);
471 static inline void gen_mov_F0_vreg(int dp, int reg)
474 gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
476 gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
479 static inline void gen_mov_F1_vreg(int dp, int reg)
482 gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
484 gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
487 static inline void gen_mov_vreg_F0(int dp, int reg)
490 gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
492 gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
495 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
496 instruction is not defined. */
497 static int disas_cp15_insn(DisasContext *s, uint32_t insn)
501 /* ??? Some cp15 registers are accessible from userspace. */
505 if ((insn & 0x0fff0fff) == 0x0e070f90
506 || (insn & 0x0fff0fff) == 0x0e070f58) {
507 /* Wait for interrupt. */
508 gen_op_movl_T0_im((long)s->pc);
509 gen_op_movl_reg_TN[0][15]();
511 s->is_jmp = DISAS_JUMP;
514 rd = (insn >> 12) & 0xf;
515 if (insn & (1 << 20)) {
516 gen_op_movl_T0_cp15(insn);
517 /* If the destination register is r15 then sets condition codes. */
519 gen_movl_reg_T0(s, rd);
521 gen_movl_T0_reg(s, rd);
522 gen_op_movl_cp15_T0(insn);
528 /* Disassemble a VFP instruction. Returns nonzero if an error occured
529 (ie. an undefined instruction). */
530 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
532 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
535 if (!arm_feature(env, ARM_FEATURE_VFP))
538 if ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) == 0) {
539 /* VFP disabled. Only allow fmxr/fmrx to/from fpexc and fpsid. */
540 if ((insn & 0x0fe00fff) != 0x0ee00a10)
542 rn = (insn >> 16) & 0xf;
543 if (rn != 0 && rn != 8)
546 dp = ((insn & 0xf00) == 0xb00);
547 switch ((insn >> 24) & 0xf) {
549 if (insn & (1 << 4)) {
550 /* single register transfer */
551 if ((insn & 0x6f) != 0x00)
553 rd = (insn >> 12) & 0xf;
557 rn = (insn >> 16) & 0xf;
558 /* Get the existing value even for arm->vfp moves because
559 we only set half the register. */
560 gen_mov_F0_vreg(1, rn);
562 if (insn & (1 << 20)) {
564 if (insn & (1 << 21))
565 gen_movl_reg_T1(s, rd);
567 gen_movl_reg_T0(s, rd);
570 if (insn & (1 << 21))
571 gen_movl_T1_reg(s, rd);
573 gen_movl_T0_reg(s, rd);
575 gen_mov_vreg_F0(dp, rn);
578 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
579 if (insn & (1 << 20)) {
581 if (insn & (1 << 21)) {
582 /* system register */
588 case ARM_VFP_FPINST2:
589 gen_op_vfp_movl_T0_xreg(rn);
593 gen_op_vfp_movl_T0_fpscr_flags();
595 gen_op_vfp_movl_T0_fpscr();
601 gen_mov_F0_vreg(0, rn);
605 /* Set the 4 flag bits in the CPSR. */
606 gen_op_movl_cpsr_T0(0xf0000000);
608 gen_movl_reg_T0(s, rd);
611 gen_movl_T0_reg(s, rd);
612 if (insn & (1 << 21)) {
614 /* system register */
617 /* Writes are ignored. */
620 gen_op_vfp_movl_fpscr_T0();
624 gen_op_vfp_movl_xreg_T0(rn);
628 case ARM_VFP_FPINST2:
629 gen_op_vfp_movl_xreg_T0(rn);
636 gen_mov_vreg_F0(0, rn);
641 /* data processing */
642 /* The opcode is in bits 23, 21, 20 and 6. */
643 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
647 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
649 /* rn is register number */
652 rn = (insn >> 16) & 0xf;
655 if (op == 15 && (rn == 15 || rn > 17)) {
656 /* Integer or single precision destination. */
657 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
659 if (insn & (1 << 22))
661 rd = (insn >> 12) & 0xf;
664 if (op == 15 && (rn == 16 || rn == 17)) {
665 /* Integer source. */
666 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
673 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
674 if (op == 15 && rn == 15) {
675 /* Double precision destination. */
676 if (insn & (1 << 22))
678 rd = (insn >> 12) & 0xf;
680 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
681 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
684 veclen = env->vfp.vec_len;
685 if (op == 15 && rn > 3)
688 /* Shut up compiler warnings. */
699 /* Figure out what type of vector operation this is. */
700 if ((rd & bank_mask) == 0) {
705 delta_d = (env->vfp.vec_stride >> 1) + 1;
707 delta_d = env->vfp.vec_stride + 1;
709 if ((rm & bank_mask) == 0) {
710 /* mixed scalar/vector */
719 /* Load the initial operands. */
725 gen_mov_F0_vreg(0, rm);
730 gen_mov_F0_vreg(dp, rd);
731 gen_mov_F1_vreg(dp, rm);
735 /* Compare with zero */
736 gen_mov_F0_vreg(dp, rd);
740 /* One source operand. */
741 gen_mov_F0_vreg(dp, rm);
744 /* Two source operands. */
745 gen_mov_F0_vreg(dp, rn);
746 gen_mov_F1_vreg(dp, rm);
750 /* Perform the calculation. */
752 case 0: /* mac: fd + (fn * fm) */
754 gen_mov_F1_vreg(dp, rd);
757 case 1: /* nmac: fd - (fn * fm) */
760 gen_mov_F1_vreg(dp, rd);
763 case 2: /* msc: -fd + (fn * fm) */
765 gen_mov_F1_vreg(dp, rd);
768 case 3: /* nmsc: -fd - (fn * fm) */
770 gen_mov_F1_vreg(dp, rd);
774 case 4: /* mul: fn * fm */
777 case 5: /* nmul: -(fn * fm) */
781 case 6: /* add: fn + fm */
784 case 7: /* sub: fn - fm */
787 case 8: /* div: fn / fm */
790 case 15: /* extension space */
817 case 15: /* single<->double conversion */
832 case 25: /* ftouiz */
838 case 27: /* ftosiz */
841 default: /* undefined */
842 printf ("rn:%d\n", rn);
846 default: /* undefined */
847 printf ("op:%d\n", op);
851 /* Write back the result. */
852 if (op == 15 && (rn >= 8 && rn <= 11))
853 ; /* Comparison, do nothing. */
854 else if (op == 15 && rn > 17)
855 /* Integer result. */
856 gen_mov_vreg_F0(0, rd);
857 else if (op == 15 && rn == 15)
859 gen_mov_vreg_F0(!dp, rd);
861 gen_mov_vreg_F0(dp, rd);
863 /* break out of the loop if we have finished */
867 if (op == 15 && delta_m == 0) {
868 /* single source one-many */
870 rd = ((rd + delta_d) & (bank_mask - 1))
872 gen_mov_vreg_F0(dp, rd);
876 /* Setup the next operands. */
878 rd = ((rd + delta_d) & (bank_mask - 1))
882 /* One source operand. */
883 rm = ((rm + delta_m) & (bank_mask - 1))
885 gen_mov_F0_vreg(dp, rm);
887 /* Two source operands. */
888 rn = ((rn + delta_d) & (bank_mask - 1))
890 gen_mov_F0_vreg(dp, rn);
892 rm = ((rm + delta_m) & (bank_mask - 1))
894 gen_mov_F1_vreg(dp, rm);
902 if (dp && (insn & (1 << 22))) {
903 /* two-register transfer */
904 rn = (insn >> 16) & 0xf;
905 rd = (insn >> 12) & 0xf;
911 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
913 if (insn & (1 << 20)) {
916 gen_mov_F0_vreg(1, rm);
918 gen_movl_reg_T0(s, rd);
919 gen_movl_reg_T1(s, rn);
921 gen_mov_F0_vreg(0, rm);
923 gen_movl_reg_T0(s, rn);
924 gen_mov_F0_vreg(0, rm + 1);
926 gen_movl_reg_T0(s, rd);
931 gen_movl_T0_reg(s, rd);
932 gen_movl_T1_reg(s, rn);
934 gen_mov_vreg_F0(1, rm);
936 gen_movl_T0_reg(s, rn);
938 gen_mov_vreg_F0(0, rm);
939 gen_movl_T0_reg(s, rd);
941 gen_mov_vreg_F0(0, rm + 1);
946 rn = (insn >> 16) & 0xf;
948 rd = (insn >> 12) & 0xf;
950 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
951 gen_movl_T1_reg(s, rn);
952 if ((insn & 0x01200000) == 0x01000000) {
953 /* Single load/store */
954 offset = (insn & 0xff) << 2;
955 if ((insn & (1 << 23)) == 0)
957 gen_op_addl_T1_im(offset);
958 if (insn & (1 << 20)) {
960 gen_mov_vreg_F0(dp, rd);
962 gen_mov_F0_vreg(dp, rd);
966 /* load/store multiple */
968 n = (insn >> 1) & 0x7f;
972 if (insn & (1 << 24)) /* pre-decrement */
973 gen_op_addl_T1_im(-((insn & 0xff) << 2));
979 for (i = 0; i < n; i++) {
980 if (insn & (1 << 20)) {
983 gen_mov_vreg_F0(dp, rd + i);
986 gen_mov_F0_vreg(dp, rd + i);
989 gen_op_addl_T1_im(offset);
991 if (insn & (1 << 21)) {
993 if (insn & (1 << 24))
994 offset = -offset * n;
995 else if (dp && (insn & 1))
1001 gen_op_addl_T1_im(offset);
1002 gen_movl_reg_T1(s, rn);
1008 /* Should never happen. */
1014 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
1016 TranslationBlock *tb;
1019 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
1021 gen_op_goto_tb0(TBPARAM(tb));
1023 gen_op_goto_tb1(TBPARAM(tb));
1024 gen_op_movl_T0_im(dest);
1025 gen_op_movl_r15_T0();
1026 gen_op_movl_T0_im((long)tb + n);
1029 gen_op_movl_T0_im(dest);
1030 gen_op_movl_r15_T0();
1036 static inline void gen_jmp (DisasContext *s, uint32_t dest)
1038 if (__builtin_expect(s->singlestep_enabled, 0)) {
1039 /* An indirect jump so that we still trigger the debug exception. */
1042 gen_op_movl_T0_im(dest);
1045 gen_goto_tb(s, 0, dest);
1046 s->is_jmp = DISAS_TB_JUMP;
1050 static inline void gen_mulxy(int x, int y)
1053 gen_op_sarl_T0_im(16);
1057 gen_op_sarl_T1_im(16);
1063 /* Return the mask of PSR bits set by a MSR instruction. */
1064 static uint32_t msr_mask(DisasContext *s, int flags, int spsr) {
1068 if (flags & (1 << 0))
1070 if (flags & (1 << 1))
1072 if (flags & (1 << 2))
1074 if (flags & (1 << 3))
1076 /* Mask out undefined bits. */
1078 /* Mask out state bits. */
1080 mask &= ~0x01000020;
1081 /* Mask out privileged bits. */
1087 /* Returns nonzero if access to the PSR is not permitted. */
1088 static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
1091 /* ??? This is also undefined in system mode. */
1094 gen_op_movl_spsr_T0(mask);
1096 gen_op_movl_cpsr_T0(mask);
1102 static void gen_exception_return(DisasContext *s)
1104 gen_op_movl_reg_TN[0][15]();
1105 gen_op_movl_T0_spsr();
1106 gen_op_movl_cpsr_T0(0xffffffff);
1107 s->is_jmp = DISAS_UPDATE;
1110 static void disas_arm_insn(CPUState * env, DisasContext *s)
1112 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
1114 insn = ldl_code(s->pc);
1119 /* Unconditional instructions. */
1120 if ((insn & 0x0d70f000) == 0x0550f000)
1122 else if ((insn & 0x0e000000) == 0x0a000000) {
1123 /* branch link and change to thumb (blx <offset>) */
1126 val = (uint32_t)s->pc;
1127 gen_op_movl_T0_im(val);
1128 gen_movl_reg_T0(s, 14);
1129 /* Sign-extend the 24-bit offset */
1130 offset = (((int32_t)insn) << 8) >> 8;
1131 /* offset * 4 + bit24 * 2 + (thumb bit) */
1132 val += (offset << 2) | ((insn >> 23) & 2) | 1;
1133 /* pipeline offset */
1135 gen_op_movl_T0_im(val);
1138 } else if ((insn & 0x0fe00000) == 0x0c400000) {
1139 /* Coprocessor double register transfer. */
1140 } else if ((insn & 0x0f000010) == 0x0e000010) {
1141 /* Additional coprocessor register transfer. */
1142 } else if ((insn & 0x0ff10010) == 0x01000000) {
1143 /* cps (privileged) */
1144 } else if ((insn & 0x0ffffdff) == 0x01010000) {
1146 if (insn & (1 << 9)) {
1147 /* BE8 mode not implemented. */
1155 /* if not always execute, we generate a conditional jump to
1157 s->condlabel = gen_new_label();
1158 gen_test_cc[cond ^ 1](s->condlabel);
1160 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
1161 //s->is_jmp = DISAS_JUMP_NEXT;
1163 if ((insn & 0x0f900000) == 0x03000000) {
1164 if ((insn & 0x0fb0f000) != 0x0320f000)
1166 /* CPSR = immediate */
1168 shift = ((insn >> 8) & 0xf) * 2;
1170 val = (val >> shift) | (val << (32 - shift));
1171 gen_op_movl_T0_im(val);
1172 i = ((insn & (1 << 22)) != 0);
1173 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
1175 } else if ((insn & 0x0f900000) == 0x01000000
1176 && (insn & 0x00000090) != 0x00000090) {
1177 /* miscellaneous instructions */
1178 op1 = (insn >> 21) & 3;
1179 sh = (insn >> 4) & 0xf;
1182 case 0x0: /* move program status register */
1185 gen_movl_T0_reg(s, rm);
1186 i = ((op1 & 2) != 0);
1187 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
1191 rd = (insn >> 12) & 0xf;
1195 gen_op_movl_T0_spsr();
1197 gen_op_movl_T0_cpsr();
1199 gen_movl_reg_T0(s, rd);
1204 /* branch/exchange thumb (bx). */
1205 gen_movl_T0_reg(s, rm);
1207 } else if (op1 == 3) {
1209 rd = (insn >> 12) & 0xf;
1210 gen_movl_T0_reg(s, rm);
1212 gen_movl_reg_T0(s, rd);
1220 /* Trivial implementation equivalent to bx. */
1221 gen_movl_T0_reg(s, rm);
1231 /* branch link/exchange thumb (blx) */
1232 val = (uint32_t)s->pc;
1233 gen_op_movl_T0_im(val);
1234 gen_movl_reg_T0(s, 14);
1235 gen_movl_T0_reg(s, rm);
1238 case 0x5: /* saturating add/subtract */
1239 rd = (insn >> 12) & 0xf;
1240 rn = (insn >> 16) & 0xf;
1241 gen_movl_T0_reg(s, rm);
1242 gen_movl_T1_reg(s, rn);
1244 gen_op_double_T1_saturate();
1246 gen_op_subl_T0_T1_saturate();
1248 gen_op_addl_T0_T1_saturate();
1249 gen_movl_reg_T0(s, rd);
1252 gen_op_movl_T0_im((long)s->pc - 4);
1253 gen_op_movl_reg_TN[0][15]();
1255 s->is_jmp = DISAS_JUMP;
1257 case 0x8: /* signed multiply */
1261 rs = (insn >> 8) & 0xf;
1262 rn = (insn >> 12) & 0xf;
1263 rd = (insn >> 16) & 0xf;
1265 /* (32 * 16) >> 16 */
1266 gen_movl_T0_reg(s, rm);
1267 gen_movl_T1_reg(s, rs);
1269 gen_op_sarl_T1_im(16);
1272 gen_op_imulw_T0_T1();
1273 if ((sh & 2) == 0) {
1274 gen_movl_T1_reg(s, rn);
1275 gen_op_addl_T0_T1_setq();
1277 gen_movl_reg_T0(s, rd);
1280 gen_movl_T0_reg(s, rm);
1281 gen_movl_T1_reg(s, rs);
1282 gen_mulxy(sh & 2, sh & 4);
1284 gen_op_signbit_T1_T0();
1285 gen_op_addq_T0_T1(rn, rd);
1286 gen_movl_reg_T0(s, rn);
1287 gen_movl_reg_T1(s, rd);
1290 gen_movl_T1_reg(s, rn);
1291 gen_op_addl_T0_T1_setq();
1293 gen_movl_reg_T0(s, rd);
1300 } else if (((insn & 0x0e000000) == 0 &&
1301 (insn & 0x00000090) != 0x90) ||
1302 ((insn & 0x0e000000) == (1 << 25))) {
1303 int set_cc, logic_cc, shiftop;
1305 op1 = (insn >> 21) & 0xf;
1306 set_cc = (insn >> 20) & 1;
1307 logic_cc = table_logic_cc[op1] & set_cc;
1309 /* data processing instruction */
1310 if (insn & (1 << 25)) {
1311 /* immediate operand */
1313 shift = ((insn >> 8) & 0xf) * 2;
1315 val = (val >> shift) | (val << (32 - shift));
1316 gen_op_movl_T1_im(val);
1317 if (logic_cc && shift)
1322 gen_movl_T1_reg(s, rm);
1323 shiftop = (insn >> 5) & 3;
1324 if (!(insn & (1 << 4))) {
1325 shift = (insn >> 7) & 0x1f;
1328 gen_shift_T1_im_cc[shiftop](shift);
1330 gen_shift_T1_im[shiftop](shift);
1332 } else if (shiftop != 0) {
1334 gen_shift_T1_0_cc[shiftop]();
1336 gen_shift_T1_0[shiftop]();
1340 rs = (insn >> 8) & 0xf;
1341 gen_movl_T0_reg(s, rs);
1343 gen_shift_T1_T0_cc[shiftop]();
1345 gen_shift_T1_T0[shiftop]();
1349 if (op1 != 0x0f && op1 != 0x0d) {
1350 rn = (insn >> 16) & 0xf;
1351 gen_movl_T0_reg(s, rn);
1353 rd = (insn >> 12) & 0xf;
1356 gen_op_andl_T0_T1();
1357 gen_movl_reg_T0(s, rd);
1359 gen_op_logic_T0_cc();
1362 gen_op_xorl_T0_T1();
1363 gen_movl_reg_T0(s, rd);
1365 gen_op_logic_T0_cc();
1368 if (set_cc && rd == 15) {
1369 /* SUBS r15, ... is used for exception return. */
1372 gen_op_subl_T0_T1_cc();
1373 gen_exception_return(s);
1376 gen_op_subl_T0_T1_cc();
1378 gen_op_subl_T0_T1();
1379 gen_movl_reg_T0(s, rd);
1384 gen_op_rsbl_T0_T1_cc();
1386 gen_op_rsbl_T0_T1();
1387 gen_movl_reg_T0(s, rd);
1391 gen_op_addl_T0_T1_cc();
1393 gen_op_addl_T0_T1();
1394 gen_movl_reg_T0(s, rd);
1398 gen_op_adcl_T0_T1_cc();
1400 gen_op_adcl_T0_T1();
1401 gen_movl_reg_T0(s, rd);
1405 gen_op_sbcl_T0_T1_cc();
1407 gen_op_sbcl_T0_T1();
1408 gen_movl_reg_T0(s, rd);
1412 gen_op_rscl_T0_T1_cc();
1414 gen_op_rscl_T0_T1();
1415 gen_movl_reg_T0(s, rd);
1419 gen_op_andl_T0_T1();
1420 gen_op_logic_T0_cc();
1425 gen_op_xorl_T0_T1();
1426 gen_op_logic_T0_cc();
1431 gen_op_subl_T0_T1_cc();
1436 gen_op_addl_T0_T1_cc();
1441 gen_movl_reg_T0(s, rd);
1443 gen_op_logic_T0_cc();
1446 if (logic_cc && rd == 15) {
1447 /* MOVS r15, ... is used for exception return. */
1450 gen_op_movl_T0_T1();
1451 gen_exception_return(s);
1453 gen_movl_reg_T1(s, rd);
1455 gen_op_logic_T1_cc();
1459 gen_op_bicl_T0_T1();
1460 gen_movl_reg_T0(s, rd);
1462 gen_op_logic_T0_cc();
1467 gen_movl_reg_T1(s, rd);
1469 gen_op_logic_T1_cc();
1473 /* other instructions */
1474 op1 = (insn >> 24) & 0xf;
1478 /* multiplies, extra load/stores */
1479 sh = (insn >> 5) & 3;
1482 rd = (insn >> 16) & 0xf;
1483 rn = (insn >> 12) & 0xf;
1484 rs = (insn >> 8) & 0xf;
1486 if (((insn >> 22) & 3) == 0) {
1488 gen_movl_T0_reg(s, rs);
1489 gen_movl_T1_reg(s, rm);
1491 if (insn & (1 << 21)) {
1492 gen_movl_T1_reg(s, rn);
1493 gen_op_addl_T0_T1();
1495 if (insn & (1 << 20))
1496 gen_op_logic_T0_cc();
1497 gen_movl_reg_T0(s, rd);
1500 gen_movl_T0_reg(s, rs);
1501 gen_movl_T1_reg(s, rm);
1502 if (insn & (1 << 22))
1503 gen_op_imull_T0_T1();
1505 gen_op_mull_T0_T1();
1506 if (insn & (1 << 21)) /* mult accumulate */
1507 gen_op_addq_T0_T1(rn, rd);
1508 if (!(insn & (1 << 23))) { /* double accumulate */
1510 gen_op_addq_lo_T0_T1(rn);
1511 gen_op_addq_lo_T0_T1(rd);
1513 if (insn & (1 << 20))
1515 gen_movl_reg_T0(s, rn);
1516 gen_movl_reg_T1(s, rd);
1519 rn = (insn >> 16) & 0xf;
1520 rd = (insn >> 12) & 0xf;
1521 if (insn & (1 << 23)) {
1522 /* load/store exclusive */
1525 /* SWP instruction */
1528 gen_movl_T0_reg(s, rm);
1529 gen_movl_T1_reg(s, rn);
1530 if (insn & (1 << 22)) {
1535 gen_movl_reg_T0(s, rd);
1541 /* Misc load/store */
1542 rn = (insn >> 16) & 0xf;
1543 rd = (insn >> 12) & 0xf;
1544 gen_movl_T1_reg(s, rn);
1545 if (insn & (1 << 24))
1546 gen_add_datah_offset(s, insn, 0);
1548 if (insn & (1 << 20)) {
1563 } else if (sh & 2) {
1567 gen_movl_T0_reg(s, rd);
1569 gen_op_addl_T1_im(4);
1570 gen_movl_T0_reg(s, rd + 1);
1576 gen_movl_reg_T0(s, rd);
1577 gen_op_addl_T1_im(4);
1582 address_offset = -4;
1585 gen_movl_T0_reg(s, rd);
1589 /* Perform base writeback before the loaded value to
1590 ensure correct behavior with overlapping index registers.
1591 ldrd with base writeback is is undefined if the
1592 destination and index registers overlap. */
1593 if (!(insn & (1 << 24))) {
1594 gen_add_datah_offset(s, insn, address_offset);
1595 gen_movl_reg_T1(s, rn);
1596 } else if (insn & (1 << 21)) {
1598 gen_op_addl_T1_im(address_offset);
1599 gen_movl_reg_T1(s, rn);
1602 /* Complete the load. */
1603 gen_movl_reg_T0(s, rd);
1611 /* Check for undefined extension instructions
1612 * per the ARM Bible IE:
1613 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
1615 sh = (0xf << 20) | (0xf << 4);
1616 if (op1 == 0x7 && ((insn & sh) == sh))
1620 /* load/store byte/word */
1621 rn = (insn >> 16) & 0xf;
1622 rd = (insn >> 12) & 0xf;
1623 gen_movl_T1_reg(s, rn);
1624 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
1625 if (insn & (1 << 24))
1626 gen_add_data_offset(s, insn);
1627 if (insn & (1 << 20)) {
1630 #if defined(CONFIG_USER_ONLY)
1631 if (insn & (1 << 22))
1636 if (insn & (1 << 22)) {
1640 gen_op_ldub_kernel();
1645 gen_op_ldl_kernel();
1650 gen_movl_T0_reg(s, rd);
1651 #if defined(CONFIG_USER_ONLY)
1652 if (insn & (1 << 22))
1657 if (insn & (1 << 22)) {
1661 gen_op_stb_kernel();
1666 gen_op_stl_kernel();
1670 if (!(insn & (1 << 24))) {
1671 gen_add_data_offset(s, insn);
1672 gen_movl_reg_T1(s, rn);
1673 } else if (insn & (1 << 21))
1674 gen_movl_reg_T1(s, rn); {
1676 if (insn & (1 << 20)) {
1677 /* Complete the load. */
1681 gen_movl_reg_T0(s, rd);
1687 int j, n, user, loaded_base;
1688 /* load/store multiple words */
1689 /* XXX: store correct base if write back */
1691 if (insn & (1 << 22)) {
1693 goto illegal_op; /* only usable in supervisor mode */
1695 if ((insn & (1 << 15)) == 0)
1698 rn = (insn >> 16) & 0xf;
1699 gen_movl_T1_reg(s, rn);
1701 /* compute total size */
1705 if (insn & (1 << i))
1708 /* XXX: test invalid n == 0 case ? */
1709 if (insn & (1 << 23)) {
1710 if (insn & (1 << 24)) {
1712 gen_op_addl_T1_im(4);
1714 /* post increment */
1717 if (insn & (1 << 24)) {
1719 gen_op_addl_T1_im(-(n * 4));
1721 /* post decrement */
1723 gen_op_addl_T1_im(-((n - 1) * 4));
1728 if (insn & (1 << i)) {
1729 if (insn & (1 << 20)) {
1735 gen_op_movl_user_T0(i);
1736 } else if (i == rn) {
1737 gen_op_movl_T2_T0();
1740 gen_movl_reg_T0(s, i);
1745 /* special case: r15 = PC + 12 */
1746 val = (long)s->pc + 8;
1747 gen_op_movl_TN_im[0](val);
1749 gen_op_movl_T0_user(i);
1751 gen_movl_T0_reg(s, i);
1756 /* no need to add after the last transfer */
1758 gen_op_addl_T1_im(4);
1761 if (insn & (1 << 21)) {
1763 if (insn & (1 << 23)) {
1764 if (insn & (1 << 24)) {
1767 /* post increment */
1768 gen_op_addl_T1_im(4);
1771 if (insn & (1 << 24)) {
1774 gen_op_addl_T1_im(-((n - 1) * 4));
1776 /* post decrement */
1777 gen_op_addl_T1_im(-(n * 4));
1780 gen_movl_reg_T1(s, rn);
1783 gen_op_movl_T0_T2();
1784 gen_movl_reg_T0(s, rn);
1786 if ((insn & (1 << 22)) && !user) {
1787 /* Restore CPSR from SPSR. */
1788 gen_op_movl_T0_spsr();
1789 gen_op_movl_cpsr_T0(0xffffffff);
1790 s->is_jmp = DISAS_UPDATE;
1799 /* branch (and link) */
1800 val = (int32_t)s->pc;
1801 if (insn & (1 << 24)) {
1802 gen_op_movl_T0_im(val);
1803 gen_op_movl_reg_TN[0][14]();
1805 offset = (((int32_t)insn << 8) >> 8);
1806 val += (offset << 2) + 4;
1814 op1 = (insn >> 8) & 0xf;
1818 if (disas_vfp_insn (env, s, insn))
1822 if (disas_cp15_insn (s, insn))
1826 /* unknown coprocessor. */
1832 gen_op_movl_T0_im((long)s->pc);
1833 gen_op_movl_reg_TN[0][15]();
1835 s->is_jmp = DISAS_JUMP;
1839 gen_op_movl_T0_im((long)s->pc - 4);
1840 gen_op_movl_reg_TN[0][15]();
1841 gen_op_undef_insn();
1842 s->is_jmp = DISAS_JUMP;
1848 static void disas_thumb_insn(DisasContext *s)
1850 uint32_t val, insn, op, rm, rn, rd, shift, cond;
1854 insn = lduw_code(s->pc);
1857 switch (insn >> 12) {
1860 op = (insn >> 11) & 3;
1863 rn = (insn >> 3) & 7;
1864 gen_movl_T0_reg(s, rn);
1865 if (insn & (1 << 10)) {
1867 gen_op_movl_T1_im((insn >> 6) & 7);
1870 rm = (insn >> 6) & 7;
1871 gen_movl_T1_reg(s, rm);
1873 if (insn & (1 << 9))
1874 gen_op_subl_T0_T1_cc();
1876 gen_op_addl_T0_T1_cc();
1877 gen_movl_reg_T0(s, rd);
1879 /* shift immediate */
1880 rm = (insn >> 3) & 7;
1881 shift = (insn >> 6) & 0x1f;
1882 gen_movl_T0_reg(s, rm);
1883 gen_shift_T0_im_thumb[op](shift);
1884 gen_movl_reg_T0(s, rd);
1888 /* arithmetic large immediate */
1889 op = (insn >> 11) & 3;
1890 rd = (insn >> 8) & 0x7;
1892 gen_op_movl_T0_im(insn & 0xff);
1894 gen_movl_T0_reg(s, rd);
1895 gen_op_movl_T1_im(insn & 0xff);
1899 gen_op_logic_T0_cc();
1902 gen_op_subl_T0_T1_cc();
1905 gen_op_addl_T0_T1_cc();
1908 gen_op_subl_T0_T1_cc();
1912 gen_movl_reg_T0(s, rd);
1915 if (insn & (1 << 11)) {
1916 rd = (insn >> 8) & 7;
1917 /* load pc-relative. Bit 1 of PC is ignored. */
1918 val = s->pc + 2 + ((insn & 0xff) * 4);
1919 val &= ~(uint32_t)2;
1920 gen_op_movl_T1_im(val);
1922 gen_movl_reg_T0(s, rd);
1925 if (insn & (1 << 10)) {
1926 /* data processing extended or blx */
1927 rd = (insn & 7) | ((insn >> 4) & 8);
1928 rm = (insn >> 3) & 0xf;
1929 op = (insn >> 8) & 3;
1932 gen_movl_T0_reg(s, rd);
1933 gen_movl_T1_reg(s, rm);
1934 gen_op_addl_T0_T1();
1935 gen_movl_reg_T0(s, rd);
1938 gen_movl_T0_reg(s, rd);
1939 gen_movl_T1_reg(s, rm);
1940 gen_op_subl_T0_T1_cc();
1942 case 2: /* mov/cpy */
1943 gen_movl_T0_reg(s, rm);
1944 gen_movl_reg_T0(s, rd);
1946 case 3:/* branch [and link] exchange thumb register */
1947 if (insn & (1 << 7)) {
1948 val = (uint32_t)s->pc | 1;
1949 gen_op_movl_T1_im(val);
1950 gen_movl_reg_T1(s, 14);
1952 gen_movl_T0_reg(s, rm);
1959 /* data processing register */
1961 rm = (insn >> 3) & 7;
1962 op = (insn >> 6) & 0xf;
1963 if (op == 2 || op == 3 || op == 4 || op == 7) {
1964 /* the shift/rotate ops want the operands backwards */
1973 if (op == 9) /* neg */
1974 gen_op_movl_T0_im(0);
1975 else if (op != 0xf) /* mvn doesn't read its first operand */
1976 gen_movl_T0_reg(s, rd);
1978 gen_movl_T1_reg(s, rm);
1981 gen_op_andl_T0_T1();
1982 gen_op_logic_T0_cc();
1985 gen_op_xorl_T0_T1();
1986 gen_op_logic_T0_cc();
1989 gen_op_shll_T1_T0_cc();
1990 gen_op_logic_T1_cc();
1993 gen_op_shrl_T1_T0_cc();
1994 gen_op_logic_T1_cc();
1997 gen_op_sarl_T1_T0_cc();
1998 gen_op_logic_T1_cc();
2001 gen_op_adcl_T0_T1_cc();
2004 gen_op_sbcl_T0_T1_cc();
2007 gen_op_rorl_T1_T0_cc();
2008 gen_op_logic_T1_cc();
2011 gen_op_andl_T0_T1();
2012 gen_op_logic_T0_cc();
2016 gen_op_subl_T0_T1_cc();
2019 gen_op_subl_T0_T1_cc();
2023 gen_op_addl_T0_T1_cc();
2028 gen_op_logic_T0_cc();
2031 gen_op_mull_T0_T1();
2032 gen_op_logic_T0_cc();
2035 gen_op_bicl_T0_T1();
2036 gen_op_logic_T0_cc();
2040 gen_op_logic_T1_cc();
2047 gen_movl_reg_T1(s, rm);
2049 gen_movl_reg_T0(s, rd);
2054 /* load/store register offset. */
2056 rn = (insn >> 3) & 7;
2057 rm = (insn >> 6) & 7;
2058 op = (insn >> 9) & 7;
2059 gen_movl_T1_reg(s, rn);
2060 gen_movl_T2_reg(s, rm);
2061 gen_op_addl_T1_T2();
2063 if (op < 3) /* store */
2064 gen_movl_T0_reg(s, rd);
2092 if (op >= 3) /* load */
2093 gen_movl_reg_T0(s, rd);
2097 /* load/store word immediate offset */
2099 rn = (insn >> 3) & 7;
2100 gen_movl_T1_reg(s, rn);
2101 val = (insn >> 4) & 0x7c;
2102 gen_op_movl_T2_im(val);
2103 gen_op_addl_T1_T2();
2105 if (insn & (1 << 11)) {
2108 gen_movl_reg_T0(s, rd);
2111 gen_movl_T0_reg(s, rd);
2117 /* load/store byte immediate offset */
2119 rn = (insn >> 3) & 7;
2120 gen_movl_T1_reg(s, rn);
2121 val = (insn >> 6) & 0x1f;
2122 gen_op_movl_T2_im(val);
2123 gen_op_addl_T1_T2();
2125 if (insn & (1 << 11)) {
2128 gen_movl_reg_T0(s, rd);
2131 gen_movl_T0_reg(s, rd);
2137 /* load/store halfword immediate offset */
2139 rn = (insn >> 3) & 7;
2140 gen_movl_T1_reg(s, rn);
2141 val = (insn >> 5) & 0x3e;
2142 gen_op_movl_T2_im(val);
2143 gen_op_addl_T1_T2();
2145 if (insn & (1 << 11)) {
2148 gen_movl_reg_T0(s, rd);
2151 gen_movl_T0_reg(s, rd);
2157 /* load/store from stack */
2158 rd = (insn >> 8) & 7;
2159 gen_movl_T1_reg(s, 13);
2160 val = (insn & 0xff) * 4;
2161 gen_op_movl_T2_im(val);
2162 gen_op_addl_T1_T2();
2164 if (insn & (1 << 11)) {
2167 gen_movl_reg_T0(s, rd);
2170 gen_movl_T0_reg(s, rd);
2176 /* add to high reg */
2177 rd = (insn >> 8) & 7;
2178 if (insn & (1 << 11)) {
2180 gen_movl_T0_reg(s, 13);
2182 /* PC. bit 1 is ignored. */
2183 gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
2185 val = (insn & 0xff) * 4;
2186 gen_op_movl_T1_im(val);
2187 gen_op_addl_T0_T1();
2188 gen_movl_reg_T0(s, rd);
2193 op = (insn >> 8) & 0xf;
2196 /* adjust stack pointer */
2197 gen_movl_T1_reg(s, 13);
2198 val = (insn & 0x7f) * 4;
2199 if (insn & (1 << 7))
2200 val = -(int32_t)val;
2201 gen_op_movl_T2_im(val);
2202 gen_op_addl_T1_T2();
2203 gen_movl_reg_T1(s, 13);
2206 case 4: case 5: case 0xc: case 0xd:
2208 gen_movl_T1_reg(s, 13);
2209 if (insn & (1 << 8))
2213 for (i = 0; i < 8; i++) {
2214 if (insn & (1 << i))
2217 if ((insn & (1 << 11)) == 0) {
2218 gen_op_movl_T2_im(-offset);
2219 gen_op_addl_T1_T2();
2221 gen_op_movl_T2_im(4);
2222 for (i = 0; i < 8; i++) {
2223 if (insn & (1 << i)) {
2224 if (insn & (1 << 11)) {
2227 gen_movl_reg_T0(s, i);
2230 gen_movl_T0_reg(s, i);
2233 /* advance to the next address. */
2234 gen_op_addl_T1_T2();
2237 if (insn & (1 << 8)) {
2238 if (insn & (1 << 11)) {
2241 /* don't set the pc until the rest of the instruction
2245 gen_movl_T0_reg(s, 14);
2248 gen_op_addl_T1_T2();
2250 if ((insn & (1 << 11)) == 0) {
2251 gen_op_movl_T2_im(-offset);
2252 gen_op_addl_T1_T2();
2254 /* write back the new stack pointer */
2255 gen_movl_reg_T1(s, 13);
2256 /* set the new PC value */
2257 if ((insn & 0x0900) == 0x0900)
2261 case 0xe: /* bkpt */
2262 gen_op_movl_T0_im((long)s->pc - 2);
2263 gen_op_movl_reg_TN[0][15]();
2265 s->is_jmp = DISAS_JUMP;
/* NOTE(review): fragment — intermediate lines (the actual ldl/stl memory ops
 * and several closing braces) are elided from this chunk. */
2274 /* load/store multiple */
/* Base register rn in bits [10:8]; T1 = base address, T2 = stride (4). */
2275 rn = (insn >> 8) & 0x7;
2276 gen_movl_T1_reg(s, rn);
2277 gen_op_movl_T2_im(4);
2278 for (i = 0; i < 8; i++) {
2279 if (insn & (1 << i)) {
/* bit 11 appears to select load (ldmia) vs store (stmia); the memory access
 * itself is in elided lines — TODO confirm. */
2280 if (insn & (1 << 11)) {
2283 gen_movl_reg_T0(s, i);
2286 gen_movl_T0_reg(s, i);
2289 /* advance to the next address */
2290 gen_op_addl_T1_T2();
/* Writeback is suppressed when the base register is in the transfer list. */
2293 /* Base register writeback. */
2294 if ((insn & (1 << rn)) == 0)
2295 gen_movl_reg_T1(s, rn);
/* NOTE(review): fragment — the swi dispatch and branch emission lines are
 * elided from this chunk. */
2299 /* conditional branch or swi */
2300 cond = (insn >> 8) & 0xf;
/* swi path (presumably cond == 0xf): record return address with the Thumb
 * bit set; the exception op itself is elided. */
2306 gen_op_movl_T0_im((long)s->pc | 1);
2307 /* Don't set r15. */
2308 gen_op_movl_reg_TN[0][15]();
2310 s->is_jmp = DISAS_JUMP;
2313 /* generate a conditional jump to next instruction */
/* Invert the condition: fall through to condlabel when the branch is NOT
 * taken; the taken path continues below. */
2314 s->condlabel = gen_new_label();
2315 gen_test_cc[cond ^ 1](s->condlabel);
2317 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
2318 //s->is_jmp = DISAS_JUMP_NEXT;
2319 gen_movl_T1_reg(s, 15);
2321 /* jump to the offset */
/* Target = PC + 4 (s->pc already past this insn, +2 more) + sign-extended
 * imm8 * 2; the shift-left-by-1 presumably happens in an elided line. */
2322 val = (uint32_t)s->pc + 2;
2323 offset = ((int32_t)insn << 24) >> 24;
/* NOTE(review): fragment — the final jump emission lines are elided. */
2329 /* unconditional branch */
2330 if (insn & (1 << 11)) {
2331 /* Second half of blx. */
/* Target = LR + (imm11 << 1), then force word alignment (blx switches to
 * ARM state, so bits [1:0] are cleared). */
2332 offset = ((insn & 0x7ff) << 1);
2333 gen_movl_T0_reg(s, 14);
2334 gen_op_movl_T1_im(offset);
2335 gen_op_addl_T0_T1();
2336 gen_op_movl_T1_im(0xfffffffc);
2337 gen_op_andl_T0_T1();
/* LR = address of following insn with the Thumb bit set. */
2339 val = (uint32_t)s->pc;
2340 gen_op_movl_T1_im(val | 1);
2341 gen_movl_reg_T1(s, 14);
/* Plain B: target = PC + 4 + sign-extended imm11 * 2. */
2345 val = (uint32_t)s->pc;
2346 offset = ((int32_t)insn << 21) >> 21;
2347 val += (offset << 1) + 2;
/* NOTE(review): fragment of the 32-bit bl/blx pair handling — several
 * interior lines (second-half fetch guard, pc advance, final jump) are
 * elided from this chunk. */
2352 /* branch and link [and switch to arm] */
/* If the first half sits at the end of a page, translate the two halves as
 * separate 16-bit insns so a prefetch abort on the second half is precise. */
2353 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
2354 /* Instruction spans a page boundary. Implement it as two
2355 16-bit instructions in case the second half causes an
/* First half alone: LR = PC + 4 + (sign-extended imm11 << 12). */
2357 offset = ((int32_t)insn << 21) >> 9;
2358 val = s->pc + 2 + offset;
2359 gen_op_movl_T0_im(val);
2360 gen_movl_reg_T0(s, 14);
2363 if (insn & (1 << 11)) {
2364 /* Second half of bl. */
/* Target = LR + (imm11 << 1); the stray |1 here presumably becomes the
 * Thumb bit of the new LR — TODO confirm against the full source. */
2365 offset = ((insn & 0x7ff) << 1) | 1;
2366 gen_movl_T0_reg(s, 14);
2367 gen_op_movl_T1_im(offset);
2368 gen_op_addl_T0_T1();
/* New LR = return address with the Thumb bit set. */
2370 val = (uint32_t)s->pc;
2371 gen_op_movl_T1_im(val | 1);
2372 gen_movl_reg_T1(s, 14);
/* Whole-pair path: combine the high part with the second half fetched
 * directly from guest memory. */
2376 offset = ((int32_t)insn << 21) >> 10;
2377 insn = lduw_code(s->pc);
2378 offset |= insn & 0x7ff;
2380 val = (uint32_t)s->pc + 2;
2381 gen_op_movl_T1_im(val | 1);
2382 gen_movl_reg_T1(s, 14);
/* bit 12 distinguishes bl from blx; blx clears bit 1 of the target to
 * word-align it for the switch to ARM state. */
2385 if (insn & (1 << 12)) {
2390 val &= ~(uint32_t)2;
2391 gen_op_movl_T0_im(val);
/* undefined-instruction fallthrough: expose the faulting PC, emit the
 * undef op, and end the TB. */
2397 gen_op_movl_T0_im((long)s->pc - 2);
2398 gen_op_movl_reg_TN[0][15]();
2399 gen_op_undef_insn();
2400 s->is_jmp = DISAS_JUMP;
2403 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
2404 basic block 'tb'. If search_pc is TRUE, also generate PC
2405 information for each intermediate instruction. */
/* NOTE(review): fragmentary in this chunk — the search_pc parameter line,
 * opening brace, several declarations (j, lj, insn_start guards), the
 * `do {` loop head, and the return statement are all elided. */
2406 static inline int gen_intermediate_code_internal(CPUState *env,
2407 TranslationBlock *tb,
2410 DisasContext dc1, *dc = &dc1;
2411 uint16_t *gen_opc_end;
2413 target_ulong pc_start;
2414 uint32_t next_page_start;
2416 /* generate intermediate code */
/* Reset the global micro-op and parameter buffers for this TB. */
2421 gen_opc_ptr = gen_opc_buf;
2422 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2423 gen_opparam_ptr = gen_opparam_buf;
/* Seed the disassembly context from the CPU state. */
2425 dc->is_jmp = DISAS_NEXT;
2427 dc->singlestep_enabled = env->singlestep_enabled;
2429 dc->thumb = env->thumb;
2431 #if !defined(CONFIG_USER_ONLY)
2432 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
/* Translation never crosses a guest page boundary (see loop condition). */
2434 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
/* Breakpoint hit: force the PC into r15 and end the TB so the debug
 * exception (emitted in an elided line) is raised at the right place. */
2438 if (env->nb_breakpoints > 0) {
2439 for(j = 0; j < env->nb_breakpoints; j++) {
2440 if (env->breakpoints[j] == dc->pc) {
2441 gen_op_movl_T0_im((long)dc->pc);
2442 gen_op_movl_reg_TN[0][15]();
2444 dc->is_jmp = DISAS_JUMP;
/* search_pc bookkeeping: record, per emitted micro-op, which guest PC the
 * next op belongs to (used to recover the PC after a fault). */
2450 j = gen_opc_ptr - gen_opc_buf;
2454 gen_opc_instr_start[lj++] = 0;
2456 gen_opc_pc[lj] = dc->pc;
2457 gen_opc_instr_start[lj] = 1;
/* Decode one guest instruction in the current execution state. */
2461 disas_thumb_insn(dc);
2463 disas_arm_insn(env, dc);
/* Close a pending conditional skip: the not-taken path resumes here. */
2465 if (dc->condjmp && !dc->is_jmp) {
2466 gen_set_label(dc->condlabel);
2469 /* Terminate the TB on memory ops if watchpoints are present. */
2470 /* FIXME: This should be replaced by the deterministic execution
2471 * IRQ raising bits. */
2472 if (dc->is_mem && env->nb_watchpoints)
2475 /* Translation stops when a conditional branch is encountered.
2476 * Otherwise the subsequent code could get translated several times.
2477 * Also stop translation when a page boundary is reached. This
2478 * ensures prefetch aborts occur at the right place. */
2479 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
2480 !env->singlestep_enabled &&
2481 dc->pc < next_page_start);
2482 /* At this stage dc->condjmp will only be set when the skipped
2483 * instruction was a conditional branch, and the PC has already been
/* Single-step epilogue: sync r15 and raise a debug exception instead of
 * chaining to the next TB. */
2485 if (__builtin_expect(env->singlestep_enabled, 0)) {
2486 /* Make sure the pc is updated, and raise a debug exception. */
2489 gen_set_label(dc->condlabel);
2491 if (dc->condjmp || !dc->is_jmp) {
2492 gen_op_movl_T0_im((long)dc->pc);
2493 gen_op_movl_reg_TN[0][15]();
/* Normal epilogue: pick the TB exit strategy by how translation ended.
 * Case labels are elided in this chunk — presumably DISAS_NEXT,
 * DISAS_JUMP/UPDATE, and DISAS_TB_JUMP; verify against the full source. */
2498 switch(dc->is_jmp) {
2500 gen_goto_tb(dc, 1, dc->pc);
2505 /* indicate that the hash table must be used to find the next TB */
2510 /* nothing more to generate */
/* Pending condjmp at TB end: the not-taken path falls through to the next
 * instruction's TB. */
2514 gen_set_label(dc->condlabel);
2515 gen_goto_tb(dc, 1, dc->pc);
/* Terminate the micro-op stream. */
2519 *gen_opc_ptr = INDEX_op_end;
2522 if (loglevel & CPU_LOG_TB_IN_ASM) {
2523 fprintf(logfile, "----------------\n");
2524 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
2525 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2526 fprintf(logfile, "\n");
2527 if (loglevel & (CPU_LOG_TB_OP)) {
2528 fprintf(logfile, "OP:\n");
2529 dump_ops(gen_opc_buf, gen_opparam_buf);
2530 fprintf(logfile, "\n");
/* search_pc: pad the instr_start table out to the last emitted op. */
2535 j = gen_opc_ptr - gen_opc_buf;
2538 gen_opc_instr_start[lj++] = 0;
/* Record how many guest bytes this TB covers. */
2541 tb->size = dc->pc - pc_start;
/* Translate one TB without PC-search info (search_pc = 0).
 * NOTE(review): the brace lines are elided from this chunk. */
2546 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2548 return gen_intermediate_code_internal(env, tb, 0);
/* Translate one TB, also recording per-op guest PCs (search_pc = 1) so a
 * fault inside the TB can be mapped back to a guest instruction.
 * NOTE(review): the brace lines are elided from this chunk. */
2551 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2553 return gen_intermediate_code_internal(env, tb, 1);
/* Printable names for the 16 possible CPSR mode field values (CPSR[4:0]
 * low nibble); unused encodings print as "???". Indexed by psr & 0xf in
 * cpu_dump_state(). NOTE(review): the closing "};" line is elided here. */
2556 static const char *cpu_mode_names[16] = {
2557 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
2558 "???", "???", "???", "und", "???", "???", "???", "sys"
/* Debug dump of CPU state: core registers, decoded PSR flags, and VFP
 * registers. NOTE(review): fragmentary in this chunk — the parameter list
 * tail, opening brace, local declarations (i, psr, the s0/s1/d unions),
 * loop heads, and closing brace are all elided. */
2560 void cpu_dump_state(CPUState *env, FILE *f,
2561 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
2570 /* ??? This assumes float64 and double have the same layout.
2571 Oh well, it's only debug dumps. */
/* r0-r15, four per line (line-break logic is in elided lines). */
2579 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2581 cpu_fprintf(f, "\n");
2583 cpu_fprintf(f, " ");
/* Decode the composed PSR: N/Z/C/V flags, T(humb)/A(rm) state, mode name,
 * and 32- vs 26-bit mode from bit 4. */
2585 psr = cpsr_read(env);
2586 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
2588 psr & (1 << 31) ? 'N' : '-',
2589 psr & (1 << 30) ? 'Z' : '-',
2590 psr & (1 << 29) ? 'C' : '-',
2591 psr & (1 << 28) ? 'V' : '-',
2592 psr & CPSR_T ? 'T' : 'A',
2593 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
/* VFP: each double register shown both as two singles and as one double;
 * the union loads for s0/s1 are in elided lines. */
2595 for (i = 0; i < 16; i++) {
2596 d.d = env->vfp.regs[i];
2600 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
2601 i * 2, (int)s0.i, s0.s,
2602 i * 2 + 1, (int)s1.i, s1.s,
2603 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
2606 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);