4 * Copyright (c) 2005-2007 CodeSourcery
5 * Written by Paul Brook
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
37 //#define DEBUG_DISPATCH 1
39 /* Fake floating point. */
40 #define TCG_TYPE_F32 TCG_TYPE_I32
41 #define TCG_TYPE_F64 TCG_TYPE_I64
42 #define tcg_gen_mov_f64 tcg_gen_mov_i64
43 #define tcg_gen_qemu_ldf32 tcg_gen_qemu_ld32u
44 #define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
45 #define tcg_gen_qemu_stf32 tcg_gen_qemu_st32
46 #define tcg_gen_qemu_stf64 tcg_gen_qemu_st64
47 #define gen_helper_pack_32_f32 tcg_gen_mov_i32
48 #define gen_helper_pack_f32_32 tcg_gen_mov_i32
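/* With floating point faked as plain integers above, packing a 32-bit value
   into a single-precision register (and unpacking it again) reduces to an
   ordinary 32-bit move. */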
50 #define DEFO32(name, offset) static TCGv QREG_##name;
51 #define DEFO64(name, offset) static TCGv QREG_##name;
52 #define DEFF64(name, offset) static TCGv QREG_##name;
60 static char cpu_reg_names[3*8*3 + 5*4];
61 static TCGv cpu_dregs[8];
62 static TCGv cpu_aregs[8];
63 static TCGv cpu_fregs[8];
64 static TCGv cpu_macc[4];
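/* DREG/AREG/FREG extract a 3-bit register number from an instruction or
   extension word at bit position POS and map it to the corresponding TCG
   global, e.g. DREG(insn, 9) is the data register encoded in bits 11-9. */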
66 #define DREG(insn, pos) cpu_dregs[((insn) >> (pos)) & 7]
67 #define AREG(insn, pos) cpu_aregs[((insn) >> (pos)) & 7]
68 #define FREG(insn, pos) cpu_fregs[((insn) >> (pos)) & 7]
69 #define MACREG(acc) cpu_macc[acc]
70 #define QREG_SP cpu_aregs[7]
72 static TCGv NULL_QREG;
73 #define IS_NULL_QREG(t) (GET_TCGV(t) == GET_TCGV(NULL_QREG))
74 /* Used to distinguish stores from bad addressing modes. */
75 static TCGv store_dummy;
77 #include "gen-icount.h"
79 void m68k_tcg_init(void)
84 #define DEFO32(name, offset) QREG_##name = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0, offsetof(CPUState, offset), #name);
85 #define DEFO64(name, offset) QREG_##name = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0, offsetof(CPUState, offset), #name);
86 #define DEFF64(name, offset) DEFO64(name, offset)
92 cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
95 for (i = 0; i < 8; i++) {
97 cpu_dregs[i] = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
98 offsetof(CPUM68KState, dregs[i]), p);
100 sprintf(p, "A%d", i);
101 cpu_aregs[i] = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
102 offsetof(CPUM68KState, aregs[i]), p);
104 sprintf(p, "F%d", i);
105 cpu_fregs[i] = tcg_global_mem_new(TCG_TYPE_F64, TCG_AREG0,
106 offsetof(CPUM68KState, fregs[i]), p);
109 for (i = 0; i < 4; i++) {
110 sprintf(p, "ACC%d", i);
111 cpu_macc[i] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
112 offsetof(CPUM68KState, macc[i]), p);
116 NULL_QREG = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0, -4, "NULL");
117 store_dummy = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0, -8, "NULL");
119 #define DEF_HELPER(name, ret, args) \
120 tcg_register_helper(HELPER(name), #name);
124 static inline void qemu_assert(int cond, const char *msg)
127 fprintf (stderr, "badness: %s\n", msg);
132 /* internal defines */
133 typedef struct DisasContext {
135 target_ulong insn_pc; /* Start of the current instruction. */
141 struct TranslationBlock *tb;
142 int singlestep_enabled;
147 #define DISAS_JUMP_NEXT 4
149 #if defined(CONFIG_USER_ONLY)
152 #define IS_USER(s) s->user
155 /* XXX: move that elsewhere */
156 /* ??? Fix exceptions. */
157 static void *gen_throws_exception;
158 #define gen_last_qop NULL
160 extern FILE *logfile;
169 typedef void (*disas_proc)(DisasContext *, uint16_t);
171 #ifdef DEBUG_DISPATCH
172 #define DISAS_INSN(name) \
173 static void real_disas_##name (DisasContext *s, uint16_t insn); \
174 static void disas_##name (DisasContext *s, uint16_t insn) { \
175 if (logfile) fprintf(logfile, "Dispatch " #name "\n"); \
176 real_disas_##name(s, insn); } \
177 static void real_disas_##name (DisasContext *s, uint16_t insn)
179 #define DISAS_INSN(name) \
180 static void disas_##name (DisasContext *s, uint16_t insn)
183 /* FIXME: Remove this. */
184 #define gen_im32(val) tcg_const_i32(val)
186 #define QMODE_I32 TCG_TYPE_I32
187 #define QMODE_I64 TCG_TYPE_I64
188 #define QMODE_F32 TCG_TYPE_F32
189 #define QMODE_F64 TCG_TYPE_F64
190 static inline TCGv gen_new_qreg(int mode)
192 return tcg_temp_new(mode);
195 /* Generate a load from the specified address. Narrow values are
196 sign extended to full register width. */
197 static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
200 int index = IS_USER(s);
204 tmp = gen_new_qreg(QMODE_I32);
206 tcg_gen_qemu_ld8s(tmp, addr, index);
208 tcg_gen_qemu_ld8u(tmp, addr, index);
211 tmp = gen_new_qreg(QMODE_I32);
213 tcg_gen_qemu_ld16s(tmp, addr, index);
215 tcg_gen_qemu_ld16u(tmp, addr, index);
218 tmp = gen_new_qreg(QMODE_I32);
219 tcg_gen_qemu_ld32u(tmp, addr, index);
222 tmp = gen_new_qreg(QMODE_F32);
223 tcg_gen_qemu_ldf32(tmp, addr, index);
226 tmp = gen_new_qreg(QMODE_F64);
227 tcg_gen_qemu_ldf64(tmp, addr, index);
230 qemu_assert(0, "bad load size");
232 gen_throws_exception = gen_last_qop;
236 /* Generate a store. */
237 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
239 int index = IS_USER(s);
243 tcg_gen_qemu_st8(val, addr, index);
246 tcg_gen_qemu_st16(val, addr, index);
249 tcg_gen_qemu_st32(val, addr, index);
252 tcg_gen_qemu_stf32(val, addr, index);
255 tcg_gen_qemu_stf64(val, addr, index);
258 qemu_assert(0, "bad store size");
260 gen_throws_exception = gen_last_qop;
269 /* Generate an unsigned load if WHAT is EA_LOADU, a signed load if WHAT is
270    EA_LOADS, otherwise generate a store. */
271 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
274 if (what == EA_STORE) {
275 gen_store(s, opsize, addr, val);
278 return gen_load(s, opsize, addr, what == EA_LOADS);
282 /* Read a 32-bit immediate constant. */
283 static inline uint32_t read_im32(DisasContext *s)
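    /* The target is big-endian, so the most significant word of the
       immediate appears first in the instruction stream. */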
286 im = ((uint32_t)lduw_code(s->pc)) << 16;
288 im |= lduw_code(s->pc);
293 /* Calculate an address index. */
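/* The index field of an extension word is laid out as: bit 15 selects an
   address versus data register, bits 14-12 give the register number, bit 11
   selects a long versus sign-extended word index, and bits 10-9 give the
   scale factor (1, 2, 4 or 8). */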
294 static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
299 add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
300 if ((ext & 0x800) == 0) {
301 tcg_gen_ext16s_i32(tmp, add);
304 scale = (ext >> 9) & 3;
306 tcg_gen_shli_i32(tmp, add, scale);
312 /* Handle a base + index + displacement effective address.
313 A NULL_QREG base means pc-relative. */
314 static TCGv gen_lea_indexed(DisasContext *s, int opsize, TCGv base)
323 ext = lduw_code(s->pc);
326 if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
330 /* full extension word format */
331 if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
334 if ((ext & 0x30) > 0x10) {
335 /* base displacement */
336 if ((ext & 0x30) == 0x20) {
337 bd = (int16_t)lduw_code(s->pc);
345 tmp = gen_new_qreg(QMODE_I32);
346 if ((ext & 0x44) == 0) {
348 add = gen_addr_index(ext, tmp);
352 if ((ext & 0x80) == 0) {
353 /* base not suppressed */
354 if (IS_NULL_QREG(base)) {
355 base = gen_im32(offset + bd);
358 if (!IS_NULL_QREG(add)) {
359 tcg_gen_add_i32(tmp, add, base);
365 if (!IS_NULL_QREG(add)) {
367 tcg_gen_addi_i32(tmp, add, bd);
373 if ((ext & 3) != 0) {
374 /* memory indirect */
375 base = gen_load(s, OS_LONG, add, 0);
376 if ((ext & 0x44) == 4) {
377 add = gen_addr_index(ext, tmp);
378 tcg_gen_add_i32(tmp, add, base);
384 /* outer displacement */
385 if ((ext & 3) == 2) {
386 od = (int16_t)lduw_code(s->pc);
395 tcg_gen_addi_i32(tmp, add, od);
400 /* brief extension word format */
401 tmp = gen_new_qreg(QMODE_I32);
402 add = gen_addr_index(ext, tmp);
403 if (!IS_NULL_QREG(base)) {
404 tcg_gen_add_i32(tmp, add, base);
406 tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
408 tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
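/* Condition codes are evaluated lazily: s->cc_op records which operation
   last set the flags, and the individual flag bits are only materialized
   (via gen_flush_flags) when an instruction actually needs them. */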
415 /* Update the CPU env CC_OP state. */
416 static inline void gen_flush_cc_op(DisasContext *s)
418 if (s->cc_op != CC_OP_DYNAMIC)
419 tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
422 /* Evaluate all the CC flags. */
423 static inline void gen_flush_flags(DisasContext *s)
425 if (s->cc_op == CC_OP_FLAGS)
428 gen_helper_flush_flags(cpu_env, QREG_CC_OP);
429 s->cc_op = CC_OP_FLAGS;
432 static void gen_logic_cc(DisasContext *s, TCGv val)
434 tcg_gen_mov_i32(QREG_CC_DEST, val);
435 s->cc_op = CC_OP_LOGIC;
438 static void gen_update_cc_add(TCGv dest, TCGv src)
440 tcg_gen_mov_i32(QREG_CC_DEST, dest);
441 tcg_gen_mov_i32(QREG_CC_SRC, src);
444 static inline int opsize_bytes(int opsize)
447 case OS_BYTE: return 1;
448 case OS_WORD: return 2;
449 case OS_LONG: return 4;
450 case OS_SINGLE: return 4;
451 case OS_DOUBLE: return 8;
453 qemu_assert(0, "bad operand size");
457 /* Assign value to a register. If the width is less than the register width
458 only the low part of the register is set. */
459 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
464 tcg_gen_andi_i32(reg, reg, 0xffffff00);
465 tmp = gen_new_qreg(QMODE_I32);
466 tcg_gen_ext8u_i32(tmp, val);
467 tcg_gen_or_i32(reg, reg, tmp);
470 tcg_gen_andi_i32(reg, reg, 0xffff0000);
471 tmp = gen_new_qreg(QMODE_I32);
472 tcg_gen_ext16u_i32(tmp, val);
473 tcg_gen_or_i32(reg, reg, tmp);
476 tcg_gen_mov_i32(reg, val);
479 gen_helper_pack_32_f32(reg, val);
482 qemu_assert(0, "Bad operand size");
487 /* Sign or zero extend a value. */
488 static inline TCGv gen_extend(TCGv val, int opsize, int sign)
494 tmp = gen_new_qreg(QMODE_I32);
496 tcg_gen_ext8s_i32(tmp, val);
498 tcg_gen_ext8u_i32(tmp, val);
501 tmp = gen_new_qreg(QMODE_I32);
503 tcg_gen_ext16s_i32(tmp, val);
505 tcg_gen_ext16u_i32(tmp, val);
511 tmp = gen_new_qreg(QMODE_F32);
512 gen_helper_pack_f32_32(tmp, val);
515 qemu_assert(0, "Bad operand size");
520 /* Generate code for an "effective address". Does not adjust the base
521    register for autoincrement addressing modes. */
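/* The effective address is encoded in the low six bits of the opcode:
   bits 5-3 select the addressing mode and bits 2-0 the register, with
   mode 7 using the register field to select absolute, pc-relative or
   immediate forms. */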
522 static TCGv gen_lea(DisasContext *s, uint16_t insn, int opsize)
529 switch ((insn >> 3) & 7) {
530 case 0: /* Data register direct. */
531 case 1: /* Address register direct. */
533 case 2: /* Indirect register */
534 case 3: /* Indirect postincrement. */
535 return AREG(insn, 0);
536     case 4: /* Indirect predecrement. */
538 tmp = gen_new_qreg(QMODE_I32);
539 tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
541 case 5: /* Indirect displacement. */
543 tmp = gen_new_qreg(QMODE_I32);
544 ext = lduw_code(s->pc);
546 tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
548 case 6: /* Indirect index + displacement. */
550 return gen_lea_indexed(s, opsize, reg);
553 case 0: /* Absolute short. */
554 offset = ldsw_code(s->pc);
556 return gen_im32(offset);
557 case 1: /* Absolute long. */
558 offset = read_im32(s);
559 return gen_im32(offset);
560 case 2: /* pc displacement */
561 tmp = gen_new_qreg(QMODE_I32);
563 offset += ldsw_code(s->pc);
565 return gen_im32(offset);
566 case 3: /* pc index+displacement. */
567 return gen_lea_indexed(s, opsize, NULL_QREG);
568 case 4: /* Immediate. */
573 /* Should never happen. */
577 /* Helper function for gen_ea. Reuse the computed address between the read
578    and write halves of read/write operands. */
579 static inline TCGv gen_ea_once(DisasContext *s, uint16_t insn, int opsize,
580 TCGv val, TCGv *addrp, ea_what what)
584 if (addrp && what == EA_STORE) {
587 tmp = gen_lea(s, insn, opsize);
588 if (IS_NULL_QREG(tmp))
593 return gen_ldst(s, opsize, tmp, val, what);
596 /* Generate code to load/store a value into/from an EA. If WHAT is EA_STORE
597    this is a write, otherwise it is a read (EA_LOADS sign extends, EA_LOADU
598    zero extends). ADDRP is non-null for read-write operands. */
599 static TCGv gen_ea(DisasContext *s, uint16_t insn, int opsize, TCGv val,
600 TCGv *addrp, ea_what what)
606 switch ((insn >> 3) & 7) {
607 case 0: /* Data register direct. */
609 if (what == EA_STORE) {
610 gen_partset_reg(opsize, reg, val);
613 return gen_extend(reg, opsize, what == EA_LOADS);
615 case 1: /* Address register direct. */
617 if (what == EA_STORE) {
618 tcg_gen_mov_i32(reg, val);
621 return gen_extend(reg, opsize, what == EA_LOADS);
623 case 2: /* Indirect register */
625 return gen_ldst(s, opsize, reg, val, what);
626 case 3: /* Indirect postincrement. */
628 result = gen_ldst(s, opsize, reg, val, what);
629 /* ??? This is not exception safe. The instruction may still
630 fault after this point. */
631 if (what == EA_STORE || !addrp)
632 tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
634     case 4: /* Indirect predecrement. */
637 if (addrp && what == EA_STORE) {
640 tmp = gen_lea(s, insn, opsize);
641 if (IS_NULL_QREG(tmp))
646 result = gen_ldst(s, opsize, tmp, val, what);
647 /* ??? This is not exception safe. The instruction may still
648 fault after this point. */
649 if (what == EA_STORE || !addrp) {
651 tcg_gen_mov_i32(reg, tmp);
655 case 5: /* Indirect displacement. */
656 case 6: /* Indirect index + displacement. */
657 return gen_ea_once(s, insn, opsize, val, addrp, what);
660 case 0: /* Absolute short. */
661 case 1: /* Absolute long. */
662 case 2: /* pc displacement */
663 case 3: /* pc index+displacement. */
664 return gen_ea_once(s, insn, opsize, val, addrp, what);
665 case 4: /* Immediate. */
666 /* Sign extend values for consistency. */
669 if (what == EA_LOADS)
670 offset = ldsb_code(s->pc + 1);
672 offset = ldub_code(s->pc + 1);
676 if (what == EA_LOADS)
677 offset = ldsw_code(s->pc);
679 offset = lduw_code(s->pc);
683 offset = read_im32(s);
686 qemu_assert(0, "Bad immediate operand");
688 return tcg_const_i32(offset);
693 /* Should never happen. */
697 /* This generates a conditional branch, clobbering all temporaries. */
698 static void gen_jmpcc(DisasContext *s, int cond, int l1)
702 /* TODO: Optimize compare/branch pairs rather than always flushing
703 flag state to CC_OP_FLAGS. */
711 case 2: /* HI (!C && !Z) */
712 tmp = gen_new_qreg(QMODE_I32);
713 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
714 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
716 case 3: /* LS (C || Z) */
717 tmp = gen_new_qreg(QMODE_I32);
718 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
719 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
721 case 4: /* CC (!C) */
722 tmp = gen_new_qreg(QMODE_I32);
723 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
724 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
727 tmp = gen_new_qreg(QMODE_I32);
728 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
729 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
731 case 6: /* NE (!Z) */
732 tmp = gen_new_qreg(QMODE_I32);
733 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
734 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
737 tmp = gen_new_qreg(QMODE_I32);
738 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
739 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
741 case 8: /* VC (!V) */
742 tmp = gen_new_qreg(QMODE_I32);
743 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
744 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
747 tmp = gen_new_qreg(QMODE_I32);
748 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
749 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
751 case 10: /* PL (!N) */
752 tmp = gen_new_qreg(QMODE_I32);
753 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
754 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
756 case 11: /* MI (N) */
757 tmp = gen_new_qreg(QMODE_I32);
758 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
759 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
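    /* The signed conditions below exploit CCF_V == CCF_N >> 2: shifting the
       flags right by two aligns N with V, so an XOR and a mask leave N ^ V
       in the V bit position. */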
761 case 12: /* GE (!(N ^ V)) */
762 tmp = gen_new_qreg(QMODE_I32);
763 assert(CCF_V == (CCF_N >> 2));
764 tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
765 tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
766 tcg_gen_andi_i32(tmp, tmp, CCF_V);
767 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
769 case 13: /* LT (N ^ V) */
770 tmp = gen_new_qreg(QMODE_I32);
771 assert(CCF_V == (CCF_N >> 2));
772 tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
773 tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
774 tcg_gen_andi_i32(tmp, tmp, CCF_V);
775 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
777 case 14: /* GT (!(Z || (N ^ V))) */
778 tmp = gen_new_qreg(QMODE_I32);
779 assert(CCF_V == (CCF_N >> 2));
780 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
781 tcg_gen_shri_i32(tmp, tmp, 2);
782 tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
783 tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
784 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
786 case 15: /* LE (Z || (N ^ V)) */
787 tmp = gen_new_qreg(QMODE_I32);
788 assert(CCF_V == (CCF_N >> 2));
789 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
790 tcg_gen_shri_i32(tmp, tmp, 2);
791 tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
792 tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
793 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
796         /* Should never happen. */
807 l1 = gen_new_label();
808 cond = (insn >> 8) & 0xf;
810 tcg_gen_andi_i32(reg, reg, 0xffffff00);
811 /* This is safe because we modify the reg directly, with no other values
813 gen_jmpcc(s, cond ^ 1, l1);
814 tcg_gen_ori_i32(reg, reg, 0xff);
818 /* Force a TB lookup after an instruction that changes the CPU state. */
819 static void gen_lookup_tb(DisasContext *s)
822 tcg_gen_movi_i32(QREG_PC, s->pc);
823 s->is_jmp = DISAS_UPDATE;
826 /* Generate a jump to an immediate address. */
827 static void gen_jmp_im(DisasContext *s, uint32_t dest)
830 tcg_gen_movi_i32(QREG_PC, dest);
831 s->is_jmp = DISAS_JUMP;
834 /* Generate a jump to the address in qreg DEST. */
835 static void gen_jmp(DisasContext *s, TCGv dest)
838 tcg_gen_mov_i32(QREG_PC, dest);
839 s->is_jmp = DISAS_JUMP;
842 static void gen_exception(DisasContext *s, uint32_t where, int nr)
845 gen_jmp_im(s, where);
846 gen_helper_raise_exception(tcg_const_i32(nr));
849 static inline void gen_addr_fault(DisasContext *s)
851 gen_exception(s, s->insn_pc, EXCP_ADDRESS);
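/* SRC_EA/DEST_EA wrap gen_ea for the common case: evaluate the effective
   address of the current instruction and, if the addressing mode is invalid
   (gen_ea returns NULL_QREG), bail out of the instruction. */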
854 #define SRC_EA(result, opsize, op_sign, addrp) do { \
855 result = gen_ea(s, insn, opsize, NULL_QREG, addrp, op_sign ? EA_LOADS : EA_LOADU); \
856 if (IS_NULL_QREG(result)) { \
862 #define DEST_EA(insn, opsize, val, addrp) do { \
863 TCGv ea_result = gen_ea(s, insn, opsize, val, addrp, EA_STORE); \
864 if (IS_NULL_QREG(ea_result)) { \
870 /* Generate a jump to an immediate address. */
871 static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
873 TranslationBlock *tb;
876 if (unlikely(s->singlestep_enabled)) {
877 gen_exception(s, dest, EXCP_DEBUG);
878 } else if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
879 (s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
881 tcg_gen_movi_i32(QREG_PC, dest);
882 tcg_gen_exit_tb((long)tb + n);
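        /* Exiting with (tb | n) tells the execution loop which of the two
           exits was taken, so the jump can be patched to chain this TB
           directly to its successor. */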
887 s->is_jmp = DISAS_TB_JUMP;
890 DISAS_INSN(undef_mac)
892 gen_exception(s, s->pc - 2, EXCP_LINEA);
895 DISAS_INSN(undef_fpu)
897 gen_exception(s, s->pc - 2, EXCP_LINEF);
902 gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
903 cpu_abort(cpu_single_env, "Illegal instruction: %04x @ %08x",
914 sign = (insn & 0x100) != 0;
916 tmp = gen_new_qreg(QMODE_I32);
918 tcg_gen_ext16s_i32(tmp, reg);
920 tcg_gen_ext16u_i32(tmp, reg);
921 SRC_EA(src, OS_WORD, sign, NULL);
922 tcg_gen_mul_i32(tmp, tmp, src);
923 tcg_gen_mov_i32(reg, tmp);
924 /* Unlike m68k, coldfire always clears the overflow bit. */
925 gen_logic_cc(s, tmp);
935 sign = (insn & 0x100) != 0;
938 tcg_gen_ext16s_i32(QREG_DIV1, reg);
940 tcg_gen_ext16u_i32(QREG_DIV1, reg);
942 SRC_EA(src, OS_WORD, sign, NULL);
943 tcg_gen_mov_i32(QREG_DIV2, src);
945 gen_helper_divs(cpu_env, tcg_const_i32(1));
947 gen_helper_divu(cpu_env, tcg_const_i32(1));
950 tmp = gen_new_qreg(QMODE_I32);
951 src = gen_new_qreg(QMODE_I32);
952 tcg_gen_ext16u_i32(tmp, QREG_DIV1);
953 tcg_gen_shli_i32(src, QREG_DIV2, 16);
954 tcg_gen_or_i32(reg, tmp, src);
955 s->cc_op = CC_OP_FLAGS;
965 ext = lduw_code(s->pc);
968 gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
973 tcg_gen_mov_i32(QREG_DIV1, num);
974 SRC_EA(den, OS_LONG, 0, NULL);
975 tcg_gen_mov_i32(QREG_DIV2, den);
977 gen_helper_divs(cpu_env, tcg_const_i32(0));
979 gen_helper_divu(cpu_env, tcg_const_i32(0));
981 if ((ext & 7) == ((ext >> 12) & 7)) {
983 tcg_gen_mov_i32 (reg, QREG_DIV1);
986 tcg_gen_mov_i32 (reg, QREG_DIV2);
988 s->cc_op = CC_OP_FLAGS;
1000 add = (insn & 0x4000) != 0;
1001 reg = DREG(insn, 9);
1002 dest = gen_new_qreg(QMODE_I32);
1004 SRC_EA(tmp, OS_LONG, 0, &addr);
1008 SRC_EA(src, OS_LONG, 0, NULL);
1011 tcg_gen_add_i32(dest, tmp, src);
1012 gen_helper_xflag_lt(QREG_CC_X, dest, src);
1013 s->cc_op = CC_OP_ADD;
1015 gen_helper_xflag_lt(QREG_CC_X, tmp, src);
1016 tcg_gen_sub_i32(dest, tmp, src);
1017 s->cc_op = CC_OP_SUB;
1019 gen_update_cc_add(dest, src);
1021 DEST_EA(insn, OS_LONG, dest, &addr);
1023 tcg_gen_mov_i32(reg, dest);
1028 /* Reverse the order of the bits in REG. */
1032 reg = DREG(insn, 0);
1033 gen_helper_bitrev(reg, reg);
1036 DISAS_INSN(bitop_reg)
1046 if ((insn & 0x38) != 0)
1050 op = (insn >> 6) & 3;
1051 SRC_EA(src1, opsize, 0, op ? &addr: NULL);
1052 src2 = DREG(insn, 9);
1053 dest = gen_new_qreg(QMODE_I32);
1056 tmp = gen_new_qreg(QMODE_I32);
1057 if (opsize == OS_BYTE)
1058 tcg_gen_andi_i32(tmp, src2, 7);
1060 tcg_gen_andi_i32(tmp, src2, 31);
1062 tmp = gen_new_qreg(QMODE_I32);
1063 tcg_gen_shr_i32(tmp, src1, src2);
1064 tcg_gen_andi_i32(tmp, tmp, 1);
1065 tcg_gen_shli_i32(tmp, tmp, 2);
1066 /* Clear CCF_Z if bit set. */
1067 tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
1068 tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
1070 tcg_gen_shl_i32(tmp, tcg_const_i32(1), src2);
1073 tcg_gen_xor_i32(dest, src1, tmp);
1076 tcg_gen_not_i32(tmp, tmp);
1077 tcg_gen_and_i32(dest, src1, tmp);
1080 tcg_gen_or_i32(dest, src1, tmp);
1086 DEST_EA(insn, opsize, dest, &addr);
1092 reg = DREG(insn, 0);
1094 gen_helper_sats(reg, reg, QREG_CC_DEST);
1095 gen_logic_cc(s, reg);
1098 static void gen_push(DisasContext *s, TCGv val)
1102 tmp = gen_new_qreg(QMODE_I32);
1103 tcg_gen_subi_i32(tmp, QREG_SP, 4);
1104 gen_store(s, OS_LONG, tmp, val);
1105 tcg_gen_mov_i32(QREG_SP, tmp);
1117 mask = lduw_code(s->pc);
1119 tmp = gen_lea(s, insn, OS_LONG);
1120 if (IS_NULL_QREG(tmp)) {
1124 addr = gen_new_qreg(QMODE_I32);
1125 tcg_gen_mov_i32(addr, tmp);
1126 is_load = ((insn & 0x0400) != 0);
1127 for (i = 0; i < 16; i++, mask >>= 1) {
1134 tmp = gen_load(s, OS_LONG, addr, 0);
1135 tcg_gen_mov_i32(reg, tmp);
1137 gen_store(s, OS_LONG, addr, reg);
1140 tcg_gen_addi_i32(addr, addr, 4);
1145 DISAS_INSN(bitop_im)
1155 if ((insn & 0x38) != 0)
1159 op = (insn >> 6) & 3;
1161 bitnum = lduw_code(s->pc);
1163 if (bitnum & 0xff00) {
1164 disas_undef(s, insn);
1168 SRC_EA(src1, opsize, 0, op ? &addr: NULL);
1171 if (opsize == OS_BYTE)
1177 tmp = gen_new_qreg(QMODE_I32);
1178 assert (CCF_Z == (1 << 2));
1180 tcg_gen_shri_i32(tmp, src1, bitnum - 2);
1181 else if (bitnum < 2)
1182 tcg_gen_shli_i32(tmp, src1, 2 - bitnum);
1184 tcg_gen_mov_i32(tmp, src1);
1185 tcg_gen_andi_i32(tmp, tmp, CCF_Z);
1186 /* Clear CCF_Z if bit set. */
1187 tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
1188 tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
1192 tcg_gen_xori_i32(tmp, src1, mask);
1195 tcg_gen_andi_i32(tmp, src1, ~mask);
1198 tcg_gen_ori_i32(tmp, src1, mask);
1203 DEST_EA(insn, opsize, tmp, &addr);
1207 DISAS_INSN(arith_im)
1215 op = (insn >> 9) & 7;
1216 SRC_EA(src1, OS_LONG, 0, (op == 6) ? NULL : &addr);
1218 dest = gen_new_qreg(QMODE_I32);
1221 tcg_gen_ori_i32(dest, src1, im);
1222 gen_logic_cc(s, dest);
1225 tcg_gen_andi_i32(dest, src1, im);
1226 gen_logic_cc(s, dest);
1229 tcg_gen_mov_i32(dest, src1);
1230 gen_helper_xflag_lt(QREG_CC_X, dest, gen_im32(im));
1231 tcg_gen_subi_i32(dest, dest, im);
1232 gen_update_cc_add(dest, gen_im32(im));
1233 s->cc_op = CC_OP_SUB;
1236 tcg_gen_mov_i32(dest, src1);
1237 tcg_gen_addi_i32(dest, dest, im);
1238 gen_update_cc_add(dest, gen_im32(im));
1239 gen_helper_xflag_lt(QREG_CC_X, dest, gen_im32(im));
1240 s->cc_op = CC_OP_ADD;
1243 tcg_gen_xori_i32(dest, src1, im);
1244 gen_logic_cc(s, dest);
1247 tcg_gen_mov_i32(dest, src1);
1248 tcg_gen_subi_i32(dest, dest, im);
1249 gen_update_cc_add(dest, gen_im32(im));
1250 s->cc_op = CC_OP_SUB;
1256 DEST_EA(insn, OS_LONG, dest, &addr);
1264 reg = DREG(insn, 0);
1265 tcg_gen_bswap_i32(reg, reg);
1275 switch (insn >> 12) {
1276 case 1: /* move.b */
1279 case 2: /* move.l */
1282 case 3: /* move.w */
1288 SRC_EA(src, opsize, 1, NULL);
1289 op = (insn >> 6) & 7;
1292 /* The value will already have been sign extended. */
1293 dest = AREG(insn, 9);
1294 tcg_gen_mov_i32(dest, src);
1298 dest_ea = ((insn >> 9) & 7) | (op << 3);
1299 DEST_EA(dest_ea, opsize, src, NULL);
1300 /* This will be correct because loads sign extend. */
1301 gen_logic_cc(s, src);
1310 reg = DREG(insn, 0);
1311 gen_helper_subx_cc(reg, cpu_env, tcg_const_i32(0), reg);
1319 reg = AREG(insn, 9);
1320 tmp = gen_lea(s, insn, OS_LONG);
1321 if (IS_NULL_QREG(tmp)) {
1325 tcg_gen_mov_i32(reg, tmp);
1332 switch ((insn >> 6) & 3) {
1345 DEST_EA(insn, opsize, gen_im32(0), NULL);
1346 gen_logic_cc(s, gen_im32(0));
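/* Assemble the CCR: the X flag is kept in its own register and occupies
   bit 4, while the N/Z/V/C flags live in CC_DEST. */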
1349 static TCGv gen_get_ccr(DisasContext *s)
1354 dest = gen_new_qreg(QMODE_I32);
1355 tcg_gen_shli_i32(dest, QREG_CC_X, 4);
1356 tcg_gen_or_i32(dest, dest, QREG_CC_DEST);
1360 DISAS_INSN(move_from_ccr)
1365 ccr = gen_get_ccr(s);
1366 reg = DREG(insn, 0);
1367 gen_partset_reg(OS_WORD, reg, ccr);
1375 reg = DREG(insn, 0);
1376 src1 = gen_new_qreg(QMODE_I32);
1377 tcg_gen_mov_i32(src1, reg);
1378 tcg_gen_neg_i32(reg, src1);
1379 s->cc_op = CC_OP_SUB;
1380 gen_update_cc_add(reg, src1);
1381 gen_helper_xflag_lt(QREG_CC_X, tcg_const_i32(0), src1);
1382 s->cc_op = CC_OP_SUB;
1385 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
1387 tcg_gen_movi_i32(QREG_CC_DEST, val & 0xf);
1388 tcg_gen_movi_i32(QREG_CC_X, (val & 0x10) >> 4);
1390 gen_helper_set_sr(cpu_env, tcg_const_i32(val & 0xff00));
1394 static void gen_set_sr(DisasContext *s, uint16_t insn, int ccr_only)
1399 s->cc_op = CC_OP_FLAGS;
1400 if ((insn & 0x38) == 0)
1402 tmp = gen_new_qreg(QMODE_I32);
1403 reg = DREG(insn, 0);
1404 tcg_gen_andi_i32(QREG_CC_DEST, reg, 0xf);
1405 tcg_gen_shri_i32(tmp, reg, 4);
1406 tcg_gen_andi_i32(QREG_CC_X, tmp, 1);
1408 gen_helper_set_sr(cpu_env, reg);
1411 else if ((insn & 0x3f) == 0x3c)
1414 val = lduw_code(s->pc);
1416 gen_set_sr_im(s, val, ccr_only);
1419 disas_undef(s, insn);
1422 DISAS_INSN(move_to_ccr)
1424 gen_set_sr(s, insn, 1);
1431 reg = DREG(insn, 0);
1432 tcg_gen_not_i32(reg, reg);
1433 gen_logic_cc(s, reg);
1442 src1 = gen_new_qreg(QMODE_I32);
1443 src2 = gen_new_qreg(QMODE_I32);
1444 reg = DREG(insn, 0);
1445 tcg_gen_shli_i32(src1, reg, 16);
1446 tcg_gen_shri_i32(src2, reg, 16);
1447 tcg_gen_or_i32(reg, src1, src2);
1448 gen_logic_cc(s, reg);
1455 tmp = gen_lea(s, insn, OS_LONG);
1456 if (IS_NULL_QREG(tmp)) {
1469 reg = DREG(insn, 0);
1470 op = (insn >> 6) & 7;
1471 tmp = gen_new_qreg(QMODE_I32);
1473 tcg_gen_ext16s_i32(tmp, reg);
1475 tcg_gen_ext8s_i32(tmp, reg);
1477 gen_partset_reg(OS_WORD, reg, tmp);
1479 tcg_gen_mov_i32(reg, tmp);
1480 gen_logic_cc(s, tmp);
1488 switch ((insn >> 6) & 3) {
1501 SRC_EA(tmp, opsize, 1, NULL);
1502 gen_logic_cc(s, tmp);
1507 /* Implemented as a NOP. */
1512 gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
1515 /* ??? This should be atomic. */
1522 dest = gen_new_qreg(QMODE_I32);
1523 SRC_EA(src1, OS_BYTE, 1, &addr);
1524 gen_logic_cc(s, src1);
1525 tcg_gen_ori_i32(dest, src1, 0x80);
1526 DEST_EA(insn, OS_BYTE, dest, &addr);
1536 /* The upper 32 bits of the product are discarded, so
1537 muls.l and mulu.l are functionally equivalent. */
1538 ext = lduw_code(s->pc);
1541 gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
1544 reg = DREG(ext, 12);
1545 SRC_EA(src1, OS_LONG, 0, NULL);
1546 dest = gen_new_qreg(QMODE_I32);
1547 tcg_gen_mul_i32(dest, src1, reg);
1548 tcg_gen_mov_i32(reg, dest);
1549 /* Unlike m68k, coldfire always clears the overflow bit. */
1550 gen_logic_cc(s, dest);
1559 offset = ldsw_code(s->pc);
1561 reg = AREG(insn, 0);
1562 tmp = gen_new_qreg(QMODE_I32);
1563 tcg_gen_subi_i32(tmp, QREG_SP, 4);
1564 gen_store(s, OS_LONG, tmp, reg);
1565 if ((insn & 7) != 7)
1566 tcg_gen_mov_i32(reg, tmp);
1567 tcg_gen_addi_i32(QREG_SP, tmp, offset);
1576 src = gen_new_qreg(QMODE_I32);
1577 reg = AREG(insn, 0);
1578 tcg_gen_mov_i32(src, reg);
1579 tmp = gen_load(s, OS_LONG, src, 0);
1580 tcg_gen_mov_i32(reg, tmp);
1581 tcg_gen_addi_i32(QREG_SP, src, 4);
1592 tmp = gen_load(s, OS_LONG, QREG_SP, 0);
1593 tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
1601 /* Load the target address first to ensure correct exception
1603 tmp = gen_lea(s, insn, OS_LONG);
1604 if (IS_NULL_QREG(tmp)) {
1608 if ((insn & 0x40) == 0) {
1610 gen_push(s, gen_im32(s->pc));
1623 SRC_EA(src1, OS_LONG, 0, &addr);
1624 val = (insn >> 9) & 7;
1627 dest = gen_new_qreg(QMODE_I32);
1628 tcg_gen_mov_i32(dest, src1);
1629 if ((insn & 0x38) == 0x08) {
1630 /* Don't update condition codes if the destination is an
1631 address register. */
1632 if (insn & 0x0100) {
1633 tcg_gen_subi_i32(dest, dest, val);
1635 tcg_gen_addi_i32(dest, dest, val);
1638 src2 = gen_im32(val);
1639 if (insn & 0x0100) {
1640 gen_helper_xflag_lt(QREG_CC_X, dest, src2);
1641 tcg_gen_subi_i32(dest, dest, val);
1642 s->cc_op = CC_OP_SUB;
1644 tcg_gen_addi_i32(dest, dest, val);
1645 gen_helper_xflag_lt(QREG_CC_X, dest, src2);
1646 s->cc_op = CC_OP_ADD;
1648 gen_update_cc_add(dest, src2);
1650 DEST_EA(insn, OS_LONG, dest, &addr);
1656 case 2: /* One extension word. */
1659 case 3: /* Two extension words. */
1662 case 4: /* No extension words. */
1665 disas_undef(s, insn);
1677 op = (insn >> 8) & 0xf;
1678 offset = (int8_t)insn;
1680 offset = ldsw_code(s->pc);
1682 } else if (offset == -1) {
1683 offset = read_im32(s);
1687 gen_push(s, gen_im32(s->pc));
1692 l1 = gen_new_label();
1693 gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
1694 gen_jmp_tb(s, 1, base + offset);
1696 gen_jmp_tb(s, 0, s->pc);
1698 /* Unconditional branch. */
1699 gen_jmp_tb(s, 0, base + offset);
1708 tcg_gen_movi_i32(DREG(insn, 9), val);
1709 gen_logic_cc(s, tcg_const_i32(val));
1722 SRC_EA(src, opsize, (insn & 0x80) != 0, NULL);
1723 reg = DREG(insn, 9);
1724 tcg_gen_mov_i32(reg, src);
1725 gen_logic_cc(s, src);
1735 reg = DREG(insn, 9);
1736 dest = gen_new_qreg(QMODE_I32);
1738 SRC_EA(src, OS_LONG, 0, &addr);
1739 tcg_gen_or_i32(dest, src, reg);
1740 DEST_EA(insn, OS_LONG, dest, &addr);
1742 SRC_EA(src, OS_LONG, 0, NULL);
1743 tcg_gen_or_i32(dest, src, reg);
1744 tcg_gen_mov_i32(reg, dest);
1746 gen_logic_cc(s, dest);
1754 SRC_EA(src, OS_LONG, 0, NULL);
1755 reg = AREG(insn, 9);
1756 tcg_gen_sub_i32(reg, reg, src);
1765 reg = DREG(insn, 9);
1766 src = DREG(insn, 0);
1767 gen_helper_subx_cc(reg, cpu_env, reg, src);
1775 val = (insn >> 9) & 7;
1778 src = gen_im32(val);
1779 gen_logic_cc(s, src);
1780 DEST_EA(insn, OS_LONG, src, NULL);
1791 op = (insn >> 6) & 3;
1795 s->cc_op = CC_OP_CMPB;
1799 s->cc_op = CC_OP_CMPW;
1803 s->cc_op = CC_OP_SUB;
1808 SRC_EA(src, opsize, 1, NULL);
1809 reg = DREG(insn, 9);
1810 dest = gen_new_qreg(QMODE_I32);
1811 tcg_gen_sub_i32(dest, reg, src);
1812 gen_update_cc_add(dest, src);
1827 SRC_EA(src, opsize, 1, NULL);
1828 reg = AREG(insn, 9);
1829 dest = gen_new_qreg(QMODE_I32);
1830 tcg_gen_sub_i32(dest, reg, src);
1831 gen_update_cc_add(dest, src);
1832 s->cc_op = CC_OP_SUB;
1842 SRC_EA(src, OS_LONG, 0, &addr);
1843 reg = DREG(insn, 9);
1844 dest = gen_new_qreg(QMODE_I32);
1845 tcg_gen_xor_i32(dest, src, reg);
1846 gen_logic_cc(s, dest);
1847 DEST_EA(insn, OS_LONG, dest, &addr);
1857 reg = DREG(insn, 9);
1858 dest = gen_new_qreg(QMODE_I32);
1860 SRC_EA(src, OS_LONG, 0, &addr);
1861 tcg_gen_and_i32(dest, src, reg);
1862 DEST_EA(insn, OS_LONG, dest, &addr);
1864 SRC_EA(src, OS_LONG, 0, NULL);
1865 tcg_gen_and_i32(dest, src, reg);
1866 tcg_gen_mov_i32(reg, dest);
1868 gen_logic_cc(s, dest);
1876 SRC_EA(src, OS_LONG, 0, NULL);
1877 reg = AREG(insn, 9);
1878 tcg_gen_add_i32(reg, reg, src);
1887 reg = DREG(insn, 9);
1888 src = DREG(insn, 0);
1889 gen_helper_addx_cc(reg, cpu_env, reg, src);
1890 s->cc_op = CC_OP_FLAGS;
1893 /* TODO: This could be implemented without helper functions. */
1894 DISAS_INSN(shift_im)
1900 reg = DREG(insn, 0);
1901 tmp = (insn >> 9) & 7;
1904 shift = gen_im32(tmp);
1905     /* No need to flush flags because we know we will set the C flag. */
1907 gen_helper_shl_cc(reg, cpu_env, reg, shift);
1910 gen_helper_shr_cc(reg, cpu_env, reg, shift);
1912 gen_helper_sar_cc(reg, cpu_env, reg, shift);
1915 s->cc_op = CC_OP_SHIFT;
1918 DISAS_INSN(shift_reg)
1923 reg = DREG(insn, 0);
1924 shift = DREG(insn, 9);
1925 /* Shift by zero leaves C flag unmodified. */
1928 gen_helper_shl_cc(reg, cpu_env, reg, shift);
1931 gen_helper_shr_cc(reg, cpu_env, reg, shift);
1933 gen_helper_sar_cc(reg, cpu_env, reg, shift);
1936 s->cc_op = CC_OP_SHIFT;
1942 reg = DREG(insn, 0);
1943 gen_logic_cc(s, reg);
1944 gen_helper_ff1(reg, reg);
1947 static TCGv gen_get_sr(DisasContext *s)
1952 ccr = gen_get_ccr(s);
1953 sr = gen_new_qreg(QMODE_I32);
1954 tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
1955 tcg_gen_or_i32(sr, sr, ccr);
1965 ext = lduw_code(s->pc);
1967 if (ext != 0x46FC) {
1968 gen_exception(s, addr, EXCP_UNSUPPORTED);
1971 ext = lduw_code(s->pc);
1973 if (IS_USER(s) || (ext & SR_S) == 0) {
1974 gen_exception(s, addr, EXCP_PRIVILEGE);
1977 gen_push(s, gen_get_sr(s));
1978 gen_set_sr_im(s, ext, 0);
1981 DISAS_INSN(move_from_sr)
1987 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
1991 reg = DREG(insn, 0);
1992 gen_partset_reg(OS_WORD, reg, sr);
1995 DISAS_INSN(move_to_sr)
1998 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2001 gen_set_sr(s, insn, 0);
2005 DISAS_INSN(move_from_usp)
2008 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2011 /* TODO: Implement USP. */
2012 gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
2015 DISAS_INSN(move_to_usp)
2018 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2021 /* TODO: Implement USP. */
2022 gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
2027 gen_exception(s, s->pc, EXCP_HALT_INSN);
2035 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2039 ext = lduw_code(s->pc);
2042 gen_set_sr_im(s, ext, 0);
2043 tcg_gen_movi_i32(QREG_HALTED, 1);
2044 gen_exception(s, s->pc, EXCP_HLT);
2050 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2053 gen_exception(s, s->pc - 2, EXCP_RTE);
2062 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2066 ext = lduw_code(s->pc);
2070 reg = AREG(ext, 12);
2072 reg = DREG(ext, 12);
2074 gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
2081 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2084 /* ICache fetch. Implement as no-op. */
2090 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2093 /* Cache push/invalidate. Implement as no-op. */
2098 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2104 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2107 /* TODO: Implement wdebug. */
2108 qemu_assert(0, "WDEBUG not implemented");
2113 gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
2116 /* ??? FP exceptions are not implemented. Most exceptions are deferred until
2117 immediately before the next FP instruction is executed. */
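/* FPU instructions carry an extension word; bits 15-13 of it select between
   register/memory operations, fmove out to memory, moves to and from the FP
   control registers, and fmovem. */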
2128 ext = lduw_code(s->pc);
2130 opmode = ext & 0x7f;
2131 switch ((ext >> 13) & 7) {
2136 case 3: /* fmove out */
2139 /* ??? TODO: Proper behavior on overflow. */
2140 switch ((ext >> 10) & 7) {
2143 res = gen_new_qreg(QMODE_I32);
2144 gen_helper_f64_to_i32(res, cpu_env, src);
2148 res = gen_new_qreg(QMODE_F32);
2149 gen_helper_f64_to_f32(res, cpu_env, src);
2153 res = gen_new_qreg(QMODE_I32);
2154 gen_helper_f64_to_i32(res, cpu_env, src);
2162 res = gen_new_qreg(QMODE_I32);
2163 gen_helper_f64_to_i32(res, cpu_env, src);
2168 DEST_EA(insn, opsize, res, NULL);
2170 case 4: /* fmove to control register. */
2171 switch ((ext >> 10) & 7) {
2173 /* Not implemented. Ignore writes. */
2178 cpu_abort(NULL, "Unimplemented: fmove to control %d",
2182 case 5: /* fmove from control register. */
2183 switch ((ext >> 10) & 7) {
2185 /* Not implemented. Always return zero. */
2191 cpu_abort(NULL, "Unimplemented: fmove from control %d",
2195 DEST_EA(insn, OS_LONG, res, NULL);
2197 case 6: /* fmovem */
2203 if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
2205 src = gen_lea(s, insn, OS_LONG);
2206 if (IS_NULL_QREG(src)) {
2210 addr = gen_new_qreg(QMODE_I32);
2211 tcg_gen_mov_i32(addr, src);
2213 for (i = 0; i < 8; i++) {
2217 if (ext & (1 << 13)) {
2219 tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
2222 tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
2224 if (ext & (mask - 1))
2225 tcg_gen_addi_i32(addr, addr, 8);
2232 if (ext & (1 << 14)) {
2235 /* Source effective address. */
2236 switch ((ext >> 10) & 7) {
2237 case 0: opsize = OS_LONG; break;
2238 case 1: opsize = OS_SINGLE; break;
2239 case 4: opsize = OS_WORD; break;
2240 case 5: opsize = OS_DOUBLE; break;
2241 case 6: opsize = OS_BYTE; break;
2245 SRC_EA(tmp, opsize, 1, NULL);
2246 if (opsize == OS_DOUBLE) {
2249 src = gen_new_qreg(QMODE_F64);
2254 gen_helper_i32_to_f64(src, cpu_env, tmp);
2257 gen_helper_f32_to_f64(src, cpu_env, tmp);
2262 /* Source register. */
2263 src = FREG(ext, 10);
2265 dest = FREG(ext, 7);
2266 res = gen_new_qreg(QMODE_F64);
2268 tcg_gen_mov_f64(res, dest);
2271 case 0: case 0x40: case 0x44: /* fmove */
2272 tcg_gen_mov_f64(res, src);
2275 gen_helper_iround_f64(res, cpu_env, src);
2278 case 3: /* fintrz */
2279 gen_helper_itrunc_f64(res, cpu_env, src);
2282 case 4: case 0x41: case 0x45: /* fsqrt */
2283 gen_helper_sqrt_f64(res, cpu_env, src);
2285 case 0x18: case 0x58: case 0x5c: /* fabs */
2286 gen_helper_abs_f64(res, src);
2288 case 0x1a: case 0x5a: case 0x5e: /* fneg */
2289 gen_helper_chs_f64(res, src);
2291 case 0x20: case 0x60: case 0x64: /* fdiv */
2292 gen_helper_div_f64(res, cpu_env, res, src);
2294 case 0x22: case 0x62: case 0x66: /* fadd */
2295 gen_helper_add_f64(res, cpu_env, res, src);
2297 case 0x23: case 0x63: case 0x67: /* fmul */
2298 gen_helper_mul_f64(res, cpu_env, res, src);
2300 case 0x28: case 0x68: case 0x6c: /* fsub */
2301 gen_helper_sub_f64(res, cpu_env, res, src);
2303 case 0x38: /* fcmp */
2304 gen_helper_sub_cmp_f64(res, cpu_env, res, src);
2308 case 0x3a: /* ftst */
2309 tcg_gen_mov_f64(res, src);
2317 if (opmode & 0x40) {
2318 if ((opmode & 0x4) != 0)
2320 } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
2327 tmp = gen_new_qreg(QMODE_F32);
2328 gen_helper_f64_to_f32(tmp, cpu_env, res);
2329 gen_helper_f32_to_f64(res, cpu_env, tmp);
2331 tcg_gen_mov_f64(QREG_FP_RESULT, res);
2332 if (!IS_NULL_QREG(dest)) {
2333 tcg_gen_mov_f64(dest, res);
2338 disas_undef_fpu(s, insn);
2349 offset = ldsw_code(s->pc);
2351 if (insn & (1 << 6)) {
2352 offset = (offset << 16) | lduw_code(s->pc);
2356 l1 = gen_new_label();
2357 /* TODO: Raise BSUN exception. */
2358 flag = gen_new_qreg(QMODE_I32);
2359 gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
2360 /* Jump to l1 if condition is true. */
2361 switch (insn & 0xf) {
2364 case 1: /* eq (=0) */
2365 tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
2367 case 2: /* ogt (=1) */
2368 tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
2370 case 3: /* oge (=0 or =1) */
2371 tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
2373 case 4: /* olt (=-1) */
2374 tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
2376 case 5: /* ole (=-1 or =0) */
2377 tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
2379 case 6: /* ogl (=-1 or =1) */
2380 tcg_gen_andi_i32(flag, flag, 1);
2381 tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
2383 case 7: /* or (=2) */
2384 tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
2386 case 8: /* un (<2) */
2387 tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
2389 case 9: /* ueq (=0 or =2) */
2390 tcg_gen_andi_i32(flag, flag, 1);
2391 tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
2393 case 10: /* ugt (>0) */
2394 tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
2396 case 11: /* uge (>=0) */
2397 tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
2399 case 12: /* ult (=-1 or =2) */
2400 tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
2402 case 13: /* ule (!=1) */
2403 tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
2405 case 14: /* ne (!=0) */
2406 tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
2412 gen_jmp_tb(s, 0, s->pc);
2414 gen_jmp_tb(s, 1, addr + offset);
2417 DISAS_INSN(frestore)
2419 /* TODO: Implement frestore. */
2420 qemu_assert(0, "FRESTORE not implemented");
2425 /* TODO: Implement fsave. */
2426 qemu_assert(0, "FSAVE not implemented");
2429 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
2431 TCGv tmp = gen_new_qreg(QMODE_I32);
2432 if (s->env->macsr & MACSR_FI) {
2434 tcg_gen_andi_i32(tmp, val, 0xffff0000);
2436 tcg_gen_shli_i32(tmp, val, 16);
2437 } else if (s->env->macsr & MACSR_SU) {
2439 tcg_gen_sari_i32(tmp, val, 16);
2441 tcg_gen_ext16s_i32(tmp, val);
2444 tcg_gen_shri_i32(tmp, val, 16);
2446 tcg_gen_ext16u_i32(tmp, val);
2451 static void gen_mac_clear_flags(void)
2453 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
2454 ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
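/* ColdFire (E)MAC multiply-accumulate: multiply two register operands (or
   16-bit halves of them), optionally shift the product, then add or subtract
   it from one of the four accumulators, optionally in parallel with a
   memory load. */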
2469 if (IS_NULL_QREG(s->mactmp))
2470 s->mactmp = tcg_temp_new(TCG_TYPE_I64);
2472 ext = lduw_code(s->pc);
2475 acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
2476 dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
2477 if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
2478 disas_undef(s, insn);
2482 /* MAC with load. */
2483 tmp = gen_lea(s, insn, OS_LONG);
2484 addr = gen_new_qreg(QMODE_I32);
2485 tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
2486 /* Load the value now to ensure correct exception behavior.
2487 Perform writeback after reading the MAC inputs. */
2488 loadval = gen_load(s, OS_LONG, addr, 0);
2491 rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
2492 ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
2494 loadval = addr = NULL_QREG;
2495 rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
2496 ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2499 gen_mac_clear_flags();
2502 /* Disabled because conditional branches clobber temporary vars. */
2503 if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
2504 /* Skip the multiply if we know we will ignore it. */
2505 l1 = gen_new_label();
2506 tmp = gen_new_qreg(QMODE_I32);
2507 tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
2508 gen_op_jmp_nz32(tmp, l1);
2512 if ((ext & 0x0800) == 0) {
2514 rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
2515 ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
2517 if (s->env->macsr & MACSR_FI) {
2518 gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
2520 if (s->env->macsr & MACSR_SU)
2521 gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
2523 gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
2524 switch ((ext >> 9) & 3) {
2526 tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
2529 tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
2535 /* Save the overflow flag from the multiply. */
2536 saved_flags = gen_new_qreg(QMODE_I32);
2537 tcg_gen_mov_i32(saved_flags, QREG_MACSR);
2539 saved_flags = NULL_QREG;
2543 /* Disabled because conditional branches clobber temporary vars. */
2544 if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
2545 /* Skip the accumulate if the value is already saturated. */
2546 l1 = gen_new_label();
2547 tmp = gen_new_qreg(QMODE_I32);
2548 gen_op_and32(tmp, QREG_MACSR, gen_im32(MACSR_PAV0 << acc));
2549 gen_op_jmp_nz32(tmp, l1);
2554 tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
2556 tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
2558 if (s->env->macsr & MACSR_FI)
2559 gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
2560 else if (s->env->macsr & MACSR_SU)
2561 gen_helper_macsats(cpu_env, tcg_const_i32(acc));
2563 gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
2566 /* Disabled because conditional branches clobber temporary vars. */
2572 /* Dual accumulate variant. */
2573 acc = (ext >> 2) & 3;
2574 /* Restore the overflow flag from the multiplier. */
2575 tcg_gen_mov_i32(QREG_MACSR, saved_flags);
2577 /* Disabled because conditional branches clobber temporary vars. */
2578 if ((s->env->macsr & MACSR_OMC) != 0) {
2579 /* Skip the accumulate if the value is already saturated. */
2580 l1 = gen_new_label();
2581 tmp = gen_new_qreg(QMODE_I32);
2582 gen_op_and32(tmp, QREG_MACSR, gen_im32(MACSR_PAV0 << acc));
2583 gen_op_jmp_nz32(tmp, l1);
2587 tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
2589 tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
2590 if (s->env->macsr & MACSR_FI)
2591 gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
2592 else if (s->env->macsr & MACSR_SU)
2593 gen_helper_macsats(cpu_env, tcg_const_i32(acc));
2595 gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
2597 /* Disabled because conditional branches clobber temporary vars. */
2602 gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
2606 rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
2607 tcg_gen_mov_i32(rw, loadval);
2608 /* FIXME: Should address writeback happen with the masked or
2610 switch ((insn >> 3) & 7) {
2611 case 3: /* Post-increment. */
2612 tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
2614 case 4: /* Pre-decrement. */
2615 tcg_gen_mov_i32(AREG(insn, 0), addr);
2620 DISAS_INSN(from_mac)
2626 rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2627 accnum = (insn >> 9) & 3;
2628 acc = MACREG(accnum);
2629 if (s->env->macsr & MACSR_FI) {
2630 gen_helper_get_macf(cpu_env, rx, acc);
2631 } else if ((s->env->macsr & MACSR_OMC) == 0) {
2632 tcg_gen_trunc_i64_i32(rx, acc);
2633 } else if (s->env->macsr & MACSR_SU) {
2634 gen_helper_get_macs(rx, acc);
2636 gen_helper_get_macu(rx, acc);
2639 tcg_gen_movi_i64(acc, 0);
2640 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
2644 DISAS_INSN(move_mac)
2646 /* FIXME: This can be done without a helper. */
2650 dest = tcg_const_i32((insn >> 9) & 3);
2651 gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
2652 gen_mac_clear_flags();
2653 gen_helper_mac_set_flags(cpu_env, dest);
2656 DISAS_INSN(from_macsr)
2660 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2661 tcg_gen_mov_i32(reg, QREG_MACSR);
2664 DISAS_INSN(from_mask)
2667 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2668 tcg_gen_mov_i32(reg, QREG_MAC_MASK);
2671 DISAS_INSN(from_mext)
2675 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2676 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
2677 if (s->env->macsr & MACSR_FI)
2678 gen_helper_get_mac_extf(reg, cpu_env, acc);
2680 gen_helper_get_mac_exti(reg, cpu_env, acc);
2683 DISAS_INSN(macsr_to_ccr)
2685 tcg_gen_movi_i32(QREG_CC_X, 0);
2686 tcg_gen_andi_i32(QREG_CC_DEST, QREG_MACSR, 0xf);
2687 s->cc_op = CC_OP_FLAGS;
2695 accnum = (insn >> 9) & 3;
2696 acc = MACREG(accnum);
2697 SRC_EA(val, OS_LONG, 0, NULL);
2698 if (s->env->macsr & MACSR_FI) {
2699 tcg_gen_ext_i32_i64(acc, val);
2700 tcg_gen_shli_i64(acc, acc, 8);
2701 } else if (s->env->macsr & MACSR_SU) {
2702 tcg_gen_ext_i32_i64(acc, val);
2704 tcg_gen_extu_i32_i64(acc, val);
2706 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
2707 gen_mac_clear_flags();
2708 gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
2711 DISAS_INSN(to_macsr)
2714 SRC_EA(val, OS_LONG, 0, NULL);
2715 gen_helper_set_macsr(cpu_env, val);
2722 SRC_EA(val, OS_LONG, 0, NULL);
2723 tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
2730 SRC_EA(val, OS_LONG, 0, NULL);
2731 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
2732 if (s->env->macsr & MACSR_FI)
2733 gen_helper_set_mac_extf(cpu_env, val, acc);
2734 else if (s->env->macsr & MACSR_SU)
2735 gen_helper_set_mac_exts(cpu_env, val, acc);
2737 gen_helper_set_mac_extu(cpu_env, val, acc);
2740 static disas_proc opcode_table[65536];
2743 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
2749 /* Sanity check. All set bits must be included in the mask. */
2750 if (opcode & ~mask) {
2752 "qemu internal error: bogus opcode definition %04x/%04x\n",
2756 /* This could probably be cleverer. For now just optimize the case where
2757 the top bits are known. */
2758 /* Find the first zero bit in the mask. */
2760 while ((i & mask) != 0)
2762 /* Iterate over all combinations of this and lower bits. */
2767 from = opcode & ~(i - 1);
2769 for (i = from; i < to; i++) {
2770 if ((i & mask) == opcode)
2771 opcode_table[i] = proc;
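    /* Example: lea is registered as opcode 41c0 with mask f1c0.  The upper
       four opcode bits are fully specified, so only the 4000..4fff block is
       scanned and every entry whose masked bits equal 41c0 is filled in. */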
2775 /* Register m68k opcode handlers. Order is important.
2776    Later insns override earlier ones. */
2777 void register_m68k_insns (CPUM68KState *env)
2779 #define INSN(name, opcode, mask, feature) do { \
2780 if (m68k_feature(env, M68K_FEATURE_##feature)) \
2781 register_opcode(disas_##name, 0x##opcode, 0x##mask); \
2783 INSN(undef, 0000, 0000, CF_ISA_A);
2784 INSN(arith_im, 0080, fff8, CF_ISA_A);
2785 INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
2786 INSN(bitop_reg, 0100, f1c0, CF_ISA_A);
2787 INSN(bitop_reg, 0140, f1c0, CF_ISA_A);
2788 INSN(bitop_reg, 0180, f1c0, CF_ISA_A);
2789 INSN(bitop_reg, 01c0, f1c0, CF_ISA_A);
2790 INSN(arith_im, 0280, fff8, CF_ISA_A);
2791 INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
2792 INSN(arith_im, 0480, fff8, CF_ISA_A);
2793 INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
2794 INSN(arith_im, 0680, fff8, CF_ISA_A);
2795 INSN(bitop_im, 0800, ffc0, CF_ISA_A);
2796 INSN(bitop_im, 0840, ffc0, CF_ISA_A);
2797 INSN(bitop_im, 0880, ffc0, CF_ISA_A);
2798 INSN(bitop_im, 08c0, ffc0, CF_ISA_A);
2799 INSN(arith_im, 0a80, fff8, CF_ISA_A);
2800 INSN(arith_im, 0c00, ff38, CF_ISA_A);
2801 INSN(move, 1000, f000, CF_ISA_A);
2802 INSN(move, 2000, f000, CF_ISA_A);
2803 INSN(move, 3000, f000, CF_ISA_A);
2804 INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
2805 INSN(negx, 4080, fff8, CF_ISA_A);
2806 INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
2807 INSN(lea, 41c0, f1c0, CF_ISA_A);
2808 INSN(clr, 4200, ff00, CF_ISA_A);
2809 INSN(undef, 42c0, ffc0, CF_ISA_A);
2810 INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
2811 INSN(neg, 4480, fff8, CF_ISA_A);
2812 INSN(move_to_ccr, 44c0, ffc0, CF_ISA_A);
2813 INSN(not, 4680, fff8, CF_ISA_A);
2814 INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
2815 INSN(pea, 4840, ffc0, CF_ISA_A);
2816 INSN(swap, 4840, fff8, CF_ISA_A);
2817 INSN(movem, 48c0, fbc0, CF_ISA_A);
2818 INSN(ext, 4880, fff8, CF_ISA_A);
2819 INSN(ext, 48c0, fff8, CF_ISA_A);
2820 INSN(ext, 49c0, fff8, CF_ISA_A);
2821 INSN(tst, 4a00, ff00, CF_ISA_A);
2822 INSN(tas, 4ac0, ffc0, CF_ISA_B);
2823 INSN(halt, 4ac8, ffff, CF_ISA_A);
2824 INSN(pulse, 4acc, ffff, CF_ISA_A);
2825 INSN(illegal, 4afc, ffff, CF_ISA_A);
2826 INSN(mull, 4c00, ffc0, CF_ISA_A);
2827 INSN(divl, 4c40, ffc0, CF_ISA_A);
2828 INSN(sats, 4c80, fff8, CF_ISA_B);
2829 INSN(trap, 4e40, fff0, CF_ISA_A);
2830 INSN(link, 4e50, fff8, CF_ISA_A);
2831 INSN(unlk, 4e58, fff8, CF_ISA_A);
2832 INSN(move_to_usp, 4e60, fff8, USP);
2833 INSN(move_from_usp, 4e68, fff8, USP);
2834 INSN(nop, 4e71, ffff, CF_ISA_A);
2835 INSN(stop, 4e72, ffff, CF_ISA_A);
2836 INSN(rte, 4e73, ffff, CF_ISA_A);
2837 INSN(rts, 4e75, ffff, CF_ISA_A);
2838 INSN(movec, 4e7b, ffff, CF_ISA_A);
2839 INSN(jump, 4e80, ffc0, CF_ISA_A);
2840 INSN(jump, 4ec0, ffc0, CF_ISA_A);
2841 INSN(addsubq, 5180, f1c0, CF_ISA_A);
2842 INSN(scc, 50c0, f0f8, CF_ISA_A);
2843 INSN(addsubq, 5080, f1c0, CF_ISA_A);
2844 INSN(tpf, 51f8, fff8, CF_ISA_A);
2846 /* Branch instructions. */
2847 INSN(branch, 6000, f000, CF_ISA_A);
2848 /* Disable long branch instructions, then add back the ones we want. */
2849 INSN(undef, 60ff, f0ff, CF_ISA_A); /* All long branches. */
2850 INSN(branch, 60ff, f0ff, CF_ISA_B);
2851 INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
2852 INSN(branch, 60ff, ffff, BRAL);
2854 INSN(moveq, 7000, f100, CF_ISA_A);
2855 INSN(mvzs, 7100, f100, CF_ISA_B);
2856 INSN(or, 8000, f000, CF_ISA_A);
2857 INSN(divw, 80c0, f0c0, CF_ISA_A);
2858 INSN(addsub, 9000, f000, CF_ISA_A);
2859 INSN(subx, 9180, f1f8, CF_ISA_A);
2860 INSN(suba, 91c0, f1c0, CF_ISA_A);
2862 INSN(undef_mac, a000, f000, CF_ISA_A);
2863 INSN(mac, a000, f100, CF_EMAC);
2864 INSN(from_mac, a180, f9b0, CF_EMAC);
2865 INSN(move_mac, a110, f9fc, CF_EMAC);
2866 INSN(from_macsr,a980, f9f0, CF_EMAC);
2867 INSN(from_mask, ad80, fff0, CF_EMAC);
2868 INSN(from_mext, ab80, fbf0, CF_EMAC);
2869 INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
2870 INSN(to_mac, a100, f9c0, CF_EMAC);
2871 INSN(to_macsr, a900, ffc0, CF_EMAC);
2872 INSN(to_mext, ab00, fbc0, CF_EMAC);
2873 INSN(to_mask, ad00, ffc0, CF_EMAC);
2875 INSN(mov3q, a140, f1c0, CF_ISA_B);
2876 INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
2877 INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
2878 INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
2879 INSN(cmp, b080, f1c0, CF_ISA_A);
2880 INSN(cmpa, b1c0, f1c0, CF_ISA_A);
2881 INSN(eor, b180, f1c0, CF_ISA_A);
2882 INSN(and, c000, f000, CF_ISA_A);
2883 INSN(mulw, c0c0, f0c0, CF_ISA_A);
2884 INSN(addsub, d000, f000, CF_ISA_A);
2885 INSN(addx, d180, f1f8, CF_ISA_A);
2886 INSN(adda, d1c0, f1c0, CF_ISA_A);
2887 INSN(shift_im, e080, f0f0, CF_ISA_A);
2888 INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
2889 INSN(undef_fpu, f000, f000, CF_ISA_A);
2890 INSN(fpu, f200, ffc0, CF_FPU);
2891 INSN(fbcc, f280, ffc0, CF_FPU);
2892 INSN(frestore, f340, ffc0, CF_FPU);
2893 INSN(fsave, f340, ffc0, CF_FPU);
2894 INSN(intouch, f340, ffc0, CF_ISA_A);
2895 INSN(cpushl, f428, ff38, CF_ISA_A);
2896 INSN(wddata, fb00, ff00, CF_ISA_A);
2897 INSN(wdebug, fbc0, ffc0, CF_ISA_A);
2901 /* ??? Some of this implementation is not exception safe. We should always
2902 write back the result to memory before setting the condition codes. */
2903 static void disas_m68k_insn(CPUState * env, DisasContext *s)
2907 insn = lduw_code(s->pc);
2910 opcode_table[insn](s, insn);
2913 /* generate intermediate code for basic block 'tb'. */
2915 gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
2918 DisasContext dc1, *dc = &dc1;
2919 uint16_t *gen_opc_end;
2921 target_ulong pc_start;
2927 /* generate intermediate code */
2932 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2935 dc->is_jmp = DISAS_NEXT;
2937 dc->cc_op = CC_OP_DYNAMIC;
2938 dc->singlestep_enabled = env->singlestep_enabled;
2939 dc->fpcr = env->fpcr;
2940 dc->user = (env->sr & SR_S) == 0;
2942 dc->mactmp = NULL_QREG;
2945 max_insns = tb->cflags & CF_COUNT_MASK;
2947 max_insns = CF_COUNT_MASK;
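    /* Translate one instruction per iteration until the instruction ends the
       block, the opcode buffer fills up, we get too close to a page boundary,
       or the per-TB instruction budget is exhausted. */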
2951 pc_offset = dc->pc - pc_start;
2952 gen_throws_exception = NULL;
2953 if (env->nb_breakpoints > 0) {
2954 for(j = 0; j < env->nb_breakpoints; j++) {
2955 if (env->breakpoints[j] == dc->pc) {
2956 gen_exception(dc, dc->pc, EXCP_DEBUG);
2957 dc->is_jmp = DISAS_JUMP;
2965 j = gen_opc_ptr - gen_opc_buf;
2969 gen_opc_instr_start[lj++] = 0;
2971 gen_opc_pc[lj] = dc->pc;
2972 gen_opc_instr_start[lj] = 1;
2973 gen_opc_icount[lj] = num_insns;
2975 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
2977 last_cc_op = dc->cc_op;
2978 dc->insn_pc = dc->pc;
2979 disas_m68k_insn(env, dc);
2982 /* Terminate the TB on memory ops if watchpoints are present. */
2983 /* FIXME: This should be replaced by the deterministic execution
2984 * IRQ raising bits. */
2985 if (dc->is_mem && env->nb_watchpoints)
2987 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
2988 !env->singlestep_enabled &&
2989 (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
2990 num_insns < max_insns);
2992 if (tb->cflags & CF_LAST_IO)
2994 if (unlikely(env->singlestep_enabled)) {
2995 /* Make sure the pc is updated, and raise a debug exception. */
2997 gen_flush_cc_op(dc);
2998 tcg_gen_movi_i32(QREG_PC, dc->pc);
3000 gen_helper_raise_exception(tcg_const_i32(EXCP_DEBUG));
3002 switch(dc->is_jmp) {
3004 gen_flush_cc_op(dc);
3005 gen_jmp_tb(dc, 0, dc->pc);
3010 gen_flush_cc_op(dc);
3011 /* indicate that the hash table must be used to find the next TB */
3015 /* nothing more to generate */
3019 gen_icount_end(tb, num_insns);
3020 *gen_opc_ptr = INDEX_op_end;
3023 if (loglevel & CPU_LOG_TB_IN_ASM) {
3024 fprintf(logfile, "----------------\n");
3025 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
3026 target_disas(logfile, pc_start, dc->pc - pc_start, 0);
3027 fprintf(logfile, "\n");
3031 j = gen_opc_ptr - gen_opc_buf;
3034 gen_opc_instr_start[lj++] = 0;
3036 tb->size = dc->pc - pc_start;
3037 tb->icount = num_insns;
3041 //expand_target_qops();
3045 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
3047 return gen_intermediate_code_internal(env, tb, 0);
3050 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
3052 return gen_intermediate_code_internal(env, tb, 1);
3055 void cpu_dump_state(CPUState *env, FILE *f,
3056 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
3062 for (i = 0; i < 8; i++)
3064 u.d = env->fregs[i];
3065 cpu_fprintf (f, "D%d = %08x A%d = %08x F%d = %08x%08x (%12g)\n",
3066 i, env->dregs[i], i, env->aregs[i],
3067 i, u.l.upper, u.l.lower, *(double *)&u.d);
3069 cpu_fprintf (f, "PC = %08x ", env->pc);
3071 cpu_fprintf (f, "SR = %04x %c%c%c%c%c ", sr, (sr & 0x10) ? 'X' : '-',
3072 (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
3073 (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
3074 cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
3077 void gen_pc_load(CPUState *env, TranslationBlock *tb,
3078 unsigned long searched_pc, int pc_pos, void *puc)
3080 env->pc = gen_opc_pc[pc_pos];