 * Copyright (c) 2005-2007 CodeSourcery
 * Written by Paul Brook
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
//#define DEBUG_DISPATCH 1

/* Fake floating point.  */
#define TCG_TYPE_F32 TCG_TYPE_I32
#define TCG_TYPE_F64 TCG_TYPE_I64
#define tcg_gen_mov_f64 tcg_gen_mov_i64
#define tcg_gen_qemu_ldf32 tcg_gen_qemu_ld32u
#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
#define tcg_gen_qemu_stf32 tcg_gen_qemu_st32
#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64
#define gen_helper_pack_32_f32 tcg_gen_mov_i32
#define gen_helper_pack_f32_32 tcg_gen_mov_i32

#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv QREG_##name;
#define DEFF64(name, offset) static TCGv QREG_##name;

static char cpu_reg_names[3*8*3 + 5*4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv cpu_fregs[8];
static TCGv cpu_macc[4];

#define DREG(insn, pos) cpu_dregs[((insn) >> (pos)) & 7]
#define AREG(insn, pos) cpu_aregs[((insn) >> (pos)) & 7]
#define FREG(insn, pos) cpu_fregs[((insn) >> (pos)) & 7]
#define MACREG(acc) cpu_macc[acc]
#define QREG_SP cpu_aregs[7]
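
/* Worked example of the field-extraction macros above: each one pulls a
   3-bit register number out of the instruction word at bit position POS.
   For insn 0x2642 (movea.l %d2,%a3), DREG(insn, 0) evaluates to
   cpu_dregs[(0x2642 >> 0) & 7] == D2, and AREG(insn, 9) evaluates to
   cpu_aregs[(0x2642 >> 9) & 7] == A3.  */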
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (GET_TCGV(t) == GET_TCGV(NULL_QREG))
/* Used to distinguish stores from bad addressing modes.  */
static TCGv store_dummy;

#include "gen-icount.h"

void m68k_tcg_init(void)
#define DEFO32(name, offset) QREG_##name = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0, offsetof(CPUState, offset), #name);
#define DEFO64(name, offset) QREG_##name = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0, offsetof(CPUState, offset), #name);
#define DEFF64(name, offset) DEFO64(name, offset)

    cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");

    for (i = 0; i < 8; i++) {
        cpu_dregs[i] = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
                                          offsetof(CPUM68KState, dregs[i]), p);
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
                                          offsetof(CPUM68KState, aregs[i]), p);
        sprintf(p, "F%d", i);
        cpu_fregs[i] = tcg_global_mem_new(TCG_TYPE_F64, TCG_AREG0,
                                          offsetof(CPUM68KState, fregs[i]), p);

    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
                                         offsetof(CPUM68KState, macc[i]), p);

    NULL_QREG = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0, -4, "NULL");
    store_dummy = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0, -8, "NULL");

#define DEF_HELPER(name, ret, args) \
    tcg_register_helper(HELPER(name), #name);
static inline void qemu_assert(int cond, const char *msg)
    fprintf(stderr, "badness: %s\n", msg);

/* internal defines */
typedef struct DisasContext {
    target_ulong insn_pc; /* Start of the current instruction.  */
    struct TranslationBlock *tb;
    int singlestep_enabled;

#define DISAS_JUMP_NEXT 4

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) s->user

/* XXX: move that elsewhere */
/* ??? Fix exceptions.  */
static void *gen_throws_exception;
#define gen_last_qop NULL

typedef void (*disas_proc)(DisasContext *, uint16_t);

#ifdef DEBUG_DISPATCH
#define DISAS_INSN(name) \
static void real_disas_##name (DisasContext *s, uint16_t insn); \
static void disas_##name (DisasContext *s, uint16_t insn) { \
  if (logfile) fprintf(logfile, "Dispatch " #name "\n"); \
  real_disas_##name(s, insn); } \
static void real_disas_##name (DisasContext *s, uint16_t insn)
#else
#define DISAS_INSN(name) \
static void disas_##name (DisasContext *s, uint16_t insn)

/* FIXME: Remove this.  */
#define gen_im32(val) tcg_const_i32(val)

#define QMODE_I32 TCG_TYPE_I32
#define QMODE_I64 TCG_TYPE_I64
#define QMODE_F32 TCG_TYPE_F32
#define QMODE_F64 TCG_TYPE_F64
static inline TCGv gen_new_qreg(int mode)
    return tcg_temp_new(mode);
/* Generate a load from the specified address.  Narrow values are
   sign extended to full register width.  */
static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr, int sign)
    int index = IS_USER(s);
        tmp = gen_new_qreg(QMODE_I32);
            tcg_gen_qemu_ld8s(tmp, addr, index);
            tcg_gen_qemu_ld8u(tmp, addr, index);
        tmp = gen_new_qreg(QMODE_I32);
            tcg_gen_qemu_ld16s(tmp, addr, index);
            tcg_gen_qemu_ld16u(tmp, addr, index);
        tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_qemu_ld32u(tmp, addr, index);
        tmp = gen_new_qreg(QMODE_F32);
        tcg_gen_qemu_ldf32(tmp, addr, index);
        tmp = gen_new_qreg(QMODE_F64);
        tcg_gen_qemu_ldf64(tmp, addr, index);
        qemu_assert(0, "bad load size");
    gen_throws_exception = gen_last_qop;

/* Generate a store.  */
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
    int index = IS_USER(s);
        tcg_gen_qemu_st8(val, addr, index);
        tcg_gen_qemu_st16(val, addr, index);
        tcg_gen_qemu_st32(val, addr, index);
        tcg_gen_qemu_stf32(val, addr, index);
        tcg_gen_qemu_stf64(val, addr, index);
        qemu_assert(0, "bad store size");
    gen_throws_exception = gen_last_qop;

/* Generate an unsigned load if VAL is 0, a signed load if VAL is -1,
   otherwise generate a store.  */
static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
    if (what == EA_STORE) {
        gen_store(s, opsize, addr, val);
    return gen_load(s, opsize, addr, what == EA_LOADS);

/* Read a 32-bit immediate constant.  */
static inline uint32_t read_im32(DisasContext *s)
    im = ((uint32_t)lduw_code(s->pc)) << 16;
    im |= lduw_code(s->pc);
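
/* Example: with the big-endian code stream 0x1234 0x5678, read_im32
   returns (0x1234 << 16) | 0x5678 == 0x12345678, consuming both
   extension words.  */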
/* Calculate an address index.  */
static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        tcg_gen_ext16s_i32(tmp, add);
    scale = (ext >> 9) & 3;
        tcg_gen_shli_i32(tmp, add, scale);
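
/* Example: ext = 0xa400 selects A2 (bit 15 set, regno (ext >> 12) & 7 == 2),
   sign-extends its low word (bit 11 clear), and scales it by 1 << 2 == 4
   (scale field (ext >> 9) & 3 == 2), yielding the index value A2.w*4.  */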
/* Handle a base + index + displacement effective address.
   A NULL_QREG base means pc-relative.  */
static TCGv gen_lea_indexed(DisasContext *s, int opsize, TCGv base)
    ext = lduw_code(s->pc);

    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))

        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)lduw_code(s->pc);
        tmp = gen_new_qreg(QMODE_I32);
        if ((ext & 0x44) == 0) {
            add = gen_addr_index(ext, tmp);
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                base = gen_im32(offset + bd);
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
            if (!IS_NULL_QREG(add)) {
                tcg_gen_addi_i32(tmp, add, bd);
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = gen_load(s, OS_LONG, add, 0);
            if ((ext & 0x44) == 4) {
                add = gen_addr_index(ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
            /* outer displacement */
            if ((ext & 3) == 2) {
                od = (int16_t)lduw_code(s->pc);
                tcg_gen_addi_i32(tmp, add, od);
        /* brief extension word format */
        tmp = gen_new_qreg(QMODE_I32);
        add = gen_addr_index(ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);

/* Update the CPU env CC_OP state.  */
static inline void gen_flush_cc_op(DisasContext *s)
    if (s->cc_op != CC_OP_DYNAMIC)
        tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);

/* Evaluate all the CC flags.  */
static inline void gen_flush_flags(DisasContext *s)
    if (s->cc_op == CC_OP_FLAGS)
    gen_helper_flush_flags(cpu_env, QREG_CC_OP);
    s->cc_op = CC_OP_FLAGS;

static void gen_logic_cc(DisasContext *s, TCGv val)
    tcg_gen_mov_i32(QREG_CC_DEST, val);
    s->cc_op = CC_OP_LOGIC;

static void gen_update_cc_add(TCGv dest, TCGv src)
    tcg_gen_mov_i32(QREG_CC_DEST, dest);
    tcg_gen_mov_i32(QREG_CC_SRC, src);

static inline int opsize_bytes(int opsize)
    case OS_BYTE: return 1;
    case OS_WORD: return 2;
    case OS_LONG: return 4;
    case OS_SINGLE: return 4;
    case OS_DOUBLE: return 8;
    qemu_assert(0, "bad operand size");

/* Assign value to a register.  If the width is less than the register width,
   only the low part of the register is set.  */
static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
        tcg_gen_andi_i32(reg, reg, 0xffffff00);
        tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_ext8u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        tcg_gen_andi_i32(reg, reg, 0xffff0000);
        tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_ext16u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        tcg_gen_mov_i32(reg, val);
        gen_helper_pack_32_f32(reg, val);
        qemu_assert(0, "Bad operand size");
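
/* Example: for OS_BYTE with reg == 0x12345678 and val == 0xab, the
   mask-and-or sequence above produces 0x123456ab; the upper three bytes
   of the destination register are preserved.  */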
/* Sign or zero extend a value.  */
static inline TCGv gen_extend(TCGv val, int opsize, int sign)
        tmp = gen_new_qreg(QMODE_I32);
            tcg_gen_ext8s_i32(tmp, val);
            tcg_gen_ext8u_i32(tmp, val);
        tmp = gen_new_qreg(QMODE_I32);
            tcg_gen_ext16s_i32(tmp, val);
            tcg_gen_ext16u_i32(tmp, val);
        tmp = gen_new_qreg(QMODE_F32);
        gen_helper_pack_f32_32(tmp, val);
        qemu_assert(0, "Bad operand size");
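
/* Example: gen_extend(val, OS_BYTE, 1) turns 0x80 into 0xffffff80, while
   gen_extend(val, OS_BYTE, 0) yields 0x00000080.  */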
/* Generate code for an "effective address".  Does not adjust the base
   register for autoincrement addressing modes.  */
static TCGv gen_lea(DisasContext *s, uint16_t insn, int opsize)
    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct.  */
    case 1: /* Address register direct.  */
    case 2: /* Indirect register */
    case 3: /* Indirect postincrement.  */
        return AREG(insn, 0);
    case 4: /* Indirect predecrement.  */
        tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
    case 5: /* Indirect displacement.  */
        tmp = gen_new_qreg(QMODE_I32);
        ext = lduw_code(s->pc);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
    case 6: /* Indirect index + displacement.  */
        return gen_lea_indexed(s, opsize, reg);
        case 0: /* Absolute short.  */
            offset = ldsw_code(s->pc);
            return gen_im32(offset);
        case 1: /* Absolute long.  */
            offset = read_im32(s);
            return gen_im32(offset);
        case 2: /* pc displacement  */
            tmp = gen_new_qreg(QMODE_I32);
            offset += ldsw_code(s->pc);
            return gen_im32(offset);
        case 3: /* pc index+displacement.  */
            return gen_lea_indexed(s, opsize, NULL_QREG);
        case 4: /* Immediate.  */
    /* Should never happen.  */
/* Helper function for gen_ea.  Reuse the computed address between the
   read/write operands.  */
static inline TCGv gen_ea_once(DisasContext *s, uint16_t insn, int opsize,
                               TCGv val, TCGv *addrp, ea_what what)
    if (addrp && what == EA_STORE) {
        tmp = gen_lea(s, insn, opsize);
        if (IS_NULL_QREG(tmp))
    return gen_ldst(s, opsize, tmp, val, what);

/* Generate code to load/store a value to/from an EA.  If VAL > 0 this is
   a write, otherwise it is a read (0 == sign extend, -1 == zero extend).
   ADDRP is non-null for readwrite operands.  */
static TCGv gen_ea(DisasContext *s, uint16_t insn, int opsize, TCGv val,
                   TCGv *addrp, ea_what what)
    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct.  */
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
        return gen_extend(reg, opsize, what == EA_LOADS);
    case 1: /* Address register direct.  */
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
        return gen_extend(reg, opsize, what == EA_LOADS);
    case 2: /* Indirect register */
        return gen_ldst(s, opsize, reg, val, what);
    case 3: /* Indirect postincrement.  */
        result = gen_ldst(s, opsize, reg, val, what);
        /* ??? This is not exception safe.  The instruction may still
           fault after this point.  */
        if (what == EA_STORE || !addrp)
            tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
    case 4: /* Indirect predecrement.  */
        if (addrp && what == EA_STORE) {
            tmp = gen_lea(s, insn, opsize);
            if (IS_NULL_QREG(tmp))
        result = gen_ldst(s, opsize, tmp, val, what);
        /* ??? This is not exception safe.  The instruction may still
           fault after this point.  */
        if (what == EA_STORE || !addrp) {
            tcg_gen_mov_i32(reg, tmp);
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
        return gen_ea_once(s, insn, opsize, val, addrp, what);
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            return gen_ea_once(s, insn, opsize, val, addrp, what);
        case 4: /* Immediate.  */
            /* Sign extend values for consistency.  */
                if (what == EA_LOADS)
                    offset = ldsb_code(s->pc + 1);
                    offset = ldub_code(s->pc + 1);
                if (what == EA_LOADS)
                    offset = ldsw_code(s->pc);
                    offset = lduw_code(s->pc);
                offset = read_im32(s);
                qemu_assert(0, "Bad immediate operand");
            return tcg_const_i32(offset);
    /* Should never happen.  */
/* This generates a conditional branch, clobbering all temporaries.  */
static void gen_jmpcc(DisasContext *s, int cond, int l1)
    /* TODO: Optimize compare/branch pairs rather than always flushing
       flag state to CC_OP_FLAGS.  */
    case 2: /* HI (!C && !Z) */
        tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
    case 3: /* LS (C || Z) */
        tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 4: /* CC (!C) */
        tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 6: /* NE (!Z) */
        tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 8: /* VC (!V) */
        tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
        tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 10: /* PL (!N) */
        tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
    case 11: /* MI (N) */
        tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
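    /* The signed conditions below compute N ^ V without a branch: since
       CCF_V == CCF_N >> 2 (asserted in each case), shifting CC_DEST right
       by two aligns the N flag with the V flag, and the xor then leaves
       N ^ V in the CCF_V bit position, ready to be masked and tested.  */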
    case 12: /* GE (!(N ^ V)) */
        tmp = gen_new_qreg(QMODE_I32);
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
    case 13: /* LT (N ^ V) */
        tmp = gen_new_qreg(QMODE_I32);
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    case 14: /* GT (!(Z || (N ^ V))) */
        tmp = gen_new_qreg(QMODE_I32);
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_shri_i32(tmp, tmp, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
    case 15: /* LE (Z || (N ^ V)) */
        tmp = gen_new_qreg(QMODE_I32);
        assert(CCF_V == (CCF_N >> 2));
        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
        tcg_gen_shri_i32(tmp, tmp, 2);
        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
    /* Should never happen.  */
    l1 = gen_new_label();
    cond = (insn >> 8) & 0xf;
    tcg_gen_andi_i32(reg, reg, 0xffffff00);
    /* This is safe because we modify the reg directly, with no other values
       live.  */
    gen_jmpcc(s, cond ^ 1, l1);
    tcg_gen_ori_i32(reg, reg, 0xff);
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_lookup_tb(DisasContext *s)
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->is_jmp = DISAS_UPDATE;

/* Generate a jump to an immediate address.  */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
    tcg_gen_movi_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;

/* Generate a jump to the address in qreg DEST.  */
static void gen_jmp(DisasContext *s, TCGv dest)
    tcg_gen_mov_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;

static void gen_exception(DisasContext *s, uint32_t where, int nr)
    gen_jmp_im(s, where);
    gen_helper_raise_exception(tcg_const_i32(nr));

static inline void gen_addr_fault(DisasContext *s)
    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
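
/* The EA macros below wrap gen_ea; gen_ea signals an invalid addressing
   mode by returning NULL_QREG, and the macros then bail out through
   gen_addr_fault, raising EXCP_ADDRESS at the start address of the
   offending instruction (s->insn_pc).  */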
#define SRC_EA(result, opsize, op_sign, addrp) do { \
    result = gen_ea(s, insn, opsize, NULL_QREG, addrp, op_sign ? EA_LOADS : EA_LOADU); \
    if (IS_NULL_QREG(result)) { \

#define DEST_EA(insn, opsize, val, addrp) do { \
    TCGv ea_result = gen_ea(s, insn, opsize, val, addrp, EA_STORE); \
    if (IS_NULL_QREG(ea_result)) { \
/* Generate a jump to an immediate address.  */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
    TranslationBlock *tb;

    if (unlikely(s->singlestep_enabled)) {
        gen_exception(s, dest, EXCP_DEBUG);
    } else if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
               (s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb((long)tb + n);
    s->is_jmp = DISAS_TB_JUMP;

DISAS_INSN(undef_mac)
    gen_exception(s, s->pc - 2, EXCP_LINEA);

DISAS_INSN(undef_fpu)
    gen_exception(s, s->pc - 2, EXCP_LINEF);

    gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
    cpu_abort(cpu_single_env, "Illegal instruction: %04x @ %08x",

    sign = (insn & 0x100) != 0;
    tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_ext16s_i32(tmp, reg);
        tcg_gen_ext16u_i32(tmp, reg);
    SRC_EA(src, OS_WORD, sign, NULL);
    tcg_gen_mul_i32(tmp, tmp, src);
    tcg_gen_mov_i32(reg, tmp);
    /* Unlike m68k, ColdFire always clears the overflow bit.  */
    gen_logic_cc(s, tmp);
    sign = (insn & 0x100) != 0;
        tcg_gen_ext16s_i32(QREG_DIV1, reg);
        tcg_gen_ext16u_i32(QREG_DIV1, reg);
    SRC_EA(src, OS_WORD, sign, NULL);
    tcg_gen_mov_i32(QREG_DIV2, src);
        gen_helper_divs(cpu_env, tcg_const_i32(1));
        gen_helper_divu(cpu_env, tcg_const_i32(1));

    tmp = gen_new_qreg(QMODE_I32);
    src = gen_new_qreg(QMODE_I32);
    tcg_gen_ext16u_i32(tmp, QREG_DIV1);
    tcg_gen_shli_i32(src, QREG_DIV2, 16);
    tcg_gen_or_i32(reg, tmp, src);
    s->cc_op = CC_OP_FLAGS;

    ext = lduw_code(s->pc);
        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);

    tcg_gen_mov_i32(QREG_DIV1, num);
    SRC_EA(den, OS_LONG, 0, NULL);
    tcg_gen_mov_i32(QREG_DIV2, den);
        gen_helper_divs(cpu_env, tcg_const_i32(0));
        gen_helper_divu(cpu_env, tcg_const_i32(0));
    if ((ext & 7) == ((ext >> 12) & 7)) {
        tcg_gen_mov_i32(reg, QREG_DIV1);
        tcg_gen_mov_i32(reg, QREG_DIV2);
    s->cc_op = CC_OP_FLAGS;

    add = (insn & 0x4000) != 0;
    reg = DREG(insn, 9);
    dest = gen_new_qreg(QMODE_I32);
        SRC_EA(tmp, OS_LONG, 0, &addr);
        SRC_EA(src, OS_LONG, 0, NULL);
        tcg_gen_add_i32(dest, tmp, src);
        gen_helper_xflag_lt(QREG_CC_X, dest, src);
        s->cc_op = CC_OP_ADD;
        gen_helper_xflag_lt(QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        s->cc_op = CC_OP_SUB;
    gen_update_cc_add(dest, src);
        DEST_EA(insn, OS_LONG, dest, &addr);
        tcg_gen_mov_i32(reg, dest);

/* Reverse the order of the bits in REG.  */
    reg = DREG(insn, 0);
    gen_helper_bitrev(reg, reg);

DISAS_INSN(bitop_reg)
    if ((insn & 0x38) != 0)
    op = (insn >> 6) & 3;
    SRC_EA(src1, opsize, 0, op ? &addr : NULL);
    src2 = DREG(insn, 9);
    dest = gen_new_qreg(QMODE_I32);

    tmp = gen_new_qreg(QMODE_I32);
    if (opsize == OS_BYTE)
        tcg_gen_andi_i32(tmp, src2, 7);
        tcg_gen_andi_i32(tmp, src2, 31);
    tmp = gen_new_qreg(QMODE_I32);
    tcg_gen_shr_i32(tmp, src1, src2);
    tcg_gen_andi_i32(tmp, tmp, 1);
    tcg_gen_shli_i32(tmp, tmp, 2);
    /* Clear CCF_Z if bit set.  */
    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
    tcg_gen_shl_i32(tmp, tcg_const_i32(1), src2);
        tcg_gen_xor_i32(dest, src1, tmp);
        tcg_gen_not_i32(tmp, tmp);
        tcg_gen_and_i32(dest, src1, tmp);
        tcg_gen_or_i32(dest, src1, tmp);
        DEST_EA(insn, opsize, dest, &addr);

    reg = DREG(insn, 0);
    gen_helper_sats(reg, reg, QREG_CC_DEST);
    gen_logic_cc(s, reg);

static void gen_push(DisasContext *s, TCGv val)
    tmp = gen_new_qreg(QMODE_I32);
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, val);
    tcg_gen_mov_i32(QREG_SP, tmp);

    mask = lduw_code(s->pc);
    tmp = gen_lea(s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
    addr = gen_new_qreg(QMODE_I32);
    tcg_gen_mov_i32(addr, tmp);
    is_load = ((insn & 0x0400) != 0);
    for (i = 0; i < 16; i++, mask >>= 1) {
            tmp = gen_load(s, OS_LONG, addr, 0);
            tcg_gen_mov_i32(reg, tmp);
            gen_store(s, OS_LONG, addr, reg);
        tcg_gen_addi_i32(addr, addr, 4);

DISAS_INSN(bitop_im)
    if ((insn & 0x38) != 0)
    op = (insn >> 6) & 3;
    bitnum = lduw_code(s->pc);
    if (bitnum & 0xff00) {
        disas_undef(s, insn);
    SRC_EA(src1, opsize, 0, op ? &addr : NULL);
    if (opsize == OS_BYTE)
    tmp = gen_new_qreg(QMODE_I32);
    assert(CCF_Z == (1 << 2));
        tcg_gen_shri_i32(tmp, src1, bitnum - 2);
    else if (bitnum < 2)
        tcg_gen_shli_i32(tmp, src1, 2 - bitnum);
        tcg_gen_mov_i32(tmp, src1);
    tcg_gen_andi_i32(tmp, tmp, CCF_Z);
    /* Clear CCF_Z if bit set.  */
    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
        tcg_gen_xori_i32(tmp, src1, mask);
        tcg_gen_andi_i32(tmp, src1, ~mask);
        tcg_gen_ori_i32(tmp, src1, mask);
        DEST_EA(insn, opsize, tmp, &addr);

DISAS_INSN(arith_im)
    op = (insn >> 9) & 7;
    SRC_EA(src1, OS_LONG, 0, (op == 6) ? NULL : &addr);
    dest = gen_new_qreg(QMODE_I32);
        tcg_gen_ori_i32(dest, src1, im);
        gen_logic_cc(s, dest);
        tcg_gen_andi_i32(dest, src1, im);
        gen_logic_cc(s, dest);
        tcg_gen_mov_i32(dest, src1);
        gen_helper_xflag_lt(QREG_CC_X, dest, gen_im32(im));
        tcg_gen_subi_i32(dest, dest, im);
        gen_update_cc_add(dest, gen_im32(im));
        s->cc_op = CC_OP_SUB;
        tcg_gen_mov_i32(dest, src1);
        tcg_gen_addi_i32(dest, dest, im);
        gen_update_cc_add(dest, gen_im32(im));
        gen_helper_xflag_lt(QREG_CC_X, dest, gen_im32(im));
        s->cc_op = CC_OP_ADD;
        tcg_gen_xori_i32(dest, src1, im);
        gen_logic_cc(s, dest);
        tcg_gen_mov_i32(dest, src1);
        tcg_gen_subi_i32(dest, dest, im);
        gen_update_cc_add(dest, gen_im32(im));
        s->cc_op = CC_OP_SUB;
        DEST_EA(insn, OS_LONG, dest, &addr);

    reg = DREG(insn, 0);
    tcg_gen_bswap_i32(reg, reg);

    switch (insn >> 12) {
    case 1: /* move.b */
    case 2: /* move.l */
    case 3: /* move.w */
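        /* Note the non-obvious m68k size encoding in bits 13-12 of the
           opcode: 1 selects byte, 2 selects long, and 3 selects word.  */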
    SRC_EA(src, opsize, 1, NULL);
    op = (insn >> 6) & 7;
        /* The value will already have been sign extended.  */
        dest = AREG(insn, 9);
        tcg_gen_mov_i32(dest, src);
        dest_ea = ((insn >> 9) & 7) | (op << 3);
        DEST_EA(dest_ea, opsize, src, NULL);
        /* This will be correct because loads sign extend.  */
        gen_logic_cc(s, src);

    reg = DREG(insn, 0);
    gen_helper_subx_cc(reg, cpu_env, tcg_const_i32(0), reg);

    reg = AREG(insn, 9);
    tmp = gen_lea(s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
    tcg_gen_mov_i32(reg, tmp);

    switch ((insn >> 6) & 3) {
    DEST_EA(insn, opsize, gen_im32(0), NULL);
    gen_logic_cc(s, gen_im32(0));

static TCGv gen_get_ccr(DisasContext *s)
    dest = gen_new_qreg(QMODE_I32);
    tcg_gen_shli_i32(dest, QREG_CC_X, 4);
    tcg_gen_or_i32(dest, dest, QREG_CC_DEST);
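
/* CCR layout: the X flag lives in QREG_CC_X and is packed into bit 4 here,
   while N/Z/V/C occupy the low four bits of QREG_CC_DEST.  Example: X=1
   with Z and C set (N=0, V=0) yields a CCR value of 0x15.  */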
DISAS_INSN(move_from_ccr)
    ccr = gen_get_ccr(s);
    reg = DREG(insn, 0);
    gen_partset_reg(OS_WORD, reg, ccr);

    reg = DREG(insn, 0);
    src1 = gen_new_qreg(QMODE_I32);
    tcg_gen_mov_i32(src1, reg);
    tcg_gen_neg_i32(reg, src1);
    s->cc_op = CC_OP_SUB;
    gen_update_cc_add(reg, src1);
    gen_helper_xflag_lt(QREG_CC_X, tcg_const_i32(0), src1);
    s->cc_op = CC_OP_SUB;

static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
    tcg_gen_movi_i32(QREG_CC_DEST, val & 0xf);
    tcg_gen_movi_i32(QREG_CC_X, (val & 0x10) >> 4);
        gen_helper_set_sr(cpu_env, tcg_const_i32(val & 0xff00));

static void gen_set_sr(DisasContext *s, uint16_t insn, int ccr_only)
    s->cc_op = CC_OP_FLAGS;
    if ((insn & 0x38) == 0)
        tmp = gen_new_qreg(QMODE_I32);
        reg = DREG(insn, 0);
        tcg_gen_andi_i32(QREG_CC_DEST, reg, 0xf);
        tcg_gen_shri_i32(tmp, reg, 4);
        tcg_gen_andi_i32(QREG_CC_X, tmp, 1);
            gen_helper_set_sr(cpu_env, reg);
    else if ((insn & 0x3f) == 0x3c)
        val = lduw_code(s->pc);
        gen_set_sr_im(s, val, ccr_only);
        disas_undef(s, insn);

DISAS_INSN(move_to_ccr)
    gen_set_sr(s, insn, 1);

    reg = DREG(insn, 0);
    tcg_gen_not_i32(reg, reg);
    gen_logic_cc(s, reg);

    src1 = gen_new_qreg(QMODE_I32);
    src2 = gen_new_qreg(QMODE_I32);
    reg = DREG(insn, 0);
    tcg_gen_shli_i32(src1, reg, 16);
    tcg_gen_shri_i32(src2, reg, 16);
    tcg_gen_or_i32(reg, src1, src2);
    gen_logic_cc(s, reg);

    tmp = gen_lea(s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {

    reg = DREG(insn, 0);
    op = (insn >> 6) & 7;
    tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_ext16s_i32(tmp, reg);
        tcg_gen_ext8s_i32(tmp, reg);
        gen_partset_reg(OS_WORD, reg, tmp);
        tcg_gen_mov_i32(reg, tmp);
    gen_logic_cc(s, tmp);

    switch ((insn >> 6) & 3) {
    SRC_EA(tmp, opsize, 1, NULL);
    gen_logic_cc(s, tmp);

    /* Implemented as a NOP.  */

    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);

/* ??? This should be atomic.  */
    dest = gen_new_qreg(QMODE_I32);
    SRC_EA(src1, OS_BYTE, 1, &addr);
    gen_logic_cc(s, src1);
    tcg_gen_ori_i32(dest, src1, 0x80);
    DEST_EA(insn, OS_BYTE, dest, &addr);

    /* The upper 32 bits of the product are discarded, so
       muls.l and mulu.l are functionally equivalent.  */
    ext = lduw_code(s->pc);
        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
    reg = DREG(ext, 12);
    SRC_EA(src1, OS_LONG, 0, NULL);
    dest = gen_new_qreg(QMODE_I32);
    tcg_gen_mul_i32(dest, src1, reg);
    tcg_gen_mov_i32(reg, dest);
    /* Unlike m68k, ColdFire always clears the overflow bit.  */
    gen_logic_cc(s, dest);

    offset = ldsw_code(s->pc);
    reg = AREG(insn, 0);
    tmp = gen_new_qreg(QMODE_I32);
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg);
    if ((insn & 7) != 7)
        tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, tmp, offset);

    src = gen_new_qreg(QMODE_I32);
    reg = AREG(insn, 0);
    tcg_gen_mov_i32(src, reg);
    tmp = gen_load(s, OS_LONG, src, 0);
    tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, src, 4);

    tmp = gen_load(s, OS_LONG, QREG_SP, 0);
    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);

    /* Load the target address first to ensure correct exception
       behavior.  */
    tmp = gen_lea(s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
    if ((insn & 0x40) == 0) {
        gen_push(s, gen_im32(s->pc));

    SRC_EA(src1, OS_LONG, 0, &addr);
    val = (insn >> 9) & 7;
    dest = gen_new_qreg(QMODE_I32);
    tcg_gen_mov_i32(dest, src1);
    if ((insn & 0x38) == 0x08) {
        /* Don't update condition codes if the destination is an
           address register.  */
        if (insn & 0x0100) {
            tcg_gen_subi_i32(dest, dest, val);
            tcg_gen_addi_i32(dest, dest, val);
        src2 = gen_im32(val);
        if (insn & 0x0100) {
            gen_helper_xflag_lt(QREG_CC_X, dest, src2);
            tcg_gen_subi_i32(dest, dest, val);
            s->cc_op = CC_OP_SUB;
            tcg_gen_addi_i32(dest, dest, val);
            gen_helper_xflag_lt(QREG_CC_X, dest, src2);
            s->cc_op = CC_OP_ADD;
        gen_update_cc_add(dest, src2);
    DEST_EA(insn, OS_LONG, dest, &addr);

    case 2: /* One extension word.  */
    case 3: /* Two extension words.  */
    case 4: /* No extension words.  */
        disas_undef(s, insn);

    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
        offset = ldsw_code(s->pc);
    } else if (offset == -1) {
        offset = read_im32(s);
        gen_push(s, gen_im32(s->pc));

    l1 = gen_new_label();
    gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
    gen_jmp_tb(s, 1, base + offset);
    gen_jmp_tb(s, 0, s->pc);
    /* Unconditional branch.  */
    gen_jmp_tb(s, 0, base + offset);
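
/* Branch displacement encoding, as decoded above: the low byte of the
   opcode holds an 8-bit displacement; 0x00 means a 16-bit displacement
   word follows, and 0xff (offset == -1 after sign extension) means a
   32-bit displacement follows on cores with the long-branch feature
   (see the CF_ISA_B/BRAL entries in the INSN table below).  */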
    tcg_gen_movi_i32(DREG(insn, 9), val);
    gen_logic_cc(s, tcg_const_i32(val));

    SRC_EA(src, opsize, (insn & 0x80) == 0, NULL);
    reg = DREG(insn, 9);
    tcg_gen_mov_i32(reg, src);
    gen_logic_cc(s, src);

    reg = DREG(insn, 9);
    dest = gen_new_qreg(QMODE_I32);
        SRC_EA(src, OS_LONG, 0, &addr);
        tcg_gen_or_i32(dest, src, reg);
        DEST_EA(insn, OS_LONG, dest, &addr);
        SRC_EA(src, OS_LONG, 0, NULL);
        tcg_gen_or_i32(dest, src, reg);
        tcg_gen_mov_i32(reg, dest);
    gen_logic_cc(s, dest);

    SRC_EA(src, OS_LONG, 0, NULL);
    reg = AREG(insn, 9);
    tcg_gen_sub_i32(reg, reg, src);

    reg = DREG(insn, 9);
    src = DREG(insn, 0);
    gen_helper_subx_cc(reg, cpu_env, reg, src);

    val = (insn >> 9) & 7;
    src = gen_im32(val);
    gen_logic_cc(s, src);
    DEST_EA(insn, OS_LONG, src, NULL);

    op = (insn >> 6) & 3;
        s->cc_op = CC_OP_CMPB;
        s->cc_op = CC_OP_CMPW;
        s->cc_op = CC_OP_SUB;
    SRC_EA(src, opsize, 1, NULL);
    reg = DREG(insn, 9);
    dest = gen_new_qreg(QMODE_I32);
    tcg_gen_sub_i32(dest, reg, src);
    gen_update_cc_add(dest, src);

    SRC_EA(src, opsize, 1, NULL);
    reg = AREG(insn, 9);
    dest = gen_new_qreg(QMODE_I32);
    tcg_gen_sub_i32(dest, reg, src);
    gen_update_cc_add(dest, src);
    s->cc_op = CC_OP_SUB;

    SRC_EA(src, OS_LONG, 0, &addr);
    reg = DREG(insn, 9);
    dest = gen_new_qreg(QMODE_I32);
    tcg_gen_xor_i32(dest, src, reg);
    gen_logic_cc(s, dest);
    DEST_EA(insn, OS_LONG, dest, &addr);

    reg = DREG(insn, 9);
    dest = gen_new_qreg(QMODE_I32);
        SRC_EA(src, OS_LONG, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(insn, OS_LONG, dest, &addr);
        SRC_EA(src, OS_LONG, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        tcg_gen_mov_i32(reg, dest);
    gen_logic_cc(s, dest);

    SRC_EA(src, OS_LONG, 0, NULL);
    reg = AREG(insn, 9);
    tcg_gen_add_i32(reg, reg, src);

    reg = DREG(insn, 9);
    src = DREG(insn, 0);
    gen_helper_addx_cc(reg, cpu_env, reg, src);
    s->cc_op = CC_OP_FLAGS;

/* TODO: This could be implemented without helper functions.  */
DISAS_INSN(shift_im)
    reg = DREG(insn, 0);
    tmp = (insn >> 9) & 7;
    shift = gen_im32(tmp);
    /* No need to flush flags because we know we will set the C flag.  */
        gen_helper_shl_cc(reg, cpu_env, reg, shift);
        gen_helper_shr_cc(reg, cpu_env, reg, shift);
        gen_helper_sar_cc(reg, cpu_env, reg, shift);
    s->cc_op = CC_OP_SHIFT;

DISAS_INSN(shift_reg)
    reg = DREG(insn, 0);
    shift = DREG(insn, 9);
    /* Shift by zero leaves the C flag unmodified.  */
        gen_helper_shl_cc(reg, cpu_env, reg, shift);
        gen_helper_shr_cc(reg, cpu_env, reg, shift);
        gen_helper_sar_cc(reg, cpu_env, reg, shift);
    s->cc_op = CC_OP_SHIFT;

    reg = DREG(insn, 0);
    gen_logic_cc(s, reg);
    gen_helper_ff1(reg, reg);

static TCGv gen_get_sr(DisasContext *s)
    ccr = gen_get_ccr(s);
    sr = gen_new_qreg(QMODE_I32);
    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
    tcg_gen_or_i32(sr, sr, ccr);

    ext = lduw_code(s->pc);
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_UNSUPPORTED);
    ext = lduw_code(s->pc);
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);

DISAS_INSN(move_from_sr)
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    reg = DREG(insn, 0);
    gen_partset_reg(OS_WORD, reg, sr);

DISAS_INSN(move_to_sr)
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    gen_set_sr(s, insn, 0);

DISAS_INSN(move_from_usp)
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    /* TODO: Implement USP.  */
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);

DISAS_INSN(move_to_usp)
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    /* TODO: Implement USP.  */
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);

    gen_exception(s, s->pc, EXCP_HALT_INSN);

        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    ext = lduw_code(s->pc);
    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(QREG_HALTED, 1);
    gen_exception(s, s->pc, EXCP_HLT);

        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    gen_exception(s, s->pc - 2, EXCP_RTE);

        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    ext = lduw_code(s->pc);
        reg = AREG(ext, 12);
        reg = DREG(ext, 12);
    gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);

        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    /* ICache fetch.  Implement as no-op.  */

        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    /* Cache push/invalidate.  Implement as no-op.  */

        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);

        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
    /* TODO: Implement wdebug.  */
    qemu_assert(0, "WDEBUG not implemented");

    gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));

/* ??? FP exceptions are not implemented.  Most exceptions are deferred until
   immediately before the next FP instruction is executed.  */
    ext = lduw_code(s->pc);
    opmode = ext & 0x7f;
    switch ((ext >> 13) & 7) {
    case 3: /* fmove out */
        /* ??? TODO: Proper behavior on overflow.  */
        switch ((ext >> 10) & 7) {
            res = gen_new_qreg(QMODE_I32);
            gen_helper_f64_to_i32(res, cpu_env, src);
            res = gen_new_qreg(QMODE_F32);
            gen_helper_f64_to_f32(res, cpu_env, src);
            res = gen_new_qreg(QMODE_I32);
            gen_helper_f64_to_i32(res, cpu_env, src);
            res = gen_new_qreg(QMODE_I32);
            gen_helper_f64_to_i32(res, cpu_env, src);
        DEST_EA(insn, opsize, res, NULL);
    case 4: /* fmove to control register.  */
        switch ((ext >> 10) & 7) {
            /* Not implemented.  Ignore writes.  */
            cpu_abort(NULL, "Unimplemented: fmove to control %d",
    case 5: /* fmove from control register.  */
        switch ((ext >> 10) & 7) {
            /* Not implemented.  Always return zero.  */
            cpu_abort(NULL, "Unimplemented: fmove from control %d",
        DEST_EA(insn, OS_LONG, res, NULL);
    case 6: /* fmovem */
        if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
        src = gen_lea(s, insn, OS_LONG);
        if (IS_NULL_QREG(src)) {
        addr = gen_new_qreg(QMODE_I32);
        tcg_gen_mov_i32(addr, src);
        for (i = 0; i < 8; i++) {
            if (ext & (1 << 13)) {
                tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
                tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
            if (ext & (mask - 1))
                tcg_gen_addi_i32(addr, addr, 8);
    if (ext & (1 << 14)) {
        /* Source effective address.  */
        switch ((ext >> 10) & 7) {
        case 0: opsize = OS_LONG; break;
        case 1: opsize = OS_SINGLE; break;
        case 4: opsize = OS_WORD; break;
        case 5: opsize = OS_DOUBLE; break;
        case 6: opsize = OS_BYTE; break;
        SRC_EA(tmp, opsize, 1, NULL);
        if (opsize == OS_DOUBLE) {
            src = gen_new_qreg(QMODE_F64);
                gen_helper_i32_to_f64(src, cpu_env, tmp);
                gen_helper_f32_to_f64(src, cpu_env, tmp);
        /* Source register.  */
        src = FREG(ext, 10);
    dest = FREG(ext, 7);
    res = gen_new_qreg(QMODE_F64);
        tcg_gen_mov_f64(res, dest);
    case 0: case 0x40: case 0x44: /* fmove */
        tcg_gen_mov_f64(res, src);
        gen_helper_iround_f64(res, cpu_env, src);
    case 3: /* fintrz */
        gen_helper_itrunc_f64(res, cpu_env, src);
    case 4: case 0x41: case 0x45: /* fsqrt */
        gen_helper_sqrt_f64(res, cpu_env, src);
    case 0x18: case 0x58: case 0x5c: /* fabs */
        gen_helper_abs_f64(res, src);
    case 0x1a: case 0x5a: case 0x5e: /* fneg */
        gen_helper_chs_f64(res, src);
    case 0x20: case 0x60: case 0x64: /* fdiv */
        gen_helper_div_f64(res, cpu_env, res, src);
    case 0x22: case 0x62: case 0x66: /* fadd */
        gen_helper_add_f64(res, cpu_env, res, src);
    case 0x23: case 0x63: case 0x67: /* fmul */
        gen_helper_mul_f64(res, cpu_env, res, src);
    case 0x28: case 0x68: case 0x6c: /* fsub */
        gen_helper_sub_f64(res, cpu_env, res, src);
    case 0x38: /* fcmp */
        gen_helper_sub_cmp_f64(res, cpu_env, res, src);
    case 0x3a: /* ftst */
        tcg_gen_mov_f64(res, src);
    if (opmode & 0x40) {
        if ((opmode & 0x4) != 0)
    } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
        tmp = gen_new_qreg(QMODE_F32);
        gen_helper_f64_to_f32(tmp, cpu_env, res);
        gen_helper_f32_to_f64(res, cpu_env, tmp);
    tcg_gen_mov_f64(QREG_FP_RESULT, res);
    if (!IS_NULL_QREG(dest)) {
        tcg_gen_mov_f64(dest, res);
    disas_undef_fpu(s, insn);
    offset = ldsw_code(s->pc);
    if (insn & (1 << 6)) {
        offset = (offset << 16) | lduw_code(s->pc);

    l1 = gen_new_label();
    /* TODO: Raise BSUN exception.  */
    flag = gen_new_qreg(QMODE_I32);
    gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
    /* Jump to l1 if condition is true.  */
    switch (insn & 0xf) {
    case 1: /* eq (=0) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
    case 2: /* ogt (=1) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
    case 3: /* oge (=0 or =1) */
        tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
    case 4: /* olt (=-1) */
        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
    case 5: /* ole (=-1 or =0) */
        tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
    case 6: /* ogl (=-1 or =1) */
        tcg_gen_andi_i32(flag, flag, 1);
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
    case 7: /* or (=2) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
    case 8: /* un (<2) */
        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
    case 9: /* ueq (=0 or =2) */
        tcg_gen_andi_i32(flag, flag, 1);
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
    case 10: /* ugt (>0) */
        tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
    case 11: /* uge (>=0) */
        tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
    case 12: /* ult (=-1 or =2) */
        tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
    case 13: /* ule (!=1) */
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
    case 14: /* ne (!=0) */
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
    gen_jmp_tb(s, 0, s->pc);
    gen_jmp_tb(s, 1, addr + offset);

DISAS_INSN(frestore)
    /* TODO: Implement frestore.  */
    qemu_assert(0, "FRESTORE not implemented");

    /* TODO: Implement fsave.  */
    qemu_assert(0, "FSAVE not implemented");

static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
    TCGv tmp = gen_new_qreg(QMODE_I32);
    if (s->env->macsr & MACSR_FI) {
        tcg_gen_andi_i32(tmp, val, 0xffff0000);
            tcg_gen_shli_i32(tmp, val, 16);
    } else if (s->env->macsr & MACSR_SU) {
            tcg_gen_sari_i32(tmp, val, 16);
            tcg_gen_ext16s_i32(tmp, val);
            tcg_gen_shri_i32(tmp, val, 16);
            tcg_gen_ext16u_i32(tmp, val);

static void gen_mac_clear_flags(void)
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
    if (IS_NULL_QREG(s->mactmp))
        s->mactmp = tcg_temp_new(TCG_TYPE_I64);

    ext = lduw_code(s->pc);

    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(s, insn);
        /* MAC with load.  */
        tmp = gen_lea(s, insn, OS_LONG);
        addr = gen_new_qreg(QMODE_I32);
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /* Load the value now to ensure correct exception behavior.
           Perform writeback after reading the MAC inputs.  */
        loadval = gen_load(s, OS_LONG, addr, 0);
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);

    gen_mac_clear_flags();

    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = gen_new_qreg(QMODE_I32);
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);

    if ((ext & 0x0800) == 0) {
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        switch ((ext >> 9) & 3) {
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);

        /* Save the overflow flag from the multiply.  */
        saved_flags = gen_new_qreg(QMODE_I32);
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
        saved_flags = NULL_QREG;

    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = gen_new_qreg(QMODE_I32);
        gen_op_and32(tmp, QREG_MACSR, gen_im32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);

        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));

    /* Disabled because conditional branches clobber temporary vars.  */

        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);

        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = gen_new_qreg(QMODE_I32);
            gen_op_and32(tmp, QREG_MACSR, gen_im32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);

            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
        /* Disabled because conditional branches clobber temporary vars.  */

    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));

        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /* FIXME: Should address writeback happen with the masked or
           unmasked value?  */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);

DISAS_INSN(from_mac)
    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    if (s->env->macsr & MACSR_FI) {
        gen_helper_get_macf(cpu_env, rx, acc);
    } else if ((s->env->macsr & MACSR_OMC) == 0) {
        tcg_gen_trunc_i64_i32(rx, acc);
    } else if (s->env->macsr & MACSR_SU) {
        gen_helper_get_macs(rx, acc);
        gen_helper_get_macu(rx, acc);
        tcg_gen_movi_i64(acc, 0);
        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));

DISAS_INSN(move_mac)
    /* FIXME: This can be done without a helper.  */
    dest = tcg_const_i32((insn >> 9) & 3);
    gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, dest);

DISAS_INSN(from_macsr)
    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MACSR);

DISAS_INSN(from_mask)
    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MAC_MASK);

DISAS_INSN(from_mext)
    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_get_mac_extf(reg, cpu_env, acc);
        gen_helper_get_mac_exti(reg, cpu_env, acc);

DISAS_INSN(macsr_to_ccr)
    tcg_gen_movi_i32(QREG_CC_X, 0);
    tcg_gen_andi_i32(QREG_CC_DEST, QREG_MACSR, 0xf);
    s->cc_op = CC_OP_FLAGS;

    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    SRC_EA(val, OS_LONG, 0, NULL);
    if (s->env->macsr & MACSR_FI) {
        tcg_gen_ext_i32_i64(acc, val);
        tcg_gen_shli_i64(acc, acc, 8);
    } else if (s->env->macsr & MACSR_SU) {
        tcg_gen_ext_i32_i64(acc, val);
        tcg_gen_extu_i32_i64(acc, val);
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));

DISAS_INSN(to_macsr)
    SRC_EA(val, OS_LONG, 0, NULL);
    gen_helper_set_macsr(cpu_env, val);

    SRC_EA(val, OS_LONG, 0, NULL);
    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);

    SRC_EA(val, OS_LONG, 0, NULL);
    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_set_mac_extf(cpu_env, val, acc);
    else if (s->env->macsr & MACSR_SU)
        gen_helper_set_mac_exts(cpu_env, val, acc);
        gen_helper_set_mac_extu(cpu_env, val, acc);

static disas_proc opcode_table[65536];

register_opcode(disas_proc proc, uint16_t opcode, uint16_t mask)
    /* Sanity check.  All set bits must be included in the mask.  */
    if (opcode & ~mask) {
        "qemu internal error: bogus opcode definition %04x/%04x\n",
    /* This could probably be cleverer.  For now just optimize the case where
       the top bits are known.  */
    /* Find the first zero bit in the mask.  */
    while ((i & mask) != 0)
    /* Iterate over all combinations of this and lower bits.  */
    from = opcode & ~(i - 1);
    for (i = from; i < to; i++) {
        if ((i & mask) == opcode)
            opcode_table[i] = proc;
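
/* Example: scc is registered with opcode 0x50c0 and mask 0xf0f8.  The
   first zero bit of the mask is bit 11, so the loop walks the block
   0x5000..0x5fff and fills opcode_table[i] for every i satisfying
   (i & 0xf0f8) == 0x50c0, i.e. all condition/register combinations of
   the instruction.  */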
/* Register m68k opcode handlers.  Order is important.
   Later insns override earlier ones.  */
void register_m68k_insns (CPUM68KState *env)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        register_opcode(disas_##name, 0x##opcode, 0x##mask); \
    } while(0)
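    /* The ordering rule above is what makes the long-branch handling below
       work: branch is first registered for the whole 6000-6fff range, the
       60ff long forms are then knocked out with undef on ISA_A, and finally
       re-enabled for ISA_B/BRAL cores, each later registration overwriting
       the earlier table entries.  */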
    INSN(undef,     0000, 0000, CF_ISA_A);
    INSN(arith_im,  0080, fff8, CF_ISA_A);
    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
    INSN(bitop_reg, 0100, f1c0, CF_ISA_A);
    INSN(bitop_reg, 0140, f1c0, CF_ISA_A);
    INSN(bitop_reg, 0180, f1c0, CF_ISA_A);
    INSN(bitop_reg, 01c0, f1c0, CF_ISA_A);
    INSN(arith_im,  0280, fff8, CF_ISA_A);
    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0480, fff8, CF_ISA_A);
    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0680, fff8, CF_ISA_A);
    INSN(bitop_im,  0800, ffc0, CF_ISA_A);
    INSN(bitop_im,  0840, ffc0, CF_ISA_A);
    INSN(bitop_im,  0880, ffc0, CF_ISA_A);
    INSN(bitop_im,  08c0, ffc0, CF_ISA_A);
    INSN(arith_im,  0a80, fff8, CF_ISA_A);
    INSN(arith_im,  0c00, ff38, CF_ISA_A);
    INSN(move,      1000, f000, CF_ISA_A);
    INSN(move,      2000, f000, CF_ISA_A);
    INSN(move,      3000, f000, CF_ISA_A);
    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
    INSN(negx,      4080, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(lea,       41c0, f1c0, CF_ISA_A);
    INSN(clr,       4200, ff00, CF_ISA_A);
    INSN(undef,     42c0, ffc0, CF_ISA_A);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(neg,       4480, fff8, CF_ISA_A);
    INSN(move_to_ccr, 44c0, ffc0, CF_ISA_A);
    INSN(not,       4680, fff8, CF_ISA_A);
    INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
    INSN(pea,       4840, ffc0, CF_ISA_A);
    INSN(swap,      4840, fff8, CF_ISA_A);
    INSN(movem,     48c0, fbc0, CF_ISA_A);
    INSN(ext,       4880, fff8, CF_ISA_A);
    INSN(ext,       48c0, fff8, CF_ISA_A);
    INSN(ext,       49c0, fff8, CF_ISA_A);
    INSN(tst,       4a00, ff00, CF_ISA_A);
    INSN(tas,       4ac0, ffc0, CF_ISA_B);
    INSN(halt,      4ac8, ffff, CF_ISA_A);
    INSN(pulse,     4acc, ffff, CF_ISA_A);
    INSN(illegal,   4afc, ffff, CF_ISA_A);
    INSN(mull,      4c00, ffc0, CF_ISA_A);
    INSN(divl,      4c40, ffc0, CF_ISA_A);
    INSN(sats,      4c80, fff8, CF_ISA_B);
    INSN(trap,      4e40, fff0, CF_ISA_A);
    INSN(link,      4e50, fff8, CF_ISA_A);
    INSN(unlk,      4e58, fff8, CF_ISA_A);
    INSN(move_to_usp,   4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(nop,       4e71, ffff, CF_ISA_A);
    INSN(stop,      4e72, ffff, CF_ISA_A);
    INSN(rte,       4e73, ffff, CF_ISA_A);
    INSN(rts,       4e75, ffff, CF_ISA_A);
    INSN(movec,     4e7b, ffff, CF_ISA_A);
    INSN(jump,      4e80, ffc0, CF_ISA_A);
    INSN(jump,      4ec0, ffc0, CF_ISA_A);
    INSN(addsubq,   5180, f1c0, CF_ISA_A);
    INSN(scc,       50c0, f0f8, CF_ISA_A);
    INSN(addsubq,   5080, f1c0, CF_ISA_A);
    INSN(tpf,       51f8, fff8, CF_ISA_A);

    /* Branch instructions.  */
    INSN(branch,    6000, f000, CF_ISA_A);
    /* Disable long branch instructions, then add back the ones we want.  */
    INSN(undef,     60ff, f0ff, CF_ISA_A); /* All long branches.  */
    INSN(branch,    60ff, f0ff, CF_ISA_B);
    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch,    60ff, ffff, BRAL);

    INSN(moveq,     7000, f100, CF_ISA_A);
    INSN(mvzs,      7100, f100, CF_ISA_B);
    INSN(or,        8000, f000, CF_ISA_A);
    INSN(divw,      80c0, f0c0, CF_ISA_A);
    INSN(addsub,    9000, f000, CF_ISA_A);
    INSN(subx,      9180, f1f8, CF_ISA_A);
    INSN(suba,      91c0, f1c0, CF_ISA_A);

    INSN(undef_mac, a000, f000, CF_ISA_A);
    INSN(mac,       a000, f100, CF_EMAC);
    INSN(from_mac,  a180, f9b0, CF_EMAC);
    INSN(move_mac,  a110, f9fc, CF_EMAC);
    INSN(from_macsr, a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac,    a100, f9c0, CF_EMAC);
    INSN(to_macsr,  a900, ffc0, CF_EMAC);
    INSN(to_mext,   ab00, fbc0, CF_EMAC);
    INSN(to_mask,   ad00, ffc0, CF_EMAC);

    INSN(mov3q,     a140, f1c0, CF_ISA_B);
    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp,       b080, f1c0, CF_ISA_A);
    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
    INSN(eor,       b180, f1c0, CF_ISA_A);
    INSN(and,       c000, f000, CF_ISA_A);
    INSN(mulw,      c0c0, f0c0, CF_ISA_A);
    INSN(addsub,    d000, f000, CF_ISA_A);
    INSN(addx,      d180, f1f8, CF_ISA_A);
    INSN(adda,      d1c0, f1c0, CF_ISA_A);
    INSN(shift_im,  e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(undef_fpu, f000, f000, CF_ISA_A);
    INSN(fpu,       f200, ffc0, CF_FPU);
    INSN(fbcc,      f280, ffc0, CF_FPU);
    INSN(frestore,  f340, ffc0, CF_FPU);
    INSN(fsave,     f340, ffc0, CF_FPU);
    INSN(intouch,   f340, ffc0, CF_ISA_A);
    INSN(cpushl,    f428, ff38, CF_ISA_A);
    INSN(wddata,    fb00, ff00, CF_ISA_A);
    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
/* ??? Some of this implementation is not exception safe.  We should always
   write back the result to memory before setting the condition codes.  */
static void disas_m68k_insn(CPUState *env, DisasContext *s)
    insn = lduw_code(s->pc);
    opcode_table[insn](s, insn);

/* generate intermediate code for basic block 'tb'.  */
gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
    DisasContext dc1, *dc = &dc1;
    uint16_t *gen_opc_end;
    target_ulong pc_start;

    /* generate intermediate code */
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->fpcr = env->fpcr;
    dc->user = (env->sr & SR_S) == 0;
    dc->mactmp = NULL_QREG;
    max_insns = tb->cflags & CF_COUNT_MASK;
        max_insns = CF_COUNT_MASK;

        pc_offset = dc->pc - pc_start;
        gen_throws_exception = NULL;
        if (env->nb_breakpoints > 0) {
            for (j = 0; j < env->nb_breakpoints; j++) {
                if (env->breakpoints[j] == dc->pc) {
                    gen_exception(dc, dc->pc, EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
            j = gen_opc_ptr - gen_opc_buf;
                    gen_opc_instr_start[lj++] = 0;
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
        last_cc_op = dc->cc_op;
        dc->insn_pc = dc->pc;
        disas_m68k_insn(env, dc);

        /* Terminate the TB on memory ops if watchpoints are present.  */
        /* FIXME: This should be replaced by the deterministic execution
         * IRQ raising bits.  */
        if (dc->is_mem && env->nb_watchpoints)
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO)
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
            gen_flush_cc_op(dc);
            tcg_gen_movi_i32(QREG_PC, dc->pc);
        gen_helper_raise_exception(tcg_const_i32(EXCP_DEBUG));
        switch (dc->is_jmp) {
            gen_flush_cc_op(dc);
            gen_jmp_tb(dc, 0, dc->pc);
            gen_flush_cc_op(dc);
            /* indicate that the hash table must be used to find the next TB */
            /* nothing more to generate */
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (loglevel & CPU_LOG_TB_IN_ASM) {
        fprintf(logfile, "----------------\n");
        fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
        target_disas(logfile, pc_start, dc->pc - pc_start, 0);
        fprintf(logfile, "\n");
        j = gen_opc_ptr - gen_opc_buf;
            gen_opc_instr_start[lj++] = 0;
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;

    //expand_target_qops();

void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
    gen_intermediate_code_internal(env, tb, 0);

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
    gen_intermediate_code_internal(env, tb, 1);

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
    for (i = 0; i < 8; i++)
        u.d = env->fregs[i];
        cpu_fprintf(f, "D%d = %08x   A%d = %08x   F%d = %08x%08x (%12g)\n",
                    i, env->dregs[i], i, env->aregs[i],
                    i, u.l.upper, u.l.lower, *(double *)&u.d);
    cpu_fprintf(f, "PC = %08x   ", env->pc);
    cpu_fprintf(f, "SR = %04x %c%c%c%c%c ", sr, (sr & 0x10) ? 'X' : '-',
                (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
                (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
    cpu_fprintf(f, "FPRESULT = %12g\n", *(double *)&env->fp_result);

void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
    env->pc = gen_opc_pc[pc_pos];