4 * Copyright (c) 2005-2007 CodeSourcery
5 * Written by Paul Brook
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
38 //#define DEBUG_DISPATCH 1
40 /* Fake floating point. */
41 #define tcg_gen_mov_f64 tcg_gen_mov_i64
42 #define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
43 #define tcg_gen_qemu_stf64 tcg_gen_qemu_st64
45 #define DEFO32(name, offset) static TCGv QREG_##name;
46 #define DEFO64(name, offset) static TCGv_i64 QREG_##name;
47 #define DEFF64(name, offset) static TCGv_i64 QREG_##name;
53 static TCGv_ptr cpu_env;
55 static char cpu_reg_names[3*8*3 + 5*4];
56 static TCGv cpu_dregs[8];
57 static TCGv cpu_aregs[8];
58 static TCGv_i64 cpu_fregs[8];
59 static TCGv_i64 cpu_macc[4];
61 #define DREG(insn, pos) cpu_dregs[((insn) >> (pos)) & 7]
62 #define AREG(insn, pos) cpu_aregs[((insn) >> (pos)) & 7]
63 #define FREG(insn, pos) cpu_fregs[((insn) >> (pos)) & 7]
64 #define MACREG(acc) cpu_macc[acc]
65 #define QREG_SP cpu_aregs[7]
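/* Worked example (added for illustration, not in the original source):
 * for the opcode 0x2483 ("move.l %d3,(%a2)") the source register field
 * sits in bits [2:0] and the destination register field in bits [11:9],
 * so DREG(insn, 0) selects cpu_dregs[3] and AREG(insn, 9) selects
 * cpu_aregs[2]. */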
67 static TCGv NULL_QREG;
68 #define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
69 /* Used to distinguish stores from bad addressing modes. */
70 static TCGv store_dummy;
72 #include "gen-icount.h"
74 void m68k_tcg_init(void)
79 #define DEFO32(name, offset) QREG_##name = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, offset), #name);
80 #define DEFO64(name, offset) QREG_##name = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, offset), #name);
81 #define DEFF64(name, offset) DEFO64(name, offset)
87 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
90 for (i = 0; i < 8; i++) {
92 cpu_dregs[i] = tcg_global_mem_new(TCG_AREG0,
93 offsetof(CPUM68KState, dregs[i]), p);
96 cpu_aregs[i] = tcg_global_mem_new(TCG_AREG0,
97 offsetof(CPUM68KState, aregs[i]), p);
100 cpu_fregs[i] = tcg_global_mem_new_i64(TCG_AREG0,
101 offsetof(CPUM68KState, fregs[i]), p);
104 for (i = 0; i < 4; i++) {
105 sprintf(p, "ACC%d", i);
106 cpu_macc[i] = tcg_global_mem_new_i64(TCG_AREG0,
107 offsetof(CPUM68KState, macc[i]), p);
111 NULL_QREG = tcg_global_mem_new(TCG_AREG0, -4, "NULL");
112 store_dummy = tcg_global_mem_new(TCG_AREG0, -8, "NULL");
118 static inline void qemu_assert(int cond, const char *msg)
121 fprintf (stderr, "badness: %s\n", msg);
126 /* internal defines */
127 typedef struct DisasContext {
129 target_ulong insn_pc; /* Start of the current instruction. */
135 struct TranslationBlock *tb;
136 int singlestep_enabled;
142 #define DISAS_JUMP_NEXT 4
144 #if defined(CONFIG_USER_ONLY)
147 #define IS_USER(s) s->user
150 /* XXX: move that elsewhere */
151 /* ??? Fix exceptions. */
152 static void *gen_throws_exception;
153 #define gen_last_qop NULL
161 typedef void (*disas_proc)(DisasContext *, uint16_t);
163 #ifdef DEBUG_DISPATCH
164 #define DISAS_INSN(name) \
165 static void real_disas_##name (DisasContext *s, uint16_t insn); \
166 static void disas_##name (DisasContext *s, uint16_t insn) { \
167 qemu_log("Dispatch " #name "\n"); \
168 real_disas_##name(s, insn); } \
169 static void real_disas_##name (DisasContext *s, uint16_t insn)
171 #define DISAS_INSN(name) \
172 static void disas_##name (DisasContext *s, uint16_t insn)
175 /* FIXME: Remove this. */
176 #define gen_im32(val) tcg_const_i32(val)
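/* Illustrative expansion (assuming DEBUG_DISPATCH is not defined, as above):
 *     DISAS_INSN(nop)
 * becomes
 *     static void disas_nop (DisasContext *s, uint16_t insn)
 * i.e. each handler is a plain static function that register_m68k_insns()
 * later stores in opcode_table[]; with DEBUG_DISPATCH defined, a logging
 * wrapper of the same name is generated instead. */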
178 /* Generate a load from the specified address. Narrow values are
179 sign extended to full register width. */
180 static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
183 int index = IS_USER(s);
185 tmp = tcg_temp_new_i32();
189 tcg_gen_qemu_ld8s(tmp, addr, index);
191 tcg_gen_qemu_ld8u(tmp, addr, index);
195 tcg_gen_qemu_ld16s(tmp, addr, index);
197 tcg_gen_qemu_ld16u(tmp, addr, index);
201 tcg_gen_qemu_ld32u(tmp, addr, index);
204 qemu_assert(0, "bad load size");
206 gen_throws_exception = gen_last_qop;
210 static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)
213 int index = IS_USER(s);
215 tmp = tcg_temp_new_i64();
216 tcg_gen_qemu_ldf64(tmp, addr, index);
217 gen_throws_exception = gen_last_qop;
221 /* Generate a store. */
222 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
224 int index = IS_USER(s);
228 tcg_gen_qemu_st8(val, addr, index);
231 tcg_gen_qemu_st16(val, addr, index);
235 tcg_gen_qemu_st32(val, addr, index);
238 qemu_assert(0, "bad store size");
240 gen_throws_exception = gen_last_qop;
243 static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
245 int index = IS_USER(s);
247 tcg_gen_qemu_stf64(val, addr, index);
248 gen_throws_exception = gen_last_qop;
257 /* Generate a load or store according to WHAT: an unsigned load for
258 EA_LOADU, a sign-extending load for EA_LOADS, otherwise a store of VAL. */
259 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
262 if (what == EA_STORE) {
263 gen_store(s, opsize, addr, val);
266 return gen_load(s, opsize, addr, what == EA_LOADS);
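/* Usage sketch (illustrative, not from the original source): a
 * sign-extending word read is generated with
 *     tmp = gen_ldst(s, OS_WORD, addr, NULL_QREG, EA_LOADS);
 * while passing EA_STORE makes VAL the value written to ADDR. */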
270 /* Read a 32-bit immediate constant. */
271 static inline uint32_t read_im32(DisasContext *s)
274 im = ((uint32_t)lduw_code(s->pc)) << 16;
276 im |= lduw_code(s->pc);
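/* Worked example (illustrative): the m68k instruction stream is big-endian,
 * so for the byte sequence 0x12 0x34 0x56 0x78 the two lduw_code() reads
 * return 0x1234 and 0x5678 and read_im32() yields 0x12345678. */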
282 /* Calculate an address index. */
282 static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
287 add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
288 if ((ext & 0x800) == 0) {
289 tcg_gen_ext16s_i32(tmp, add);
292 scale = (ext >> 9) & 3;
294 tcg_gen_shli_i32(tmp, add, scale);
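/* Worked example (illustrative): for the extension word 0x3600, bit 15 is
 * clear and bits [14:12] are 3, so the index register is %d3; bit 11 is
 * clear, so the low 16 bits are sign-extended; bits [10:9] are 3, so the
 * index is scaled by 8 -- i.e. the "d3.w*8" form. */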
300 /* Handle a base + index + displacement effective address.
301 A NULL_QREG base means pc-relative. */
302 static TCGv gen_lea_indexed(DisasContext *s, int opsize, TCGv base)
311 ext = lduw_code(s->pc);
314 if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
318 /* full extension word format */
319 if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
322 if ((ext & 0x30) > 0x10) {
323 /* base displacement */
324 if ((ext & 0x30) == 0x20) {
325 bd = (int16_t)lduw_code(s->pc);
333 tmp = tcg_temp_new();
334 if ((ext & 0x44) == 0) {
336 add = gen_addr_index(ext, tmp);
340 if ((ext & 0x80) == 0) {
341 /* base not suppressed */
342 if (IS_NULL_QREG(base)) {
343 base = gen_im32(offset + bd);
346 if (!IS_NULL_QREG(add)) {
347 tcg_gen_add_i32(tmp, add, base);
353 if (!IS_NULL_QREG(add)) {
355 tcg_gen_addi_i32(tmp, add, bd);
361 if ((ext & 3) != 0) {
362 /* memory indirect */
363 base = gen_load(s, OS_LONG, add, 0);
364 if ((ext & 0x44) == 4) {
365 add = gen_addr_index(ext, tmp);
366 tcg_gen_add_i32(tmp, add, base);
372 /* outer displacement */
373 if ((ext & 3) == 2) {
374 od = (int16_t)lduw_code(s->pc);
383 tcg_gen_addi_i32(tmp, add, od);
388 /* brief extension word format */
389 tmp = tcg_temp_new();
390 add = gen_addr_index(ext, tmp);
391 if (!IS_NULL_QREG(base)) {
392 tcg_gen_add_i32(tmp, add, base);
394 tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
396 tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
403 /* Update the CPU env CC_OP state. */
404 static inline void gen_flush_cc_op(DisasContext *s)
406 if (s->cc_op != CC_OP_DYNAMIC)
407 tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
410 /* Evaluate all the CC flags. */
411 static inline void gen_flush_flags(DisasContext *s)
413 if (s->cc_op == CC_OP_FLAGS)
416 gen_helper_flush_flags(cpu_env, QREG_CC_OP);
417 s->cc_op = CC_OP_FLAGS;
420 static void gen_logic_cc(DisasContext *s, TCGv val)
422 tcg_gen_mov_i32(QREG_CC_DEST, val);
423 s->cc_op = CC_OP_LOGIC;
426 static void gen_update_cc_add(TCGv dest, TCGv src)
428 tcg_gen_mov_i32(QREG_CC_DEST, dest);
429 tcg_gen_mov_i32(QREG_CC_SRC, src);
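/* Illustrative note (not part of the original source): condition codes are
 * evaluated lazily.  An ALU op typically just records its result and one
 * operand:
 *     tcg_gen_add_i32(dest, tmp, src);
 *     gen_update_cc_add(dest, src);
 *     s->cc_op = CC_OP_ADD;
 * and gen_flush_flags() later calls the flush_flags helper to turn
 * (CC_OP, CC_DEST, CC_SRC) into architectural NZVC bits only when a
 * conditional consumer actually needs them. */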
432 static inline int opsize_bytes(int opsize)
435 case OS_BYTE: return 1;
436 case OS_WORD: return 2;
437 case OS_LONG: return 4;
438 case OS_SINGLE: return 4;
439 case OS_DOUBLE: return 8;
441 qemu_assert(0, "bad operand size");
446 /* Assign a value to a register. If the width is less than the register
447 width, only the low part of the register is set. */
448 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
453 tcg_gen_andi_i32(reg, reg, 0xffffff00);
454 tmp = tcg_temp_new();
455 tcg_gen_ext8u_i32(tmp, val);
456 tcg_gen_or_i32(reg, reg, tmp);
459 tcg_gen_andi_i32(reg, reg, 0xffff0000);
460 tmp = tcg_temp_new();
461 tcg_gen_ext16u_i32(tmp, val);
462 tcg_gen_or_i32(reg, reg, tmp);
466 tcg_gen_mov_i32(reg, val);
469 qemu_assert(0, "Bad operand size");
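/* Worked example (illustrative): with the register holding 0x12345678,
 * gen_partset_reg(OS_BYTE, reg, val) with val = 0xab leaves 0x123456ab,
 * OS_WORD with val = 0xabcd leaves 0x1234abcd, and OS_LONG simply copies
 * val. */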
474 /* Sign or zero extend a value. */
475 static inline TCGv gen_extend(TCGv val, int opsize, int sign)
481 tmp = tcg_temp_new();
483 tcg_gen_ext8s_i32(tmp, val);
485 tcg_gen_ext8u_i32(tmp, val);
488 tmp = tcg_temp_new();
490 tcg_gen_ext16s_i32(tmp, val);
492 tcg_gen_ext16u_i32(tmp, val);
499 qemu_assert(0, "Bad operand size");
504 /* Generate code for an "effective address". Does not adjust the base
505 register for autoincrement addressing modes. */
506 static TCGv gen_lea(DisasContext *s, uint16_t insn, int opsize)
513 switch ((insn >> 3) & 7) {
514 case 0: /* Data register direct. */
515 case 1: /* Address register direct. */
517 case 2: /* Indirect register */
518 case 3: /* Indirect postincrement. */
519 return AREG(insn, 0);
520 case 4: /* Indirect predecrement. */
522 tmp = tcg_temp_new();
523 tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
525 case 5: /* Indirect displacement. */
527 tmp = tcg_temp_new();
528 ext = lduw_code(s->pc);
530 tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
532 case 6: /* Indirect index + displacement. */
534 return gen_lea_indexed(s, opsize, reg);
537 case 0: /* Absolute short. */
538 offset = ldsw_code(s->pc);
540 return gen_im32(offset);
541 case 1: /* Absolute long. */
542 offset = read_im32(s);
543 return gen_im32(offset);
544 case 2: /* pc displacement */
545 tmp = tcg_temp_new();
547 offset += ldsw_code(s->pc);
549 return gen_im32(offset);
550 case 3: /* pc index+displacement. */
551 return gen_lea_indexed(s, opsize, NULL_QREG);
552 case 4: /* Immediate. */
557 /* Should never happen. */
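/* Worked example (illustrative): for "move.l 8(%a1),%d0" the source EA
 * field has mode = 5 (bits [5:3]) and register = 1 (bits [2:0]), so
 * gen_lea() takes the indirect-displacement case, fetches the 16-bit
 * displacement 8 from the instruction stream and emits
 *     tcg_gen_addi_i32(tmp, AREG(insn, 0), 8); */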
561 /* Helper function for gen_ea. Reuse the computed address between the
562 read and write of read/write operands. */
563 static inline TCGv gen_ea_once(DisasContext *s, uint16_t insn, int opsize,
564 TCGv val, TCGv *addrp, ea_what what)
568 if (addrp && what == EA_STORE) {
571 tmp = gen_lea(s, insn, opsize);
572 if (IS_NULL_QREG(tmp))
577 return gen_ldst(s, opsize, tmp, val, what);
580 /* Generate code to load/store a value to/from an EA. If WHAT is EA_STORE
581 this is a write, otherwise it is a read (EA_LOADS == sign extend,
582 EA_LOADU == zero extend). ADDRP is non-null for read/write operands. */
583 static TCGv gen_ea(DisasContext *s, uint16_t insn, int opsize, TCGv val,
584 TCGv *addrp, ea_what what)
590 switch ((insn >> 3) & 7) {
591 case 0: /* Data register direct. */
593 if (what == EA_STORE) {
594 gen_partset_reg(opsize, reg, val);
597 return gen_extend(reg, opsize, what == EA_LOADS);
599 case 1: /* Address register direct. */
601 if (what == EA_STORE) {
602 tcg_gen_mov_i32(reg, val);
605 return gen_extend(reg, opsize, what == EA_LOADS);
607 case 2: /* Indirect register */
609 return gen_ldst(s, opsize, reg, val, what);
610 case 3: /* Indirect postincrement. */
612 result = gen_ldst(s, opsize, reg, val, what);
613 /* ??? This is not exception safe. The instruction may still
614 fault after this point. */
615 if (what == EA_STORE || !addrp)
616 tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
618 case 4: /* Indirect predecrement. */
621 if (addrp && what == EA_STORE) {
624 tmp = gen_lea(s, insn, opsize);
625 if (IS_NULL_QREG(tmp))
630 result = gen_ldst(s, opsize, tmp, val, what);
631 /* ??? This is not exception safe. The instruction may still
632 fault after this point. */
633 if (what == EA_STORE || !addrp) {
635 tcg_gen_mov_i32(reg, tmp);
639 case 5: /* Indirect displacement. */
640 case 6: /* Indirect index + displacement. */
641 return gen_ea_once(s, insn, opsize, val, addrp, what);
644 case 0: /* Absolute short. */
645 case 1: /* Absolute long. */
646 case 2: /* pc displacement */
647 case 3: /* pc index+displacement. */
648 return gen_ea_once(s, insn, opsize, val, addrp, what);
649 case 4: /* Immediate. */
650 /* Sign extend values for consistency. */
653 if (what == EA_LOADS)
654 offset = ldsb_code(s->pc + 1);
656 offset = ldub_code(s->pc + 1);
660 if (what == EA_LOADS)
661 offset = ldsw_code(s->pc);
663 offset = lduw_code(s->pc);
667 offset = read_im32(s);
670 qemu_assert(0, "Bad immediate operand");
672 return tcg_const_i32(offset);
677 /* Should never happen. */
681 /* This generates a conditional branch, clobbering all temporaries. */
682 static void gen_jmpcc(DisasContext *s, int cond, int l1)
686 /* TODO: Optimize compare/branch pairs rather than always flushing
687 flag state to CC_OP_FLAGS. */
695 case 2: /* HI (!C && !Z) */
696 tmp = tcg_temp_new();
697 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
698 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
700 case 3: /* LS (C || Z) */
701 tmp = tcg_temp_new();
702 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
703 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
705 case 4: /* CC (!C) */
706 tmp = tcg_temp_new();
707 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
708 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
711 tmp = tcg_temp_new();
712 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
713 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
715 case 6: /* NE (!Z) */
716 tmp = tcg_temp_new();
717 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
718 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
721 tmp = tcg_temp_new();
722 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
723 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
725 case 8: /* VC (!V) */
726 tmp = tcg_temp_new();
727 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
728 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
731 tmp = tcg_temp_new();
732 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
733 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
735 case 10: /* PL (!N) */
736 tmp = tcg_temp_new();
737 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
738 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
740 case 11: /* MI (N) */
741 tmp = tcg_temp_new();
742 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
743 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
745 case 12: /* GE (!(N ^ V)) */
746 tmp = tcg_temp_new();
747 assert(CCF_V == (CCF_N >> 2));
748 tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
749 tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
750 tcg_gen_andi_i32(tmp, tmp, CCF_V);
751 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
753 case 13: /* LT (N ^ V) */
754 tmp = tcg_temp_new();
755 assert(CCF_V == (CCF_N >> 2));
756 tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
757 tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
758 tcg_gen_andi_i32(tmp, tmp, CCF_V);
759 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
761 case 14: /* GT (!(Z || (N ^ V))) */
762 tmp = tcg_temp_new();
763 assert(CCF_V == (CCF_N >> 2));
764 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
765 tcg_gen_shri_i32(tmp, tmp, 2);
766 tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
767 tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
768 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
770 case 15: /* LE (Z || (N ^ V)) */
771 tmp = tcg_temp_new();
772 assert(CCF_V == (CCF_N >> 2));
773 tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
774 tcg_gen_shri_i32(tmp, tmp, 2);
775 tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
776 tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
777 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
780 /* Should never happen. */
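/* Illustrative mapping (not in the original source): the 4-bit condition
 * field follows the standard m68k Bcc/Scc encoding, e.g. cond = 4 is CC
 * (carry clear), cond = 6 is NE (Z clear) and cond = 13 is LT (N ^ V set),
 * matching the cases above. */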
791 l1 = gen_new_label();
792 cond = (insn >> 8) & 0xf;
794 tcg_gen_andi_i32(reg, reg, 0xffffff00);
795 /* This is safe because we modify the reg directly, with no other values live. */
797 gen_jmpcc(s, cond ^ 1, l1);
798 tcg_gen_ori_i32(reg, reg, 0xff);
802 /* Force a TB lookup after an instruction that changes the CPU state. */
803 static void gen_lookup_tb(DisasContext *s)
806 tcg_gen_movi_i32(QREG_PC, s->pc);
807 s->is_jmp = DISAS_UPDATE;
810 /* Generate a jump to an immediate address. */
811 static void gen_jmp_im(DisasContext *s, uint32_t dest)
814 tcg_gen_movi_i32(QREG_PC, dest);
815 s->is_jmp = DISAS_JUMP;
818 /* Generate a jump to the address in qreg DEST. */
819 static void gen_jmp(DisasContext *s, TCGv dest)
822 tcg_gen_mov_i32(QREG_PC, dest);
823 s->is_jmp = DISAS_JUMP;
826 static void gen_exception(DisasContext *s, uint32_t where, int nr)
829 gen_jmp_im(s, where);
830 gen_helper_raise_exception(tcg_const_i32(nr));
833 static inline void gen_addr_fault(DisasContext *s)
835 gen_exception(s, s->insn_pc, EXCP_ADDRESS);
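/* Usage sketch for the EA macros below (illustrative, not part of the
 * original source): a read-modify-write handler typically does
 *     SRC_EA(src, OS_LONG, 0, &addr);        /+ load operand, keep address +/
 *     tcg_gen_xor_i32(dest, src, reg);
 *     DEST_EA(insn, OS_LONG, dest, &addr);   /+ store back to the same EA +/
 * and both macros abandon the handler when gen_ea() reports an invalid
 * addressing mode (IS_NULL_QREG on the result). */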
838 #define SRC_EA(result, opsize, op_sign, addrp) do { \
839 result = gen_ea(s, insn, opsize, NULL_QREG, addrp, op_sign ? EA_LOADS : EA_LOADU); \
840 if (IS_NULL_QREG(result)) { \
846 #define DEST_EA(insn, opsize, val, addrp) do { \
847 TCGv ea_result = gen_ea(s, insn, opsize, val, addrp, EA_STORE); \
848 if (IS_NULL_QREG(ea_result)) { \
854 /* Generate a jump to an immediate address. */
855 static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
857 TranslationBlock *tb;
860 if (unlikely(s->singlestep_enabled)) {
861 gen_exception(s, dest, EXCP_DEBUG);
862 } else if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
863 (s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
865 tcg_gen_movi_i32(QREG_PC, dest);
866 tcg_gen_exit_tb((long)tb + n);
871 s->is_jmp = DISAS_TB_JUMP;
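/* Illustrative note: exiting with (long)tb + n lets QEMU later patch a
 * direct jump into slot n of this TB ("TB chaining"), whereas the
 * fall-back path exits without a TB pointer and forces the next TB to be
 * found through the hash-table lookup. */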
874 DISAS_INSN(undef_mac)
876 gen_exception(s, s->pc - 2, EXCP_LINEA);
879 DISAS_INSN(undef_fpu)
881 gen_exception(s, s->pc - 2, EXCP_LINEF);
886 gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
887 cpu_abort(cpu_single_env, "Illegal instruction: %04x @ %08x",
898 sign = (insn & 0x100) != 0;
900 tmp = tcg_temp_new();
902 tcg_gen_ext16s_i32(tmp, reg);
904 tcg_gen_ext16u_i32(tmp, reg);
905 SRC_EA(src, OS_WORD, sign, NULL);
906 tcg_gen_mul_i32(tmp, tmp, src);
907 tcg_gen_mov_i32(reg, tmp);
908 /* Unlike m68k, coldfire always clears the overflow bit. */
909 gen_logic_cc(s, tmp);
919 sign = (insn & 0x100) != 0;
922 tcg_gen_ext16s_i32(QREG_DIV1, reg);
924 tcg_gen_ext16u_i32(QREG_DIV1, reg);
926 SRC_EA(src, OS_WORD, sign, NULL);
927 tcg_gen_mov_i32(QREG_DIV2, src);
929 gen_helper_divs(cpu_env, tcg_const_i32(1));
931 gen_helper_divu(cpu_env, tcg_const_i32(1));
934 tmp = tcg_temp_new();
935 src = tcg_temp_new();
936 tcg_gen_ext16u_i32(tmp, QREG_DIV1);
937 tcg_gen_shli_i32(src, QREG_DIV2, 16);
938 tcg_gen_or_i32(reg, tmp, src);
939 s->cc_op = CC_OP_FLAGS;
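/* Worked example (illustrative): for divu.w with a dividend of 100 and a
 * divisor of 7 the helper leaves quotient 14 in DIV1 and remainder 2 in
 * DIV2; the code above packs them as (2 << 16) | 14 = 0x0002000e, i.e.
 * remainder in the upper word, quotient in the lower word. */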
949 ext = lduw_code(s->pc);
952 gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
957 tcg_gen_mov_i32(QREG_DIV1, num);
958 SRC_EA(den, OS_LONG, 0, NULL);
959 tcg_gen_mov_i32(QREG_DIV2, den);
961 gen_helper_divs(cpu_env, tcg_const_i32(0));
963 gen_helper_divu(cpu_env, tcg_const_i32(0));
965 if ((ext & 7) == ((ext >> 12) & 7)) {
967 tcg_gen_mov_i32 (reg, QREG_DIV1);
970 tcg_gen_mov_i32 (reg, QREG_DIV2);
972 s->cc_op = CC_OP_FLAGS;
984 add = (insn & 0x4000) != 0;
986 dest = tcg_temp_new();
988 SRC_EA(tmp, OS_LONG, 0, &addr);
992 SRC_EA(src, OS_LONG, 0, NULL);
995 tcg_gen_add_i32(dest, tmp, src);
996 gen_helper_xflag_lt(QREG_CC_X, dest, src);
997 s->cc_op = CC_OP_ADD;
999 gen_helper_xflag_lt(QREG_CC_X, tmp, src);
1000 tcg_gen_sub_i32(dest, tmp, src);
1001 s->cc_op = CC_OP_SUB;
1003 gen_update_cc_add(dest, src);
1005 DEST_EA(insn, OS_LONG, dest, &addr);
1007 tcg_gen_mov_i32(reg, dest);
1012 /* Reverse the order of the bits in REG. */
1016 reg = DREG(insn, 0);
1017 gen_helper_bitrev(reg, reg);
1020 DISAS_INSN(bitop_reg)
1030 if ((insn & 0x38) != 0)
1034 op = (insn >> 6) & 3;
1035 SRC_EA(src1, opsize, 0, op ? &addr: NULL);
1036 src2 = DREG(insn, 9);
1037 dest = tcg_temp_new();
1040 tmp = tcg_temp_new();
1041 if (opsize == OS_BYTE)
1042 tcg_gen_andi_i32(tmp, src2, 7);
1044 tcg_gen_andi_i32(tmp, src2, 31);
1046 tmp = tcg_temp_new();
1047 tcg_gen_shr_i32(tmp, src1, src2);
1048 tcg_gen_andi_i32(tmp, tmp, 1);
1049 tcg_gen_shli_i32(tmp, tmp, 2);
1050 /* Clear CCF_Z if bit set. */
1051 tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
1052 tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
1054 tcg_gen_shl_i32(tmp, tcg_const_i32(1), src2);
1057 tcg_gen_xor_i32(dest, src1, tmp);
1060 tcg_gen_not_i32(tmp, tmp);
1061 tcg_gen_and_i32(dest, src1, tmp);
1064 tcg_gen_or_i32(dest, src1, tmp);
1070 DEST_EA(insn, opsize, dest, &addr);
1076 reg = DREG(insn, 0);
1078 gen_helper_sats(reg, reg, QREG_CC_DEST);
1079 gen_logic_cc(s, reg);
1082 static void gen_push(DisasContext *s, TCGv val)
1086 tmp = tcg_temp_new();
1087 tcg_gen_subi_i32(tmp, QREG_SP, 4);
1088 gen_store(s, OS_LONG, tmp, val);
1089 tcg_gen_mov_i32(QREG_SP, tmp);
1101 mask = lduw_code(s->pc);
1103 tmp = gen_lea(s, insn, OS_LONG);
1104 if (IS_NULL_QREG(tmp)) {
1108 addr = tcg_temp_new();
1109 tcg_gen_mov_i32(addr, tmp);
1110 is_load = ((insn & 0x0400) != 0);
1111 for (i = 0; i < 16; i++, mask >>= 1) {
1118 tmp = gen_load(s, OS_LONG, addr, 0);
1119 tcg_gen_mov_i32(reg, tmp);
1121 gen_store(s, OS_LONG, addr, reg);
1124 tcg_gen_addi_i32(addr, addr, 4);
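/* Worked example (illustrative): in the control-mode movem above, mask
 * bits 0..7 select %d0..%d7 and bits 8..15 select %a0..%a7, so a mask of
 * 0x2007 transfers %d0-%d2 and %a5, with the address advancing by 4 for
 * each selected register. */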
1129 DISAS_INSN(bitop_im)
1139 if ((insn & 0x38) != 0)
1143 op = (insn >> 6) & 3;
1145 bitnum = lduw_code(s->pc);
1147 if (bitnum & 0xff00) {
1148 disas_undef(s, insn);
1152 SRC_EA(src1, opsize, 0, op ? &addr: NULL);
1155 if (opsize == OS_BYTE)
1161 tmp = tcg_temp_new();
1162 assert (CCF_Z == (1 << 2));
1164 tcg_gen_shri_i32(tmp, src1, bitnum - 2);
1165 else if (bitnum < 2)
1166 tcg_gen_shli_i32(tmp, src1, 2 - bitnum);
1168 tcg_gen_mov_i32(tmp, src1);
1169 tcg_gen_andi_i32(tmp, tmp, CCF_Z);
1170 /* Clear CCF_Z if bit set. */
1171 tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
1172 tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
1176 tcg_gen_xori_i32(tmp, src1, mask);
1179 tcg_gen_andi_i32(tmp, src1, ~mask);
1182 tcg_gen_ori_i32(tmp, src1, mask);
1187 DEST_EA(insn, opsize, tmp, &addr);
1191 DISAS_INSN(arith_im)
1199 op = (insn >> 9) & 7;
1200 SRC_EA(src1, OS_LONG, 0, (op == 6) ? NULL : &addr);
1202 dest = tcg_temp_new();
1205 tcg_gen_ori_i32(dest, src1, im);
1206 gen_logic_cc(s, dest);
1209 tcg_gen_andi_i32(dest, src1, im);
1210 gen_logic_cc(s, dest);
1213 tcg_gen_mov_i32(dest, src1);
1214 gen_helper_xflag_lt(QREG_CC_X, dest, gen_im32(im));
1215 tcg_gen_subi_i32(dest, dest, im);
1216 gen_update_cc_add(dest, gen_im32(im));
1217 s->cc_op = CC_OP_SUB;
1220 tcg_gen_mov_i32(dest, src1);
1221 tcg_gen_addi_i32(dest, dest, im);
1222 gen_update_cc_add(dest, gen_im32(im));
1223 gen_helper_xflag_lt(QREG_CC_X, dest, gen_im32(im));
1224 s->cc_op = CC_OP_ADD;
1227 tcg_gen_xori_i32(dest, src1, im);
1228 gen_logic_cc(s, dest);
1231 tcg_gen_mov_i32(dest, src1);
1232 tcg_gen_subi_i32(dest, dest, im);
1233 gen_update_cc_add(dest, gen_im32(im));
1234 s->cc_op = CC_OP_SUB;
1240 DEST_EA(insn, OS_LONG, dest, &addr);
1248 reg = DREG(insn, 0);
1249 tcg_gen_bswap32_i32(reg, reg);
1259 switch (insn >> 12) {
1260 case 1: /* move.b */
1263 case 2: /* move.l */
1266 case 3: /* move.w */
1272 SRC_EA(src, opsize, 1, NULL);
1273 op = (insn >> 6) & 7;
1276 /* The value will already have been sign extended. */
1277 dest = AREG(insn, 9);
1278 tcg_gen_mov_i32(dest, src);
1282 dest_ea = ((insn >> 9) & 7) | (op << 3);
1283 DEST_EA(dest_ea, opsize, src, NULL);
1284 /* This will be correct because loads sign extend. */
1285 gen_logic_cc(s, src);
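/* Illustrative note: in a MOVE opcode the destination EA is encoded
 * "backwards" -- register in bits [11:9], mode in bits [8:6] -- so the
 * handler above rebuilds a conventional 6-bit mode/register EA with
 *     dest_ea = ((insn >> 9) & 7) | (op << 3);
 * before handing it to DEST_EA. */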
1294 reg = DREG(insn, 0);
1295 gen_helper_subx_cc(reg, cpu_env, tcg_const_i32(0), reg);
1303 reg = AREG(insn, 9);
1304 tmp = gen_lea(s, insn, OS_LONG);
1305 if (IS_NULL_QREG(tmp)) {
1309 tcg_gen_mov_i32(reg, tmp);
1316 switch ((insn >> 6) & 3) {
1329 DEST_EA(insn, opsize, gen_im32(0), NULL);
1330 gen_logic_cc(s, gen_im32(0));
1333 static TCGv gen_get_ccr(DisasContext *s)
1338 dest = tcg_temp_new();
1339 tcg_gen_shli_i32(dest, QREG_CC_X, 4);
1340 tcg_gen_or_i32(dest, dest, QREG_CC_DEST);
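/* Worked example (illustrative): gen_get_ccr() rebuilds the architectural
 * CCR as (X << 4) | NZVC from the split QREG_CC_X / QREG_CC_DEST state,
 * so X=1, N=1, Z=0, V=0, C=1 yields 0x19. */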
1344 DISAS_INSN(move_from_ccr)
1349 ccr = gen_get_ccr(s);
1350 reg = DREG(insn, 0);
1351 gen_partset_reg(OS_WORD, reg, ccr);
1359 reg = DREG(insn, 0);
1360 src1 = tcg_temp_new();
1361 tcg_gen_mov_i32(src1, reg);
1362 tcg_gen_neg_i32(reg, src1);
1363 s->cc_op = CC_OP_SUB;
1364 gen_update_cc_add(reg, src1);
1365 gen_helper_xflag_lt(QREG_CC_X, tcg_const_i32(0), src1);
1366 s->cc_op = CC_OP_SUB;
1369 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
1371 tcg_gen_movi_i32(QREG_CC_DEST, val & 0xf);
1372 tcg_gen_movi_i32(QREG_CC_X, (val & 0x10) >> 4);
1374 gen_helper_set_sr(cpu_env, tcg_const_i32(val & 0xff00));
1378 static void gen_set_sr(DisasContext *s, uint16_t insn, int ccr_only)
1383 s->cc_op = CC_OP_FLAGS;
1384 if ((insn & 0x38) == 0)
1386 tmp = tcg_temp_new();
1387 reg = DREG(insn, 0);
1388 tcg_gen_andi_i32(QREG_CC_DEST, reg, 0xf);
1389 tcg_gen_shri_i32(tmp, reg, 4);
1390 tcg_gen_andi_i32(QREG_CC_X, tmp, 1);
1392 gen_helper_set_sr(cpu_env, reg);
1395 else if ((insn & 0x3f) == 0x3c)
1398 val = lduw_code(s->pc);
1400 gen_set_sr_im(s, val, ccr_only);
1403 disas_undef(s, insn);
1406 DISAS_INSN(move_to_ccr)
1408 gen_set_sr(s, insn, 1);
1415 reg = DREG(insn, 0);
1416 tcg_gen_not_i32(reg, reg);
1417 gen_logic_cc(s, reg);
1426 src1 = tcg_temp_new();
1427 src2 = tcg_temp_new();
1428 reg = DREG(insn, 0);
1429 tcg_gen_shli_i32(src1, reg, 16);
1430 tcg_gen_shri_i32(src2, reg, 16);
1431 tcg_gen_or_i32(reg, src1, src2);
1432 gen_logic_cc(s, reg);
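/* Worked example (illustrative): swap on a register holding 0x12345678
 * ORs the two 16-bit shifts together, giving 0x56781234. */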
1439 tmp = gen_lea(s, insn, OS_LONG);
1440 if (IS_NULL_QREG(tmp)) {
1453 reg = DREG(insn, 0);
1454 op = (insn >> 6) & 7;
1455 tmp = tcg_temp_new();
1457 tcg_gen_ext16s_i32(tmp, reg);
1459 tcg_gen_ext8s_i32(tmp, reg);
1461 gen_partset_reg(OS_WORD, reg, tmp);
1463 tcg_gen_mov_i32(reg, tmp);
1464 gen_logic_cc(s, tmp);
1472 switch ((insn >> 6) & 3) {
1485 SRC_EA(tmp, opsize, 1, NULL);
1486 gen_logic_cc(s, tmp);
1491 /* Implemented as a NOP. */
1496 gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
1499 /* ??? This should be atomic. */
1506 dest = tcg_temp_new();
1507 SRC_EA(src1, OS_BYTE, 1, &addr);
1508 gen_logic_cc(s, src1);
1509 tcg_gen_ori_i32(dest, src1, 0x80);
1510 DEST_EA(insn, OS_BYTE, dest, &addr);
1520 /* The upper 32 bits of the product are discarded, so
1521 muls.l and mulu.l are functionally equivalent. */
1522 ext = lduw_code(s->pc);
1525 gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
1528 reg = DREG(ext, 12);
1529 SRC_EA(src1, OS_LONG, 0, NULL);
1530 dest = tcg_temp_new();
1531 tcg_gen_mul_i32(dest, src1, reg);
1532 tcg_gen_mov_i32(reg, dest);
1533 /* Unlike m68k, coldfire always clears the overflow bit. */
1534 gen_logic_cc(s, dest);
1543 offset = ldsw_code(s->pc);
1545 reg = AREG(insn, 0);
1546 tmp = tcg_temp_new();
1547 tcg_gen_subi_i32(tmp, QREG_SP, 4);
1548 gen_store(s, OS_LONG, tmp, reg);
1549 if ((insn & 7) != 7)
1550 tcg_gen_mov_i32(reg, tmp);
1551 tcg_gen_addi_i32(QREG_SP, tmp, offset);
1560 src = tcg_temp_new();
1561 reg = AREG(insn, 0);
1562 tcg_gen_mov_i32(src, reg);
1563 tmp = gen_load(s, OS_LONG, src, 0);
1564 tcg_gen_mov_i32(reg, tmp);
1565 tcg_gen_addi_i32(QREG_SP, src, 4);
1576 tmp = gen_load(s, OS_LONG, QREG_SP, 0);
1577 tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
1585 /* Load the target address first to ensure correct exception behavior. */
1587 tmp = gen_lea(s, insn, OS_LONG);
1588 if (IS_NULL_QREG(tmp)) {
1592 if ((insn & 0x40) == 0) {
1594 gen_push(s, gen_im32(s->pc));
1607 SRC_EA(src1, OS_LONG, 0, &addr);
1608 val = (insn >> 9) & 7;
1611 dest = tcg_temp_new();
1612 tcg_gen_mov_i32(dest, src1);
1613 if ((insn & 0x38) == 0x08) {
1614 /* Don't update condition codes if the destination is an
1615 address register. */
1616 if (insn & 0x0100) {
1617 tcg_gen_subi_i32(dest, dest, val);
1619 tcg_gen_addi_i32(dest, dest, val);
1622 src2 = gen_im32(val);
1623 if (insn & 0x0100) {
1624 gen_helper_xflag_lt(QREG_CC_X, dest, src2);
1625 tcg_gen_subi_i32(dest, dest, val);
1626 s->cc_op = CC_OP_SUB;
1628 tcg_gen_addi_i32(dest, dest, val);
1629 gen_helper_xflag_lt(QREG_CC_X, dest, src2);
1630 s->cc_op = CC_OP_ADD;
1632 gen_update_cc_add(dest, src2);
1634 DEST_EA(insn, OS_LONG, dest, &addr);
1640 case 2: /* One extension word. */
1643 case 3: /* Two extension words. */
1646 case 4: /* No extension words. */
1649 disas_undef(s, insn);
1661 op = (insn >> 8) & 0xf;
1662 offset = (int8_t)insn;
1664 offset = ldsw_code(s->pc);
1666 } else if (offset == -1) {
1667 offset = read_im32(s);
1671 gen_push(s, gen_im32(s->pc));
1676 l1 = gen_new_label();
1677 gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
1678 gen_jmp_tb(s, 1, base + offset);
1680 gen_jmp_tb(s, 0, s->pc);
1682 /* Unconditional branch. */
1683 gen_jmp_tb(s, 0, base + offset);
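/* Illustrative note: the low byte of a Bcc/BRA/BSR opcode holds an 8-bit
 * displacement; the special values 0x00 and 0xff (handled above) mean the
 * displacement is instead a following 16-bit or 32-bit immediate. */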
1692 tcg_gen_movi_i32(DREG(insn, 9), val);
1693 gen_logic_cc(s, tcg_const_i32(val));
1706 SRC_EA(src, opsize, (insn & 0x80) == 0, NULL);
1707 reg = DREG(insn, 9);
1708 tcg_gen_mov_i32(reg, src);
1709 gen_logic_cc(s, src);
1719 reg = DREG(insn, 9);
1720 dest = tcg_temp_new();
1722 SRC_EA(src, OS_LONG, 0, &addr);
1723 tcg_gen_or_i32(dest, src, reg);
1724 DEST_EA(insn, OS_LONG, dest, &addr);
1726 SRC_EA(src, OS_LONG, 0, NULL);
1727 tcg_gen_or_i32(dest, src, reg);
1728 tcg_gen_mov_i32(reg, dest);
1730 gen_logic_cc(s, dest);
1738 SRC_EA(src, OS_LONG, 0, NULL);
1739 reg = AREG(insn, 9);
1740 tcg_gen_sub_i32(reg, reg, src);
1749 reg = DREG(insn, 9);
1750 src = DREG(insn, 0);
1751 gen_helper_subx_cc(reg, cpu_env, reg, src);
1759 val = (insn >> 9) & 7;
1762 src = gen_im32(val);
1763 gen_logic_cc(s, src);
1764 DEST_EA(insn, OS_LONG, src, NULL);
1775 op = (insn >> 6) & 3;
1779 s->cc_op = CC_OP_CMPB;
1783 s->cc_op = CC_OP_CMPW;
1787 s->cc_op = CC_OP_SUB;
1792 SRC_EA(src, opsize, 1, NULL);
1793 reg = DREG(insn, 9);
1794 dest = tcg_temp_new();
1795 tcg_gen_sub_i32(dest, reg, src);
1796 gen_update_cc_add(dest, src);
1811 SRC_EA(src, opsize, 1, NULL);
1812 reg = AREG(insn, 9);
1813 dest = tcg_temp_new();
1814 tcg_gen_sub_i32(dest, reg, src);
1815 gen_update_cc_add(dest, src);
1816 s->cc_op = CC_OP_SUB;
1826 SRC_EA(src, OS_LONG, 0, &addr);
1827 reg = DREG(insn, 9);
1828 dest = tcg_temp_new();
1829 tcg_gen_xor_i32(dest, src, reg);
1830 gen_logic_cc(s, dest);
1831 DEST_EA(insn, OS_LONG, dest, &addr);
1841 reg = DREG(insn, 9);
1842 dest = tcg_temp_new();
1844 SRC_EA(src, OS_LONG, 0, &addr);
1845 tcg_gen_and_i32(dest, src, reg);
1846 DEST_EA(insn, OS_LONG, dest, &addr);
1848 SRC_EA(src, OS_LONG, 0, NULL);
1849 tcg_gen_and_i32(dest, src, reg);
1850 tcg_gen_mov_i32(reg, dest);
1852 gen_logic_cc(s, dest);
1860 SRC_EA(src, OS_LONG, 0, NULL);
1861 reg = AREG(insn, 9);
1862 tcg_gen_add_i32(reg, reg, src);
1871 reg = DREG(insn, 9);
1872 src = DREG(insn, 0);
1873 gen_helper_addx_cc(reg, cpu_env, reg, src);
1874 s->cc_op = CC_OP_FLAGS;
1877 /* TODO: This could be implemented without helper functions. */
1878 DISAS_INSN(shift_im)
1884 reg = DREG(insn, 0);
1885 tmp = (insn >> 9) & 7;
1888 shift = gen_im32(tmp);
1889 /* No need to flush flags because we know we will set the C flag. */
1891 gen_helper_shl_cc(reg, cpu_env, reg, shift);
1894 gen_helper_shr_cc(reg, cpu_env, reg, shift);
1896 gen_helper_sar_cc(reg, cpu_env, reg, shift);
1899 s->cc_op = CC_OP_SHIFT;
1902 DISAS_INSN(shift_reg)
1907 reg = DREG(insn, 0);
1908 shift = DREG(insn, 9);
1909 /* Shift by zero leaves C flag unmodified. */
1912 gen_helper_shl_cc(reg, cpu_env, reg, shift);
1915 gen_helper_shr_cc(reg, cpu_env, reg, shift);
1917 gen_helper_sar_cc(reg, cpu_env, reg, shift);
1920 s->cc_op = CC_OP_SHIFT;
1926 reg = DREG(insn, 0);
1927 gen_logic_cc(s, reg);
1928 gen_helper_ff1(reg, reg);
1931 static TCGv gen_get_sr(DisasContext *s)
1936 ccr = gen_get_ccr(s);
1937 sr = tcg_temp_new();
1938 tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
1939 tcg_gen_or_i32(sr, sr, ccr);
1949 ext = lduw_code(s->pc);
1951 if (ext != 0x46FC) {
1952 gen_exception(s, addr, EXCP_UNSUPPORTED);
1955 ext = lduw_code(s->pc);
1957 if (IS_USER(s) || (ext & SR_S) == 0) {
1958 gen_exception(s, addr, EXCP_PRIVILEGE);
1961 gen_push(s, gen_get_sr(s));
1962 gen_set_sr_im(s, ext, 0);
1965 DISAS_INSN(move_from_sr)
1971 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
1975 reg = DREG(insn, 0);
1976 gen_partset_reg(OS_WORD, reg, sr);
1979 DISAS_INSN(move_to_sr)
1982 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
1985 gen_set_sr(s, insn, 0);
1989 DISAS_INSN(move_from_usp)
1992 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
1995 /* TODO: Implement USP. */
1996 gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
1999 DISAS_INSN(move_to_usp)
2002 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2005 /* TODO: Implement USP. */
2006 gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
2011 gen_exception(s, s->pc, EXCP_HALT_INSN);
2019 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2023 ext = lduw_code(s->pc);
2026 gen_set_sr_im(s, ext, 0);
2027 tcg_gen_movi_i32(QREG_HALTED, 1);
2028 gen_exception(s, s->pc, EXCP_HLT);
2034 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2037 gen_exception(s, s->pc - 2, EXCP_RTE);
2046 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2050 ext = lduw_code(s->pc);
2054 reg = AREG(ext, 12);
2056 reg = DREG(ext, 12);
2058 gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
2065 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2068 /* ICache fetch. Implement as no-op. */
2074 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2077 /* Cache push/invalidate. Implement as no-op. */
2082 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2088 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2091 /* TODO: Implement wdebug. */
2092 qemu_assert(0, "WDEBUG not implemented");
2097 gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
2100 /* ??? FP exceptions are not implemented. Most exceptions are deferred until
2101 immediately before the next FP instruction is executed. */
2115 ext = lduw_code(s->pc);
2117 opmode = ext & 0x7f;
2118 switch ((ext >> 13) & 7) {
2123 case 3: /* fmove out */
2125 tmp32 = tcg_temp_new_i32();
2127 /* ??? TODO: Proper behavior on overflow. */
2128 switch ((ext >> 10) & 7) {
2131 gen_helper_f64_to_i32(tmp32, cpu_env, src);
2135 gen_helper_f64_to_f32(tmp32, cpu_env, src);
2139 gen_helper_f64_to_i32(tmp32, cpu_env, src);
2141 case 5: /* OS_DOUBLE */
2142 tcg_gen_mov_i32(tmp32, AREG(insn, 0));
2143 switch ((insn >> 3) & 7) {
2148 tcg_gen_addi_i32(tmp32, tmp32, -8);
2151 offset = ldsw_code(s->pc);
2153 tcg_gen_addi_i32(tmp32, tmp32, offset);
2158 gen_store64(s, tmp32, src);
2159 switch ((insn >> 3) & 7) {
2161 tcg_gen_addi_i32(tmp32, tmp32, 8);
2162 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2165 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2168 tcg_temp_free_i32(tmp32);
2172 gen_helper_f64_to_i32(tmp32, cpu_env, src);
2177 DEST_EA(insn, opsize, tmp32, NULL);
2178 tcg_temp_free_i32(tmp32);
2180 case 4: /* fmove to control register. */
2181 switch ((ext >> 10) & 7) {
2183 /* Not implemented. Ignore writes. */
2188 cpu_abort(NULL, "Unimplemented: fmove to control %d",
2192 case 5: /* fmove from control register. */
2193 switch ((ext >> 10) & 7) {
2195 /* Not implemented. Always return zero. */
2196 tmp32 = gen_im32(0);
2201 cpu_abort(NULL, "Unimplemented: fmove from control %d",
2205 DEST_EA(insn, OS_LONG, tmp32, NULL);
2207 case 6: /* fmovem */
2213 if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
2215 tmp32 = gen_lea(s, insn, OS_LONG);
2216 if (IS_NULL_QREG(tmp32)) {
2220 addr = tcg_temp_new_i32();
2221 tcg_gen_mov_i32(addr, tmp32);
2223 for (i = 0; i < 8; i++) {
2227 if (ext & (1 << 13)) {
2229 tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
2232 tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
2234 if (ext & (mask - 1))
2235 tcg_gen_addi_i32(addr, addr, 8);
2239 tcg_temp_free_i32(addr);
2243 if (ext & (1 << 14)) {
2244 /* Source effective address. */
2245 switch ((ext >> 10) & 7) {
2246 case 0: opsize = OS_LONG; break;
2247 case 1: opsize = OS_SINGLE; break;
2248 case 4: opsize = OS_WORD; break;
2249 case 5: opsize = OS_DOUBLE; break;
2250 case 6: opsize = OS_BYTE; break;
2254 if (opsize == OS_DOUBLE) {
2255 tmp32 = tcg_temp_new_i32();
2256 tcg_gen_mov_i32(tmp32, AREG(insn, 0));
2257 switch ((insn >> 3) & 7) {
2262 tcg_gen_addi_i32(tmp32, tmp32, -8);
2265 offset = ldsw_code(s->pc);
2267 tcg_gen_addi_i32(tmp32, tmp32, offset);
2270 offset = ldsw_code(s->pc);
2271 offset += s->pc - 2;
2273 tcg_gen_addi_i32(tmp32, tmp32, offset);
2278 src = gen_load64(s, tmp32);
2279 switch ((insn >> 3) & 7) {
2281 tcg_gen_addi_i32(tmp32, tmp32, 8);
2282 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2285 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2288 tcg_temp_free_i32(tmp32);
2290 SRC_EA(tmp32, opsize, 1, NULL);
2291 src = tcg_temp_new_i64();
2296 gen_helper_i32_to_f64(src, cpu_env, tmp32);
2299 gen_helper_f32_to_f64(src, cpu_env, tmp32);
2304 /* Source register. */
2305 src = FREG(ext, 10);
2307 dest = FREG(ext, 7);
2308 res = tcg_temp_new_i64();
2310 tcg_gen_mov_f64(res, dest);
2314 case 0: case 0x40: case 0x44: /* fmove */
2315 tcg_gen_mov_f64(res, src);
2318 gen_helper_iround_f64(res, cpu_env, src);
2321 case 3: /* fintrz */
2322 gen_helper_itrunc_f64(res, cpu_env, src);
2325 case 4: case 0x41: case 0x45: /* fsqrt */
2326 gen_helper_sqrt_f64(res, cpu_env, src);
2328 case 0x18: case 0x58: case 0x5c: /* fabs */
2329 gen_helper_abs_f64(res, src);
2331 case 0x1a: case 0x5a: case 0x5e: /* fneg */
2332 gen_helper_chs_f64(res, src);
2334 case 0x20: case 0x60: case 0x64: /* fdiv */
2335 gen_helper_div_f64(res, cpu_env, res, src);
2337 case 0x22: case 0x62: case 0x66: /* fadd */
2338 gen_helper_add_f64(res, cpu_env, res, src);
2340 case 0x23: case 0x63: case 0x67: /* fmul */
2341 gen_helper_mul_f64(res, cpu_env, res, src);
2343 case 0x28: case 0x68: case 0x6c: /* fsub */
2344 gen_helper_sub_f64(res, cpu_env, res, src);
2346 case 0x38: /* fcmp */
2347 gen_helper_sub_cmp_f64(res, cpu_env, res, src);
2351 case 0x3a: /* ftst */
2352 tcg_gen_mov_f64(res, src);
2359 if (ext & (1 << 14)) {
2360 tcg_temp_free_i64(src);
2363 if (opmode & 0x40) {
2364 if ((opmode & 0x4) != 0)
2366 } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
2371 TCGv tmp = tcg_temp_new_i32();
2372 gen_helper_f64_to_f32(tmp, cpu_env, res);
2373 gen_helper_f32_to_f64(res, cpu_env, tmp);
2374 tcg_temp_free_i32(tmp);
2376 tcg_gen_mov_f64(QREG_FP_RESULT, res);
2378 tcg_gen_mov_f64(dest, res);
2380 tcg_temp_free_i64(res);
2383 /* FIXME: Is this right for offset addressing modes? */
2385 disas_undef_fpu(s, insn);
2396 offset = ldsw_code(s->pc);
2398 if (insn & (1 << 6)) {
2399 offset = (offset << 16) | lduw_code(s->pc);
2403 l1 = gen_new_label();
2404 /* TODO: Raise BSUN exception. */
2405 flag = tcg_temp_new();
2406 gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
2407 /* Jump to l1 if condition is true. */
2408 switch (insn & 0xf) {
2411 case 1: /* eq (=0) */
2412 tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
2414 case 2: /* ogt (=1) */
2415 tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
2417 case 3: /* oge (=0 or =1) */
2418 tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
2420 case 4: /* olt (=-1) */
2421 tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
2423 case 5: /* ole (=-1 or =0) */
2424 tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
2426 case 6: /* ogl (=-1 or =1) */
2427 tcg_gen_andi_i32(flag, flag, 1);
2428 tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
2430 case 7: /* or (=2) */
2431 tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
2433 case 8: /* un (<2) */
2434 tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
2436 case 9: /* ueq (=0 or =2) */
2437 tcg_gen_andi_i32(flag, flag, 1);
2438 tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
2440 case 10: /* ugt (>0) */
2441 tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
2443 case 11: /* uge (>=0) */
2444 tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
2446 case 12: /* ult (=-1 or =2) */
2447 tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
2449 case 13: /* ule (!=1) */
2450 tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
2452 case 14: /* ne (!=0) */
2453 tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
2459 gen_jmp_tb(s, 0, s->pc);
2461 gen_jmp_tb(s, 1, addr + offset);
2464 DISAS_INSN(frestore)
2466 /* TODO: Implement frestore. */
2467 qemu_assert(0, "FRESTORE not implemented");
2472 /* TODO: Implement fsave. */
2473 qemu_assert(0, "FSAVE not implemented");
2476 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
2478 TCGv tmp = tcg_temp_new();
2479 if (s->env->macsr & MACSR_FI) {
2481 tcg_gen_andi_i32(tmp, val, 0xffff0000);
2483 tcg_gen_shli_i32(tmp, val, 16);
2484 } else if (s->env->macsr & MACSR_SU) {
2486 tcg_gen_sari_i32(tmp, val, 16);
2488 tcg_gen_ext16s_i32(tmp, val);
2491 tcg_gen_shri_i32(tmp, val, 16);
2493 tcg_gen_ext16u_i32(tmp, val);
2498 static void gen_mac_clear_flags(void)
2500 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
2501 ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
2517 s->mactmp = tcg_temp_new_i64();
2521 ext = lduw_code(s->pc);
2524 acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
2525 dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
2526 if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
2527 disas_undef(s, insn);
2531 /* MAC with load. */
2532 tmp = gen_lea(s, insn, OS_LONG);
2533 addr = tcg_temp_new();
2534 tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
2535 /* Load the value now to ensure correct exception behavior.
2536 Perform writeback after reading the MAC inputs. */
2537 loadval = gen_load(s, OS_LONG, addr, 0);
2540 rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
2541 ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
2543 loadval = addr = NULL_QREG;
2544 rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
2545 ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2548 gen_mac_clear_flags();
2551 /* Disabled because conditional branches clobber temporary vars. */
2552 if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
2553 /* Skip the multiply if we know we will ignore it. */
2554 l1 = gen_new_label();
2555 tmp = tcg_temp_new();
2556 tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
2557 gen_op_jmp_nz32(tmp, l1);
2561 if ((ext & 0x0800) == 0) {
2563 rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
2564 ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
2566 if (s->env->macsr & MACSR_FI) {
2567 gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
2569 if (s->env->macsr & MACSR_SU)
2570 gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
2572 gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
2573 switch ((ext >> 9) & 3) {
2575 tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
2578 tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
2584 /* Save the overflow flag from the multiply. */
2585 saved_flags = tcg_temp_new();
2586 tcg_gen_mov_i32(saved_flags, QREG_MACSR);
2588 saved_flags = NULL_QREG;
2592 /* Disabled because conditional branches clobber temporary vars. */
2593 if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
2594 /* Skip the accumulate if the value is already saturated. */
2595 l1 = gen_new_label();
2596 tmp = tcg_temp_new();
2597 gen_op_and32(tmp, QREG_MACSR, gen_im32(MACSR_PAV0 << acc));
2598 gen_op_jmp_nz32(tmp, l1);
2603 tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
2605 tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
2607 if (s->env->macsr & MACSR_FI)
2608 gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
2609 else if (s->env->macsr & MACSR_SU)
2610 gen_helper_macsats(cpu_env, tcg_const_i32(acc));
2612 gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
2615 /* Disabled because conditional branches clobber temporary vars. */
2621 /* Dual accumulate variant. */
2622 acc = (ext >> 2) & 3;
2623 /* Restore the overflow flag from the multiplier. */
2624 tcg_gen_mov_i32(QREG_MACSR, saved_flags);
2626 /* Disabled because conditional branches clobber temporary vars. */
2627 if ((s->env->macsr & MACSR_OMC) != 0) {
2628 /* Skip the accumulate if the value is already saturated. */
2629 l1 = gen_new_label();
2630 tmp = tcg_temp_new();
2631 gen_op_and32(tmp, QREG_MACSR, gen_im32(MACSR_PAV0 << acc));
2632 gen_op_jmp_nz32(tmp, l1);
2636 tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
2638 tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
2639 if (s->env->macsr & MACSR_FI)
2640 gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
2641 else if (s->env->macsr & MACSR_SU)
2642 gen_helper_macsats(cpu_env, tcg_const_i32(acc));
2644 gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
2646 /* Disabled because conditional branches clobber temporary vars. */
2651 gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
2655 rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
2656 tcg_gen_mov_i32(rw, loadval);
2657 /* FIXME: Should address writeback happen with the masked or unmasked address? */
2659 switch ((insn >> 3) & 7) {
2660 case 3: /* Post-increment. */
2661 tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
2663 case 4: /* Pre-decrement. */
2664 tcg_gen_mov_i32(AREG(insn, 0), addr);
2669 DISAS_INSN(from_mac)
2675 rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2676 accnum = (insn >> 9) & 3;
2677 acc = MACREG(accnum);
2678 if (s->env->macsr & MACSR_FI) {
2679 gen_helper_get_macf(rx, cpu_env, acc);
2680 } else if ((s->env->macsr & MACSR_OMC) == 0) {
2681 tcg_gen_trunc_i64_i32(rx, acc);
2682 } else if (s->env->macsr & MACSR_SU) {
2683 gen_helper_get_macs(rx, acc);
2685 gen_helper_get_macu(rx, acc);
2688 tcg_gen_movi_i64(acc, 0);
2689 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
2693 DISAS_INSN(move_mac)
2695 /* FIXME: This can be done without a helper. */
2699 dest = tcg_const_i32((insn >> 9) & 3);
2700 gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
2701 gen_mac_clear_flags();
2702 gen_helper_mac_set_flags(cpu_env, dest);
2705 DISAS_INSN(from_macsr)
2709 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2710 tcg_gen_mov_i32(reg, QREG_MACSR);
2713 DISAS_INSN(from_mask)
2716 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2717 tcg_gen_mov_i32(reg, QREG_MAC_MASK);
2720 DISAS_INSN(from_mext)
2724 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2725 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
2726 if (s->env->macsr & MACSR_FI)
2727 gen_helper_get_mac_extf(reg, cpu_env, acc);
2729 gen_helper_get_mac_exti(reg, cpu_env, acc);
2732 DISAS_INSN(macsr_to_ccr)
2734 tcg_gen_movi_i32(QREG_CC_X, 0);
2735 tcg_gen_andi_i32(QREG_CC_DEST, QREG_MACSR, 0xf);
2736 s->cc_op = CC_OP_FLAGS;
2744 accnum = (insn >> 9) & 3;
2745 acc = MACREG(accnum);
2746 SRC_EA(val, OS_LONG, 0, NULL);
2747 if (s->env->macsr & MACSR_FI) {
2748 tcg_gen_ext_i32_i64(acc, val);
2749 tcg_gen_shli_i64(acc, acc, 8);
2750 } else if (s->env->macsr & MACSR_SU) {
2751 tcg_gen_ext_i32_i64(acc, val);
2753 tcg_gen_extu_i32_i64(acc, val);
2755 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
2756 gen_mac_clear_flags();
2757 gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
2760 DISAS_INSN(to_macsr)
2763 SRC_EA(val, OS_LONG, 0, NULL);
2764 gen_helper_set_macsr(cpu_env, val);
2771 SRC_EA(val, OS_LONG, 0, NULL);
2772 tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
2779 SRC_EA(val, OS_LONG, 0, NULL);
2780 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
2781 if (s->env->macsr & MACSR_FI)
2782 gen_helper_set_mac_extf(cpu_env, val, acc);
2783 else if (s->env->macsr & MACSR_SU)
2784 gen_helper_set_mac_exts(cpu_env, val, acc);
2786 gen_helper_set_mac_extu(cpu_env, val, acc);
2789 static disas_proc opcode_table[65536];
2792 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
2798 /* Sanity check. All set bits must be included in the mask. */
2799 if (opcode & ~mask) {
2801 "qemu internal error: bogus opcode definition %04x/%04x\n",
2805 /* This could probably be cleverer. For now just optimize the case where
2806 the top bits are known. */
2807 /* Find the first zero bit in the mask. */
2809 while ((i & mask) != 0)
2811 /* Iterate over all combinations of this and lower bits. */
2816 from = opcode & ~(i - 1);
2818 for (i = from; i < to; i++) {
2819 if ((i & mask) == opcode)
2820 opcode_table[i] = proc;
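/* Worked example (illustrative): register_opcode(disas_tpf, 0x51f8, 0xfff8)
 * fills the eight opcode_table[] slots 0x51f8..0x51ff, since only the low
 * three bits are left free by the mask; dispatch is then a single indexed
 * call in disas_m68k_insn(). */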
2824 /* Register m68k opcode handlers. Order is important.
2825 Later insns override earlier ones. */
2826 void register_m68k_insns (CPUM68KState *env)
2828 #define INSN(name, opcode, mask, feature) do { \
2829 if (m68k_feature(env, M68K_FEATURE_##feature)) \
2830 register_opcode(disas_##name, 0x##opcode, 0x##mask); \
2832 INSN(undef, 0000, 0000, CF_ISA_A);
2833 INSN(arith_im, 0080, fff8, CF_ISA_A);
2834 INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
2835 INSN(bitop_reg, 0100, f1c0, CF_ISA_A);
2836 INSN(bitop_reg, 0140, f1c0, CF_ISA_A);
2837 INSN(bitop_reg, 0180, f1c0, CF_ISA_A);
2838 INSN(bitop_reg, 01c0, f1c0, CF_ISA_A);
2839 INSN(arith_im, 0280, fff8, CF_ISA_A);
2840 INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
2841 INSN(arith_im, 0480, fff8, CF_ISA_A);
2842 INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
2843 INSN(arith_im, 0680, fff8, CF_ISA_A);
2844 INSN(bitop_im, 0800, ffc0, CF_ISA_A);
2845 INSN(bitop_im, 0840, ffc0, CF_ISA_A);
2846 INSN(bitop_im, 0880, ffc0, CF_ISA_A);
2847 INSN(bitop_im, 08c0, ffc0, CF_ISA_A);
2848 INSN(arith_im, 0a80, fff8, CF_ISA_A);
2849 INSN(arith_im, 0c00, ff38, CF_ISA_A);
2850 INSN(move, 1000, f000, CF_ISA_A);
2851 INSN(move, 2000, f000, CF_ISA_A);
2852 INSN(move, 3000, f000, CF_ISA_A);
2853 INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
2854 INSN(negx, 4080, fff8, CF_ISA_A);
2855 INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
2856 INSN(lea, 41c0, f1c0, CF_ISA_A);
2857 INSN(clr, 4200, ff00, CF_ISA_A);
2858 INSN(undef, 42c0, ffc0, CF_ISA_A);
2859 INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
2860 INSN(neg, 4480, fff8, CF_ISA_A);
2861 INSN(move_to_ccr, 44c0, ffc0, CF_ISA_A);
2862 INSN(not, 4680, fff8, CF_ISA_A);
2863 INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
2864 INSN(pea, 4840, ffc0, CF_ISA_A);
2865 INSN(swap, 4840, fff8, CF_ISA_A);
2866 INSN(movem, 48c0, fbc0, CF_ISA_A);
2867 INSN(ext, 4880, fff8, CF_ISA_A);
2868 INSN(ext, 48c0, fff8, CF_ISA_A);
2869 INSN(ext, 49c0, fff8, CF_ISA_A);
2870 INSN(tst, 4a00, ff00, CF_ISA_A);
2871 INSN(tas, 4ac0, ffc0, CF_ISA_B);
2872 INSN(halt, 4ac8, ffff, CF_ISA_A);
2873 INSN(pulse, 4acc, ffff, CF_ISA_A);
2874 INSN(illegal, 4afc, ffff, CF_ISA_A);
2875 INSN(mull, 4c00, ffc0, CF_ISA_A);
2876 INSN(divl, 4c40, ffc0, CF_ISA_A);
2877 INSN(sats, 4c80, fff8, CF_ISA_B);
2878 INSN(trap, 4e40, fff0, CF_ISA_A);
2879 INSN(link, 4e50, fff8, CF_ISA_A);
2880 INSN(unlk, 4e58, fff8, CF_ISA_A);
2881 INSN(move_to_usp, 4e60, fff8, USP);
2882 INSN(move_from_usp, 4e68, fff8, USP);
2883 INSN(nop, 4e71, ffff, CF_ISA_A);
2884 INSN(stop, 4e72, ffff, CF_ISA_A);
2885 INSN(rte, 4e73, ffff, CF_ISA_A);
2886 INSN(rts, 4e75, ffff, CF_ISA_A);
2887 INSN(movec, 4e7b, ffff, CF_ISA_A);
2888 INSN(jump, 4e80, ffc0, CF_ISA_A);
2889 INSN(jump, 4ec0, ffc0, CF_ISA_A);
2890 INSN(addsubq, 5180, f1c0, CF_ISA_A);
2891 INSN(scc, 50c0, f0f8, CF_ISA_A);
2892 INSN(addsubq, 5080, f1c0, CF_ISA_A);
2893 INSN(tpf, 51f8, fff8, CF_ISA_A);
2895 /* Branch instructions. */
2896 INSN(branch, 6000, f000, CF_ISA_A);
2897 /* Disable long branch instructions, then add back the ones we want. */
2898 INSN(undef, 60ff, f0ff, CF_ISA_A); /* All long branches. */
2899 INSN(branch, 60ff, f0ff, CF_ISA_B);
2900 INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
2901 INSN(branch, 60ff, ffff, BRAL);
2903 INSN(moveq, 7000, f100, CF_ISA_A);
2904 INSN(mvzs, 7100, f100, CF_ISA_B);
2905 INSN(or, 8000, f000, CF_ISA_A);
2906 INSN(divw, 80c0, f0c0, CF_ISA_A);
2907 INSN(addsub, 9000, f000, CF_ISA_A);
2908 INSN(subx, 9180, f1f8, CF_ISA_A);
2909 INSN(suba, 91c0, f1c0, CF_ISA_A);
2911 INSN(undef_mac, a000, f000, CF_ISA_A);
2912 INSN(mac, a000, f100, CF_EMAC);
2913 INSN(from_mac, a180, f9b0, CF_EMAC);
2914 INSN(move_mac, a110, f9fc, CF_EMAC);
2915 INSN(from_macsr,a980, f9f0, CF_EMAC);
2916 INSN(from_mask, ad80, fff0, CF_EMAC);
2917 INSN(from_mext, ab80, fbf0, CF_EMAC);
2918 INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
2919 INSN(to_mac, a100, f9c0, CF_EMAC);
2920 INSN(to_macsr, a900, ffc0, CF_EMAC);
2921 INSN(to_mext, ab00, fbc0, CF_EMAC);
2922 INSN(to_mask, ad00, ffc0, CF_EMAC);
2924 INSN(mov3q, a140, f1c0, CF_ISA_B);
2925 INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
2926 INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
2927 INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
2928 INSN(cmp, b080, f1c0, CF_ISA_A);
2929 INSN(cmpa, b1c0, f1c0, CF_ISA_A);
2930 INSN(eor, b180, f1c0, CF_ISA_A);
2931 INSN(and, c000, f000, CF_ISA_A);
2932 INSN(mulw, c0c0, f0c0, CF_ISA_A);
2933 INSN(addsub, d000, f000, CF_ISA_A);
2934 INSN(addx, d180, f1f8, CF_ISA_A);
2935 INSN(adda, d1c0, f1c0, CF_ISA_A);
2936 INSN(shift_im, e080, f0f0, CF_ISA_A);
2937 INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
2938 INSN(undef_fpu, f000, f000, CF_ISA_A);
2939 INSN(fpu, f200, ffc0, CF_FPU);
2940 INSN(fbcc, f280, ffc0, CF_FPU);
2941 INSN(frestore, f340, ffc0, CF_FPU);
2942 INSN(fsave, f340, ffc0, CF_FPU);
2943 INSN(intouch, f340, ffc0, CF_ISA_A);
2944 INSN(cpushl, f428, ff38, CF_ISA_A);
2945 INSN(wddata, fb00, ff00, CF_ISA_A);
2946 INSN(wdebug, fbc0, ffc0, CF_ISA_A);
2950 /* ??? Some of this implementation is not exception safe. We should always
2951 write back the result to memory before setting the condition codes. */
2952 static void disas_m68k_insn(CPUState * env, DisasContext *s)
2956 insn = lduw_code(s->pc);
2959 opcode_table[insn](s, insn);
2962 /* generate intermediate code for basic block 'tb'. */
2964 gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
2967 DisasContext dc1, *dc = &dc1;
2968 uint16_t *gen_opc_end;
2971 target_ulong pc_start;
2977 /* generate intermediate code */
2982 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2985 dc->is_jmp = DISAS_NEXT;
2987 dc->cc_op = CC_OP_DYNAMIC;
2988 dc->singlestep_enabled = env->singlestep_enabled;
2989 dc->fpcr = env->fpcr;
2990 dc->user = (env->sr & SR_S) == 0;
2995 max_insns = tb->cflags & CF_COUNT_MASK;
2997 max_insns = CF_COUNT_MASK;
3001 pc_offset = dc->pc - pc_start;
3002 gen_throws_exception = NULL;
3003 if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
3004 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
3005 if (bp->pc == dc->pc) {
3006 gen_exception(dc, dc->pc, EXCP_DEBUG);
3007 dc->is_jmp = DISAS_JUMP;
3015 j = gen_opc_ptr - gen_opc_buf;
3019 gen_opc_instr_start[lj++] = 0;
3021 gen_opc_pc[lj] = dc->pc;
3022 gen_opc_instr_start[lj] = 1;
3023 gen_opc_icount[lj] = num_insns;
3025 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3027 last_cc_op = dc->cc_op;
3028 dc->insn_pc = dc->pc;
3029 disas_m68k_insn(env, dc);
3031 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
3032 !env->singlestep_enabled &&
3034 (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
3035 num_insns < max_insns);
3037 if (tb->cflags & CF_LAST_IO)
3039 if (unlikely(env->singlestep_enabled)) {
3040 /* Make sure the pc is updated, and raise a debug exception. */
3042 gen_flush_cc_op(dc);
3043 tcg_gen_movi_i32(QREG_PC, dc->pc);
3045 gen_helper_raise_exception(tcg_const_i32(EXCP_DEBUG));
3047 switch(dc->is_jmp) {
3049 gen_flush_cc_op(dc);
3050 gen_jmp_tb(dc, 0, dc->pc);
3055 gen_flush_cc_op(dc);
3056 /* indicate that the hash table must be used to find the next TB */
3060 /* nothing more to generate */
3064 gen_icount_end(tb, num_insns);
3065 *gen_opc_ptr = INDEX_op_end;
3068 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3069 qemu_log("----------------\n");
3070 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3071 log_target_disas(pc_start, dc->pc - pc_start, 0);
3076 j = gen_opc_ptr - gen_opc_buf;
3079 gen_opc_instr_start[lj++] = 0;
3081 tb->size = dc->pc - pc_start;
3082 tb->icount = num_insns;
3086 //expand_target_qops();
3089 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
3091 gen_intermediate_code_internal(env, tb, 0);
3094 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
3096 gen_intermediate_code_internal(env, tb, 1);
3099 void cpu_dump_state(CPUState *env, FILE *f,
3100 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
3106 for (i = 0; i < 8; i++)
3108 u.d = env->fregs[i];
3109 cpu_fprintf (f, "D%d = %08x A%d = %08x F%d = %08x%08x (%12g)\n",
3110 i, env->dregs[i], i, env->aregs[i],
3111 i, u.l.upper, u.l.lower, *(double *)&u.d);
3113 cpu_fprintf (f, "PC = %08x ", env->pc);
3115 cpu_fprintf (f, "SR = %04x %c%c%c%c%c ", sr, (sr & 0x10) ? 'X' : '-',
3116 (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
3117 (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
3118 cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
3121 void gen_pc_load(CPUState *env, TranslationBlock *tb,
3122 unsigned long searched_pc, int pc_pos, void *puc)
3124 env->pc = gen_opc_pc[pc_pos];