X-Git-Url: http://git.maemo.org/git/?a=blobdiff_plain;f=target-alpha%2Ftranslate.c;h=6156fb5d0d3d3a933f163c8b5c410e68e63045ac;hb=ed1dda53d073acdb52889aa30f9d251061b053bd;hp=e0acba5fa13dc508f00ae08186eb86d7efd6ac9d;hpb=b03d0971b3744cf5778264c9aedb4084f779c774;p=qemu diff --git a/target-alpha/translate.c b/target-alpha/translate.c index e0acba5..6156fb5 100644 --- a/target-alpha/translate.c +++ b/target-alpha/translate.c @@ -15,7 +15,7 @@ * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA */ #include @@ -26,14 +26,23 @@ #include "exec-all.h" #include "disas.h" #include "host-utils.h" -#include "helper.h" #include "tcg-op.h" #include "qemu-common.h" -#define DO_SINGLE_STEP -#define GENERATE_NOP +#include "helper.h" +#define GEN_HELPER 1 +#include "helper.h" + +/* #define DO_SINGLE_STEP */ #define ALPHA_DEBUG_DISAS -#define DO_TB_FLUSH +/* #define DO_TB_FLUSH */ + + +#ifdef ALPHA_DEBUG_DISAS +# define LOG_DISAS(...) qemu_log(__VA_ARGS__) +#else +# define LOG_DISAS(...) do { } while (0) +#endif typedef struct DisasContext DisasContext; struct DisasContext { @@ -46,15 +55,14 @@ struct DisasContext { }; /* global register indexes */ -static TCGv cpu_env; +static TCGv_ptr cpu_env; static TCGv cpu_ir[31]; +static TCGv cpu_fir[31]; static TCGv cpu_pc; - -/* dyngen register indexes */ -static TCGv cpu_T[2]; +static TCGv cpu_lock; /* register names */ -static char cpu_reg_names[10*4+21*5]; +static char cpu_reg_names[10*4+21*5 + 10*5+21*6]; #include "gen-icount.h" @@ -67,261 +75,222 @@ static void alpha_translate_init(void) if (done_init) return; - cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env"); - -#if TARGET_LONG_BITS > HOST_LONG_BITS - cpu_T[0] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0, - offsetof(CPUState, t0), "T0"); - cpu_T[1] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0, - offsetof(CPUState, t1), "T1"); -#else - cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I64, TCG_AREG1, "T0"); - cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I64, TCG_AREG2, "T1"); -#endif + cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); p = cpu_reg_names; for (i = 0; i < 31; i++) { sprintf(p, "ir%d", i); - cpu_ir[i] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0, - offsetof(CPUState, ir[i]), p); + cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0, + offsetof(CPUState, ir[i]), p); p += (i < 10) ? 4 : 5; + + sprintf(p, "fir%d", i); + cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0, + offsetof(CPUState, fir[i]), p); + p += (i < 10) ? 
5 : 6; } - cpu_pc = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0, - offsetof(CPUState, pc), "pc"); + cpu_pc = tcg_global_mem_new_i64(TCG_AREG0, + offsetof(CPUState, pc), "pc"); + + cpu_lock = tcg_global_mem_new_i64(TCG_AREG0, + offsetof(CPUState, lock), "lock"); /* register helpers */ -#undef DEF_HELPER -#define DEF_HELPER(ret, name, params) tcg_register_helper(name, #name); +#define GEN_HELPER 2 #include "helper.h" done_init = 1; } -static always_inline void gen_op_nop (void) +static always_inline void gen_excp (DisasContext *ctx, + int exception, int error_code) { -#if defined(GENERATE_NOP) - gen_op_no_op(); -#endif -} + TCGv_i32 tmp1, tmp2; -#define GEN32(func, NAME) \ -static GenOpFunc *NAME ## _table [32] = { \ -NAME ## 0, NAME ## 1, NAME ## 2, NAME ## 3, \ -NAME ## 4, NAME ## 5, NAME ## 6, NAME ## 7, \ -NAME ## 8, NAME ## 9, NAME ## 10, NAME ## 11, \ -NAME ## 12, NAME ## 13, NAME ## 14, NAME ## 15, \ -NAME ## 16, NAME ## 17, NAME ## 18, NAME ## 19, \ -NAME ## 20, NAME ## 21, NAME ## 22, NAME ## 23, \ -NAME ## 24, NAME ## 25, NAME ## 26, NAME ## 27, \ -NAME ## 28, NAME ## 29, NAME ## 30, NAME ## 31, \ -}; \ -static always_inline void func (int n) \ -{ \ - NAME ## _table[n](); \ + tcg_gen_movi_i64(cpu_pc, ctx->pc); + tmp1 = tcg_const_i32(exception); + tmp2 = tcg_const_i32(error_code); + gen_helper_excp(tmp1, tmp2); + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp1); } -/* FIR moves */ -/* Special hacks for fir31 */ -#define gen_op_load_FT0_fir31 gen_op_reset_FT0 -#define gen_op_load_FT1_fir31 gen_op_reset_FT1 -#define gen_op_load_FT2_fir31 gen_op_reset_FT2 -#define gen_op_store_FT0_fir31 gen_op_nop -#define gen_op_store_FT1_fir31 gen_op_nop -#define gen_op_store_FT2_fir31 gen_op_nop -#define gen_op_cmov_fir31 gen_op_nop -GEN32(gen_op_load_FT0_fir, gen_op_load_FT0_fir); -GEN32(gen_op_load_FT1_fir, gen_op_load_FT1_fir); -GEN32(gen_op_load_FT2_fir, gen_op_load_FT2_fir); -GEN32(gen_op_store_FT0_fir, gen_op_store_FT0_fir); -GEN32(gen_op_store_FT1_fir, gen_op_store_FT1_fir); -GEN32(gen_op_store_FT2_fir, gen_op_store_FT2_fir); -GEN32(gen_op_cmov_fir, gen_op_cmov_fir); - -static always_inline void gen_load_fir (DisasContext *ctx, int firn, int Tn) +static always_inline void gen_invalid (DisasContext *ctx) { - switch (Tn) { - case 0: - gen_op_load_FT0_fir(firn); - break; - case 1: - gen_op_load_FT1_fir(firn); - break; - case 2: - gen_op_load_FT2_fir(firn); - break; - } + gen_excp(ctx, EXCP_OPCDEC, 0); } -static always_inline void gen_store_fir (DisasContext *ctx, int firn, int Tn) +static always_inline void gen_qemu_ldf (TCGv t0, TCGv t1, int flags) { - switch (Tn) { - case 0: - gen_op_store_FT0_fir(firn); - break; - case 1: - gen_op_store_FT1_fir(firn); - break; - case 2: - gen_op_store_FT2_fir(firn); - break; - } + TCGv tmp = tcg_temp_new(); + TCGv_i32 tmp32 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32u(tmp, t1, flags); + tcg_gen_trunc_i64_i32(tmp32, tmp); + gen_helper_memory_to_f(t0, tmp32); + tcg_temp_free_i32(tmp32); + tcg_temp_free(tmp); } -/* Memory moves */ -#if defined(CONFIG_USER_ONLY) -#define OP_LD_TABLE(width) \ -static GenOpFunc *gen_op_ld##width[] = { \ - &gen_op_ld##width##_raw, \ -} -#define OP_ST_TABLE(width) \ -static GenOpFunc *gen_op_st##width[] = { \ - &gen_op_st##width##_raw, \ -} -#else -#define OP_LD_TABLE(width) \ -static GenOpFunc *gen_op_ld##width[] = { \ - &gen_op_ld##width##_kernel, \ - &gen_op_ld##width##_executive, \ - &gen_op_ld##width##_supervisor, \ - &gen_op_ld##width##_user, \ -} -#define OP_ST_TABLE(width) \ -static GenOpFunc *gen_op_st##width[] = { \ - 
&gen_op_st##width##_kernel, \ - &gen_op_st##width##_executive, \ - &gen_op_st##width##_supervisor, \ - &gen_op_st##width##_user, \ -} -#endif - -#define GEN_LD(width) \ -OP_LD_TABLE(width); \ -static always_inline void gen_ld##width (DisasContext *ctx) \ -{ \ - (*gen_op_ld##width[ctx->mem_idx])(); \ -} - -#define GEN_ST(width) \ -OP_ST_TABLE(width); \ -static always_inline void gen_st##width (DisasContext *ctx) \ -{ \ - (*gen_op_st##width[ctx->mem_idx])(); \ +static always_inline void gen_qemu_ldg (TCGv t0, TCGv t1, int flags) +{ + TCGv tmp = tcg_temp_new(); + tcg_gen_qemu_ld64(tmp, t1, flags); + gen_helper_memory_to_g(t0, tmp); + tcg_temp_free(tmp); } -GEN_LD(l); -GEN_ST(l); -GEN_LD(q); -GEN_ST(q); -GEN_LD(l_l); -GEN_ST(l_c); -GEN_LD(q_l); -GEN_ST(q_c); - -#if 0 /* currently unused */ -GEN_LD(f); -GEN_ST(f); -GEN_LD(g); -GEN_ST(g); -#endif /* 0 */ -GEN_LD(s); -GEN_ST(s); -GEN_LD(t); -GEN_ST(t); - -static always_inline void _gen_op_bcond (DisasContext *ctx) +static always_inline void gen_qemu_lds (TCGv t0, TCGv t1, int flags) { -#if 0 // Qemu does not know how to do this... - gen_op_bcond(ctx->pc); -#else - gen_op_bcond(ctx->pc >> 32, ctx->pc); -#endif + TCGv tmp = tcg_temp_new(); + TCGv_i32 tmp32 = tcg_temp_new_i32(); + tcg_gen_qemu_ld32u(tmp, t1, flags); + tcg_gen_trunc_i64_i32(tmp32, tmp); + gen_helper_memory_to_s(t0, tmp32); + tcg_temp_free_i32(tmp32); + tcg_temp_free(tmp); } -static always_inline void gen_excp (DisasContext *ctx, - int exception, int error_code) +static always_inline void gen_qemu_ldl_l (TCGv t0, TCGv t1, int flags) { - TCGv tmp1, tmp2; - - tcg_gen_movi_i64(cpu_pc, ctx->pc); - tmp1 = tcg_const_i32(exception); - tmp2 = tcg_const_i32(error_code); - tcg_gen_helper_0_2(helper_excp, tmp1, tmp2); - tcg_temp_free(tmp2); - tcg_temp_free(tmp1); + tcg_gen_mov_i64(cpu_lock, t1); + tcg_gen_qemu_ld32s(t0, t1, flags); } -static always_inline void gen_invalid (DisasContext *ctx) +static always_inline void gen_qemu_ldq_l (TCGv t0, TCGv t1, int flags) { - gen_excp(ctx, EXCP_OPCDEC, 0); + tcg_gen_mov_i64(cpu_lock, t1); + tcg_gen_qemu_ld64(t0, t1, flags); } static always_inline void gen_load_mem (DisasContext *ctx, - void (*gen_load_op)(DisasContext *ctx), + void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1, int flags), int ra, int rb, int32_t disp16, - int clear) + int fp, int clear) { - if (ra == 31 && disp16 == 0) { - /* UNOP */ - gen_op_nop(); + TCGv addr; + + if (unlikely(ra == 31)) + return; + + addr = tcg_temp_new(); + if (rb != 31) { + tcg_gen_addi_i64(addr, cpu_ir[rb], disp16); + if (clear) + tcg_gen_andi_i64(addr, addr, ~0x7); } else { - if (rb != 31) - tcg_gen_addi_i64(cpu_T[0], cpu_ir[rb], disp16); - else - tcg_gen_movi_i64(cpu_T[0], disp16); if (clear) - tcg_gen_andi_i64(cpu_T[0], cpu_T[0], ~0x7); - (*gen_load_op)(ctx); - if (ra != 31) - tcg_gen_mov_i64(cpu_ir[ra], cpu_T[1]); + disp16 &= ~0x7; + tcg_gen_movi_i64(addr, disp16); } + if (fp) + tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx); + else + tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx); + tcg_temp_free(addr); } -static always_inline void gen_store_mem (DisasContext *ctx, - void (*gen_store_op)(DisasContext *ctx), - int ra, int rb, int32_t disp16, - int clear) +static always_inline void gen_qemu_stf (TCGv t0, TCGv t1, int flags) { - if (rb != 31) - tcg_gen_addi_i64(cpu_T[0], cpu_ir[rb], disp16); - else - tcg_gen_movi_i64(cpu_T[0], disp16); - if (clear) - tcg_gen_andi_i64(cpu_T[0], cpu_T[0], ~0x7); - if (ra != 31) - tcg_gen_mov_i64(cpu_T[1], cpu_ir[ra]); - else - tcg_gen_movi_i64(cpu_T[1], 0); - (*gen_store_op)(ctx); + 
TCGv_i32 tmp32 = tcg_temp_new_i32(); + TCGv tmp = tcg_temp_new(); + gen_helper_f_to_memory(tmp32, t0); + tcg_gen_extu_i32_i64(tmp, tmp32); + tcg_gen_qemu_st32(tmp, t1, flags); + tcg_temp_free(tmp); + tcg_temp_free_i32(tmp32); } -static always_inline void gen_load_fmem (DisasContext *ctx, - void (*gen_load_fop)(DisasContext *ctx), - int ra, int rb, int32_t disp16) +static always_inline void gen_qemu_stg (TCGv t0, TCGv t1, int flags) { - if (rb != 31) - tcg_gen_addi_i64(cpu_T[0], cpu_ir[rb], disp16); - else - tcg_gen_movi_i64(cpu_T[0], disp16); - (*gen_load_fop)(ctx); - gen_store_fir(ctx, ra, 1); + TCGv tmp = tcg_temp_new(); + gen_helper_g_to_memory(tmp, t0); + tcg_gen_qemu_st64(tmp, t1, flags); + tcg_temp_free(tmp); } -static always_inline void gen_store_fmem (DisasContext *ctx, - void (*gen_store_fop)(DisasContext *ctx), - int ra, int rb, int32_t disp16) +static always_inline void gen_qemu_sts (TCGv t0, TCGv t1, int flags) { - if (rb != 31) - tcg_gen_addi_i64(cpu_T[0], cpu_ir[rb], disp16); + TCGv_i32 tmp32 = tcg_temp_new_i32(); + TCGv tmp = tcg_temp_new(); + gen_helper_s_to_memory(tmp32, t0); + tcg_gen_extu_i32_i64(tmp, tmp32); + tcg_gen_qemu_st32(tmp, t1, flags); + tcg_temp_free(tmp); + tcg_temp_free_i32(tmp32); +} + +static always_inline void gen_qemu_stl_c (TCGv t0, TCGv t1, int flags) +{ + int l1, l2; + + l1 = gen_new_label(); + l2 = gen_new_label(); + tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1); + tcg_gen_qemu_st32(t0, t1, flags); + tcg_gen_movi_i64(t0, 1); + tcg_gen_br(l2); + gen_set_label(l1); + tcg_gen_movi_i64(t0, 0); + gen_set_label(l2); + tcg_gen_movi_i64(cpu_lock, -1); +} + +static always_inline void gen_qemu_stq_c (TCGv t0, TCGv t1, int flags) +{ + int l1, l2; + + l1 = gen_new_label(); + l2 = gen_new_label(); + tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1); + tcg_gen_qemu_st64(t0, t1, flags); + tcg_gen_movi_i64(t0, 1); + tcg_gen_br(l2); + gen_set_label(l1); + tcg_gen_movi_i64(t0, 0); + gen_set_label(l2); + tcg_gen_movi_i64(cpu_lock, -1); +} + +static always_inline void gen_store_mem (DisasContext *ctx, + void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1, int flags), + int ra, int rb, int32_t disp16, + int fp, int clear, int local) +{ + TCGv addr; + if (local) + addr = tcg_temp_local_new(); else - tcg_gen_movi_i64(cpu_T[0], disp16); - gen_load_fir(ctx, ra, 1); - (*gen_store_fop)(ctx); + addr = tcg_temp_new(); + if (rb != 31) { + tcg_gen_addi_i64(addr, cpu_ir[rb], disp16); + if (clear) + tcg_gen_andi_i64(addr, addr, ~0x7); + } else { + if (clear) + disp16 &= ~0x7; + tcg_gen_movi_i64(addr, disp16); + } + if (ra != 31) { + if (fp) + tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx); + else + tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx); + } else { + TCGv zero; + if (local) + zero = tcg_const_local_i64(0); + else + zero = tcg_const_i64(0); + tcg_gen_qemu_store(zero, addr, ctx->mem_idx); + tcg_temp_free(zero); + } + tcg_temp_free(addr); } static always_inline void gen_bcond (DisasContext *ctx, TCGCond cond, - int ra, int32_t disp16, int mask) + int ra, int32_t disp, int mask) { int l1, l2; @@ -329,7 +298,7 @@ static always_inline void gen_bcond (DisasContext *ctx, l2 = gen_new_label(); if (likely(ra != 31)) { if (mask) { - TCGv tmp = tcg_temp_new(TCG_TYPE_I64); + TCGv tmp = tcg_temp_new(); tcg_gen_andi_i64(tmp, cpu_ir[ra], 1); tcg_gen_brcondi_i64(cond, tmp, 0, l1); tcg_temp_free(tmp); @@ -344,22 +313,57 @@ static always_inline void gen_bcond (DisasContext *ctx, tcg_gen_movi_i64(cpu_pc, ctx->pc); tcg_gen_br(l2); gen_set_label(l1); - tcg_gen_movi_i64(cpu_pc, ctx->pc + 
(int64_t)(disp16 << 2)); + tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2)); gen_set_label(l2); } -static always_inline void gen_fbcond (DisasContext *ctx, - void (*gen_test_op)(void), +static always_inline void gen_fbcond (DisasContext *ctx, int opc, int ra, int32_t disp16) { - tcg_gen_movi_i64(cpu_T[1], ctx->pc + (int64_t)(disp16 << 2)); - gen_load_fir(ctx, ra, 0); - (*gen_test_op)(); - _gen_op_bcond(ctx); + int l1, l2; + TCGv tmp; + TCGv src; + + l1 = gen_new_label(); + l2 = gen_new_label(); + if (ra != 31) { + tmp = tcg_temp_new(); + src = cpu_fir[ra]; + } else { + tmp = tcg_const_i64(0); + src = tmp; + } + switch (opc) { + case 0x31: /* FBEQ */ + gen_helper_cmpfeq(tmp, src); + break; + case 0x32: /* FBLT */ + gen_helper_cmpflt(tmp, src); + break; + case 0x33: /* FBLE */ + gen_helper_cmpfle(tmp, src); + break; + case 0x35: /* FBNE */ + gen_helper_cmpfne(tmp, src); + break; + case 0x36: /* FBGE */ + gen_helper_cmpfge(tmp, src); + break; + case 0x37: /* FBGT */ + gen_helper_cmpfgt(tmp, src); + break; + default: + abort(); + } + tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 0, l1); + tcg_gen_movi_i64(cpu_pc, ctx->pc); + tcg_gen_br(l2); + gen_set_label(l1); + tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp16 << 2)); + gen_set_label(l2); } -static always_inline void gen_cmov (DisasContext *ctx, - TCGCond inv_cond, +static always_inline void gen_cmov (TCGCond inv_cond, int ra, int rb, int rc, int islit, uint8_t lit, int mask) { @@ -372,7 +376,7 @@ static always_inline void gen_cmov (DisasContext *ctx, if (ra != 31) { if (mask) { - TCGv tmp = tcg_temp_new(TCG_TYPE_I64); + TCGv tmp = tcg_temp_new(); tcg_gen_andi_i64(tmp, cpu_ir[ra], 1); tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1); tcg_temp_free(tmp); @@ -392,56 +396,120 @@ static always_inline void gen_cmov (DisasContext *ctx, gen_set_label(l1); } -static always_inline void gen_farith2 (DisasContext *ctx, - void (*gen_arith_fop)(void), - int rb, int rc) -{ - gen_load_fir(ctx, rb, 0); - (*gen_arith_fop)(); - gen_store_fir(ctx, rc, 0); +#define FARITH2(name) \ +static always_inline void glue(gen_f, name)(int rb, int rc) \ +{ \ + if (unlikely(rc == 31)) \ + return; \ + \ + if (rb != 31) \ + gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \ + else { \ + TCGv tmp = tcg_const_i64(0); \ + gen_helper_ ## name (cpu_fir[rc], tmp); \ + tcg_temp_free(tmp); \ + } \ } - -static always_inline void gen_farith3 (DisasContext *ctx, - void (*gen_arith_fop)(void), - int ra, int rb, int rc) -{ - gen_load_fir(ctx, ra, 0); - gen_load_fir(ctx, rb, 1); - (*gen_arith_fop)(); - gen_store_fir(ctx, rc, 0); +FARITH2(sqrts) +FARITH2(sqrtf) +FARITH2(sqrtg) +FARITH2(sqrtt) +FARITH2(cvtgf) +FARITH2(cvtgq) +FARITH2(cvtqf) +FARITH2(cvtqg) +FARITH2(cvtst) +FARITH2(cvtts) +FARITH2(cvttq) +FARITH2(cvtqs) +FARITH2(cvtqt) +FARITH2(cvtlq) +FARITH2(cvtql) +FARITH2(cvtqlv) +FARITH2(cvtqlsv) + +#define FARITH3(name) \ +static always_inline void glue(gen_f, name) (int ra, int rb, int rc) \ +{ \ + if (unlikely(rc == 31)) \ + return; \ + \ + if (ra != 31) { \ + if (rb != 31) \ + gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], cpu_fir[rb]); \ + else { \ + TCGv tmp = tcg_const_i64(0); \ + gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], tmp); \ + tcg_temp_free(tmp); \ + } \ + } else { \ + TCGv tmp = tcg_const_i64(0); \ + if (rb != 31) \ + gen_helper_ ## name (cpu_fir[rc], tmp, cpu_fir[rb]); \ + else \ + gen_helper_ ## name (cpu_fir[rc], tmp, tmp); \ + tcg_temp_free(tmp); \ + } \ } -static always_inline void gen_fcmov (DisasContext *ctx, - void (*gen_test_fop)(void), - int ra, int rb, int rc) -{ 
- gen_load_fir(ctx, ra, 0); - gen_load_fir(ctx, rb, 1); - (*gen_test_fop)(); - gen_op_cmov_fir(rc); -} - -static always_inline void gen_fti (DisasContext *ctx, - void (*gen_move_fop)(void), - int ra, int rc) -{ - gen_load_fir(ctx, rc, 0); - (*gen_move_fop)(); - if (ra != 31) - tcg_gen_mov_i64(cpu_ir[ra], cpu_T[0]); -} - -static always_inline void gen_itf (DisasContext *ctx, - void (*gen_move_fop)(void), - int ra, int rc) -{ - if (ra != 31) - tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]); - else - tcg_gen_movi_i64(cpu_T[0], 0); - (*gen_move_fop)(); - gen_store_fir(ctx, rc, 0); +FARITH3(addf) +FARITH3(subf) +FARITH3(mulf) +FARITH3(divf) +FARITH3(addg) +FARITH3(subg) +FARITH3(mulg) +FARITH3(divg) +FARITH3(cmpgeq) +FARITH3(cmpglt) +FARITH3(cmpgle) +FARITH3(adds) +FARITH3(subs) +FARITH3(muls) +FARITH3(divs) +FARITH3(addt) +FARITH3(subt) +FARITH3(mult) +FARITH3(divt) +FARITH3(cmptun) +FARITH3(cmpteq) +FARITH3(cmptlt) +FARITH3(cmptle) +FARITH3(cpys) +FARITH3(cpysn) +FARITH3(cpyse) + +#define FCMOV(name) \ +static always_inline void glue(gen_f, name) (int ra, int rb, int rc) \ +{ \ + int l1; \ + TCGv tmp; \ + \ + if (unlikely(rc == 31)) \ + return; \ + \ + l1 = gen_new_label(); \ + tmp = tcg_temp_new(); \ + if (ra != 31) { \ + tmp = tcg_temp_new(); \ + gen_helper_ ## name (tmp, cpu_fir[ra]); \ + } else { \ + tmp = tcg_const_i64(0); \ + gen_helper_ ## name (tmp, tmp); \ + } \ + tcg_gen_brcondi_i64(TCG_COND_EQ, tmp, 0, l1); \ + if (rb != 31) \ + tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]); \ + else \ + tcg_gen_movi_i64(cpu_fir[rc], 0); \ + gen_set_label(l1); \ } +FCMOV(cmpfeq) +FCMOV(cmpfne) +FCMOV(cmpflt) +FCMOV(cmpfge) +FCMOV(cmpfle) +FCMOV(cmpfgt) /* EXTWH, EXTWH, EXTLH, EXTQH */ static always_inline void gen_ext_h(void (*tcg_gen_ext_i64)(TCGv t0, TCGv t1), @@ -457,9 +525,9 @@ static always_inline void gen_ext_h(void (*tcg_gen_ext_i64)(TCGv t0, TCGv t1), tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], 64 - ((lit & 7) * 8)); else tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[ra]); - } else { + } else { TCGv tmp1, tmp2; - tmp1 = tcg_temp_new(TCG_TYPE_I64); + tmp1 = tcg_temp_new(); tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7); tcg_gen_shli_i64(tmp1, tmp1, 3); tmp2 = tcg_const_i64(64); @@ -486,12 +554,12 @@ static always_inline void gen_ext_l(void (*tcg_gen_ext_i64)(TCGv t0, TCGv t1), if (islit) { tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8); } else { - TCGv tmp = tcg_temp_new(TCG_TYPE_I64); + TCGv tmp = tcg_temp_new(); tcg_gen_andi_i64(tmp, cpu_ir[rb], 7); tcg_gen_shli_i64(tmp, tmp, 3); tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp); tcg_temp_free(tmp); - } + } if (tcg_gen_ext_i64) tcg_gen_ext_i64(cpu_ir[rc], cpu_ir[rc]); } else @@ -499,31 +567,55 @@ static always_inline void gen_ext_l(void (*tcg_gen_ext_i64)(TCGv t0, TCGv t1), } /* Code to call arith3 helpers */ -static always_inline void gen_arith3_helper(void *helper, - int ra, int rb, int rc, - int islit, uint8_t lit) -{ - if (unlikely(rc == 31)) - return; - - if (ra != 31) { - if (islit) { - TCGv tmp = tcg_const_i64(lit); - tcg_gen_helper_1_2(helper, cpu_ir[rc], cpu_ir[ra], tmp); - tcg_temp_free(tmp); - } else - tcg_gen_helper_1_2(helper, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); - } else { - TCGv tmp1 = tcg_const_i64(0); - if (islit) { - TCGv tmp2 = tcg_const_i64(lit); - tcg_gen_helper_1_2(helper, cpu_ir[rc], tmp1, tmp2); - tcg_temp_free(tmp2); - } else - tcg_gen_helper_1_2(helper, cpu_ir[rc], tmp1, cpu_ir[rb]); - tcg_temp_free(tmp1); - } +#define ARITH3(name) \ +static always_inline void glue(gen_, name) (int ra, int rb, int rc, \ + int islit, uint8_t lit) \ +{ \ + if 
(unlikely(rc == 31)) \ + return; \ + \ + if (ra != 31) { \ + if (islit) { \ + TCGv tmp = tcg_const_i64(lit); \ + gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \ + tcg_temp_free(tmp); \ + } else \ + gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \ + } else { \ + TCGv tmp1 = tcg_const_i64(0); \ + if (islit) { \ + TCGv tmp2 = tcg_const_i64(lit); \ + gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \ + tcg_temp_free(tmp2); \ + } else \ + gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \ + tcg_temp_free(tmp1); \ + } \ } +ARITH3(cmpbge) +ARITH3(addlv) +ARITH3(sublv) +ARITH3(addqv) +ARITH3(subqv) +ARITH3(mskbl) +ARITH3(insbl) +ARITH3(mskwl) +ARITH3(inswl) +ARITH3(mskll) +ARITH3(insll) +ARITH3(zap) +ARITH3(zapnot) +ARITH3(mskql) +ARITH3(insql) +ARITH3(mskwh) +ARITH3(inswh) +ARITH3(msklh) +ARITH3(inslh) +ARITH3(mskqh) +ARITH3(insqh) +ARITH3(umulh) +ARITH3(mullv) +ARITH3(mulqv) static always_inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, @@ -539,7 +631,7 @@ static always_inline void gen_cmp(TCGCond cond, l2 = gen_new_label(); if (ra != 31) { - tmp = tcg_temp_new(TCG_TYPE_I64); + tmp = tcg_temp_new(); tcg_gen_mov_i64(tmp, cpu_ir[ra]); } else tmp = tcg_const_i64(0); @@ -586,12 +678,8 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) fn7 = (insn >> 5) & 0x0000007F; fn2 = (insn >> 5) & 0x00000003; ret = 0; -#if defined ALPHA_DEBUG_DISAS - if (logfile != NULL) { - fprintf(logfile, "opc %02x ra %d rb %d rc %d disp16 %04x\n", - opc, ra, rb, rc, disp16); - } -#endif + LOG_DISAS("opc %02x ra %d rb %d rc %d disp16 %04x\n", + opc, ra, rb, rc, disp16); switch (opc) { case 0x00: /* CALL_PAL */ @@ -604,7 +692,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) if (ctx->mem_idx & 1) goto invalid_opc; else - gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x1F) << 6), 0); + gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0); #endif } else { /* Invalid PAL call */ @@ -655,103 +743,29 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) /* LDBU */ if (!(ctx->amask & AMASK_BWX)) goto invalid_opc; - if (likely(ra != 31)) { - TCGv addr = tcg_temp_new(TCG_TYPE_I64); - if (rb != 31) - tcg_gen_addi_i64(addr, cpu_ir[rb], disp16); - else - tcg_gen_movi_i64(addr, disp16); - tcg_gen_qemu_ld8u(cpu_ir[ra], addr, ctx->mem_idx); - tcg_temp_free(addr); - } + gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0); break; case 0x0B: /* LDQ_U */ - if (likely(ra != 31)) { - TCGv addr = tcg_temp_new(TCG_TYPE_I64); - if (rb != 31) { - tcg_gen_addi_i64(addr, cpu_ir[rb], disp16); - tcg_gen_andi_i64(addr, addr, ~0x7); - } else - tcg_gen_movi_i64(addr, disp16 & ~0x7); - tcg_gen_qemu_ld64(cpu_ir[ra], addr, ctx->mem_idx); - tcg_temp_free(addr); - } + gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1); break; case 0x0C: /* LDWU */ if (!(ctx->amask & AMASK_BWX)) goto invalid_opc; - if (likely(ra != 31)) { - TCGv addr = tcg_temp_new(TCG_TYPE_I64); - if (rb != 31) - tcg_gen_addi_i64(addr, cpu_ir[rb], disp16); - else - tcg_gen_movi_i64(addr, disp16); - tcg_gen_qemu_ld16u(cpu_ir[ra], addr, ctx->mem_idx); - tcg_temp_free(addr); - } + gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0); break; case 0x0D: /* STW */ - { - TCGv addr; - if (!(ctx->amask & AMASK_BWX)) - goto invalid_opc; - addr = tcg_temp_new(TCG_TYPE_I64); - if (rb != 31) - tcg_gen_addi_i64(addr, cpu_ir[rb], disp16); - else - tcg_gen_movi_i64(addr, disp16); - if (ra != 31) - tcg_gen_qemu_st16(cpu_ir[ra], addr, ctx->mem_idx); - else { - TCGv zero = 
tcg_const_i64(0); - tcg_gen_qemu_st16(zero, addr, ctx->mem_idx); - tcg_temp_free(zero); - } - tcg_temp_free(addr); - } + gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0); break; case 0x0E: /* STB */ - { - TCGv addr; - if (!(ctx->amask & AMASK_BWX)) - goto invalid_opc; - addr = tcg_temp_new(TCG_TYPE_I64); - if (rb != 31) - tcg_gen_addi_i64(addr, cpu_ir[rb], disp16); - else - tcg_gen_movi_i64(addr, disp16); - if (ra != 31) - tcg_gen_qemu_st8(cpu_ir[ra], addr, ctx->mem_idx); - else { - TCGv zero = tcg_const_i64(0); - tcg_gen_qemu_st8(zero, addr, ctx->mem_idx); - tcg_temp_free(zero); - } - tcg_temp_free(addr); - } + gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0); break; case 0x0F: /* STQ_U */ - { - TCGv addr = tcg_temp_new(TCG_TYPE_I64); - if (rb != 31) { - tcg_gen_addi_i64(addr, cpu_ir[rb], disp16); - tcg_gen_andi_i64(addr, addr, ~0x7); - } else - tcg_gen_movi_i64(addr, disp16 & ~0x7); - if (ra != 31) - tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx); - else { - TCGv zero = tcg_const_i64(0); - tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); - tcg_temp_free(zero); - } - tcg_temp_free(addr); - } + gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0); break; case 0x10: switch (fn7) { @@ -778,7 +792,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) /* S4ADDL */ if (likely(rc != 31)) { if (ra != 31) { - TCGv tmp = tcg_temp_new(TCG_TYPE_I64); + TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); if (islit) tcg_gen_addi_i64(tmp, tmp, lit); @@ -816,7 +830,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) /* S4SUBL */ if (likely(rc != 31)) { if (ra != 31) { - TCGv tmp = tcg_temp_new(TCG_TYPE_I64); + TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); if (islit) tcg_gen_subi_i64(tmp, tmp, lit); @@ -836,13 +850,13 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) break; case 0x0F: /* CMPBGE */ - gen_arith3_helper(helper_cmpbge, ra, rb, rc, islit, lit); + gen_cmpbge(ra, rb, rc, islit, lit); break; case 0x12: /* S8ADDL */ if (likely(rc != 31)) { if (ra != 31) { - TCGv tmp = tcg_temp_new(TCG_TYPE_I64); + TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 3); if (islit) tcg_gen_addi_i64(tmp, tmp, lit); @@ -862,7 +876,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) /* S8SUBL */ if (likely(rc != 31)) { if (ra != 31) { - TCGv tmp = tcg_temp_new(TCG_TYPE_I64); + TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 3); if (islit) tcg_gen_subi_i64(tmp, tmp, lit); @@ -904,7 +918,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) /* S4ADDQ */ if (likely(rc != 31)) { if (ra != 31) { - TCGv tmp = tcg_temp_new(TCG_TYPE_I64); + TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); if (islit) tcg_gen_addi_i64(cpu_ir[rc], tmp, lit); @@ -939,7 +953,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) /* S4SUBQ */ if (likely(rc != 31)) { if (ra != 31) { - TCGv tmp = tcg_temp_new(TCG_TYPE_I64); + TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); if (islit) tcg_gen_subi_i64(cpu_ir[rc], tmp, lit); @@ -962,7 +976,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) /* S8ADDQ */ if (likely(rc != 31)) { if (ra != 31) { - TCGv tmp = tcg_temp_new(TCG_TYPE_I64); + TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 3); if (islit) tcg_gen_addi_i64(cpu_ir[rc], tmp, lit); @@ -981,7 +995,7 @@ static always_inline int 
translate_one (DisasContext *ctx, uint32_t insn) /* S8SUBQ */ if (likely(rc != 31)) { if (ra != 31) { - TCGv tmp = tcg_temp_new(TCG_TYPE_I64); + TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 3); if (islit) tcg_gen_subi_i64(cpu_ir[rc], tmp, lit); @@ -1002,11 +1016,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) break; case 0x40: /* ADDL/V */ - gen_arith3_helper(helper_addlv, ra, rb, rc, islit, lit); + gen_addlv(ra, rb, rc, islit, lit); break; case 0x49: /* SUBL/V */ - gen_arith3_helper(helper_sublv, ra, rb, rc, islit, lit); + gen_sublv(ra, rb, rc, islit, lit); break; case 0x4D: /* CMPLT */ @@ -1014,11 +1028,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) break; case 0x60: /* ADDQ/V */ - gen_arith3_helper(helper_addqv, ra, rb, rc, islit, lit); + gen_addqv(ra, rb, rc, islit, lit); break; case 0x69: /* SUBQ/V */ - gen_arith3_helper(helper_subqv, ra, rb, rc, islit, lit); + gen_subqv(ra, rb, rc, islit, lit); break; case 0x6D: /* CMPLE */ @@ -1047,23 +1061,19 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) if (ra != 31) { if (islit) tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit); - else { - TCGv tmp = tcg_temp_new(TCG_TYPE_I64); - tcg_gen_not_i64(tmp, cpu_ir[rb]); - tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], tmp); - tcg_temp_free(tmp); - } + else + tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } else tcg_gen_movi_i64(cpu_ir[rc], 0); } break; case 0x14: /* CMOVLBS */ - gen_cmov(ctx, TCG_COND_EQ, ra, rb, rc, islit, lit, 1); + gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1); break; case 0x16: /* CMOVLBC */ - gen_cmov(ctx, TCG_COND_NE, ra, rb, rc, islit, lit, 1); + gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1); break; case 0x20: /* BIS */ @@ -1071,7 +1081,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) if (ra != 31) { if (islit) tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit); - else + else tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } else { if (islit) @@ -1083,11 +1093,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) break; case 0x24: /* CMOVEQ */ - gen_cmov(ctx, TCG_COND_NE, ra, rb, rc, islit, lit, 0); + gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0); break; case 0x26: /* CMOVNE */ - gen_cmov(ctx, TCG_COND_EQ, ra, rb, rc, islit, lit, 0); + gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0); break; case 0x28: /* ORNOT */ @@ -1095,12 +1105,8 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) if (ra != 31) { if (islit) tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit); - else { - TCGv tmp = tcg_temp_new(TCG_TYPE_I64); - tcg_gen_not_i64(tmp, cpu_ir[rb]); - tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], tmp); - tcg_temp_free(tmp); - } + else + tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], ~lit); @@ -1127,11 +1133,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) break; case 0x44: /* CMOVLT */ - gen_cmov(ctx, TCG_COND_GE, ra, rb, rc, islit, lit, 0); + gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0); break; case 0x46: /* CMOVGE */ - gen_cmov(ctx, TCG_COND_LT, ra, rb, rc, islit, lit, 0); + gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0); break; case 0x48: /* EQV */ @@ -1139,12 +1145,8 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) if (ra != 31) { if (islit) tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit); - else { - TCGv tmp = tcg_temp_new(TCG_TYPE_I64); - tcg_gen_not_i64(tmp, cpu_ir[rb]); - 
tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], tmp); - tcg_temp_free(tmp); - } + else + tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], ~lit); @@ -1159,21 +1161,21 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) if (islit) tcg_gen_movi_i64(cpu_ir[rc], helper_amask(lit)); else - tcg_gen_helper_1_1(helper_amask, cpu_ir[rc], cpu_ir[rb]); + gen_helper_amask(cpu_ir[rc], cpu_ir[rb]); } break; case 0x64: /* CMOVLE */ - gen_cmov(ctx, TCG_COND_GT, ra, rb, rc, islit, lit, 0); + gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0); break; case 0x66: /* CMOVGT */ - gen_cmov(ctx, TCG_COND_LE, ra, rb, rc, islit, lit, 0); + gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0); break; case 0x6C: /* IMPLVER */ if (rc != 31) - tcg_gen_helper_1_0(helper_load_implver, cpu_ir[rc]); + gen_helper_load_implver(cpu_ir[rc]); break; default: goto invalid_opc; @@ -1183,7 +1185,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) switch (fn7) { case 0x02: /* MSKBL */ - gen_arith3_helper(helper_mskbl, ra, rb, rc, islit, lit); + gen_mskbl(ra, rb, rc, islit, lit); break; case 0x06: /* EXTBL */ @@ -1191,11 +1193,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) break; case 0x0B: /* INSBL */ - gen_arith3_helper(helper_insbl, ra, rb, rc, islit, lit); + gen_insbl(ra, rb, rc, islit, lit); break; case 0x12: /* MSKWL */ - gen_arith3_helper(helper_mskwl, ra, rb, rc, islit, lit); + gen_mskwl(ra, rb, rc, islit, lit); break; case 0x16: /* EXTWL */ @@ -1203,11 +1205,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) break; case 0x1B: /* INSWL */ - gen_arith3_helper(helper_inswl, ra, rb, rc, islit, lit); + gen_inswl(ra, rb, rc, islit, lit); break; case 0x22: /* MSKLL */ - gen_arith3_helper(helper_mskll, ra, rb, rc, islit, lit); + gen_mskll(ra, rb, rc, islit, lit); break; case 0x26: /* EXTLL */ @@ -1215,19 +1217,19 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) break; case 0x2B: /* INSLL */ - gen_arith3_helper(helper_insll, ra, rb, rc, islit, lit); + gen_insll(ra, rb, rc, islit, lit); break; case 0x30: /* ZAP */ - gen_arith3_helper(helper_zap, ra, rb, rc, islit, lit); + gen_zap(ra, rb, rc, islit, lit); break; case 0x31: /* ZAPNOT */ - gen_arith3_helper(helper_zapnot, ra, rb, rc, islit, lit); + gen_zapnot(ra, rb, rc, islit, lit); break; case 0x32: /* MSKQL */ - gen_arith3_helper(helper_mskql, ra, rb, rc, islit, lit); + gen_mskql(ra, rb, rc, islit, lit); break; case 0x34: /* SRL */ @@ -1236,7 +1238,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) if (islit) tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f); else { - TCGv shift = tcg_temp_new(TCG_TYPE_I64); + TCGv shift = tcg_temp_new(); tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f); tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift); tcg_temp_free(shift); @@ -1256,7 +1258,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) if (islit) tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f); else { - TCGv shift = tcg_temp_new(TCG_TYPE_I64); + TCGv shift = tcg_temp_new(); tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f); tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift); tcg_temp_free(shift); @@ -1267,7 +1269,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) break; case 0x3B: /* INSQL */ - gen_arith3_helper(helper_insql, ra, rb, rc, islit, lit); + gen_insql(ra, rb, rc, islit, lit); break; case 0x3C: /* SRA */ @@ -1276,7 +1278,7 @@ static 
always_inline int translate_one (DisasContext *ctx, uint32_t insn) if (islit) tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f); else { - TCGv shift = tcg_temp_new(TCG_TYPE_I64); + TCGv shift = tcg_temp_new(); tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f); tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift); tcg_temp_free(shift); @@ -1287,11 +1289,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) break; case 0x52: /* MSKWH */ - gen_arith3_helper(helper_mskwh, ra, rb, rc, islit, lit); + gen_mskwh(ra, rb, rc, islit, lit); break; case 0x57: /* INSWH */ - gen_arith3_helper(helper_inswh, ra, rb, rc, islit, lit); + gen_inswh(ra, rb, rc, islit, lit); break; case 0x5A: /* EXTWH */ @@ -1299,11 +1301,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) break; case 0x62: /* MSKLH */ - gen_arith3_helper(helper_msklh, ra, rb, rc, islit, lit); + gen_msklh(ra, rb, rc, islit, lit); break; case 0x67: /* INSLH */ - gen_arith3_helper(helper_inslh, ra, rb, rc, islit, lit); + gen_inslh(ra, rb, rc, islit, lit); break; case 0x6A: /* EXTLH */ @@ -1311,11 +1313,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) break; case 0x72: /* MSKQH */ - gen_arith3_helper(helper_mskqh, ra, rb, rc, islit, lit); + gen_mskqh(ra, rb, rc, islit, lit); break; case 0x77: /* INSQH */ - gen_arith3_helper(helper_insqh, ra, rb, rc, islit, lit); + gen_insqh(ra, rb, rc, islit, lit); break; case 0x7A: /* EXTQH */ @@ -1354,15 +1356,15 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) break; case 0x30: /* UMULH */ - gen_arith3_helper(helper_umulh, ra, rb, rc, islit, lit); + gen_umulh(ra, rb, rc, islit, lit); break; case 0x40: /* MULL/V */ - gen_arith3_helper(helper_mullv, ra, rb, rc, islit, lit); + gen_mullv(ra, rb, rc, islit, lit); break; case 0x60: /* MULQ/V */ - gen_arith3_helper(helper_mulqv, ra, rb, rc, islit, lit); + gen_mulqv(ra, rb, rc, islit, lit); break; default: goto invalid_opc; @@ -1374,47 +1376,64 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) /* ITOFS */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; - gen_itf(ctx, &gen_op_itofs, ra, rc); + if (likely(rc != 31)) { + if (ra != 31) { + TCGv_i32 tmp = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]); + gen_helper_memory_to_s(cpu_fir[rc], tmp); + tcg_temp_free_i32(tmp); + } else + tcg_gen_movi_i64(cpu_fir[rc], 0); + } break; case 0x0A: /* SQRTF */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; - gen_farith2(ctx, &gen_op_sqrtf, rb, rc); + gen_fsqrtf(rb, rc); break; case 0x0B: /* SQRTS */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; - gen_farith2(ctx, &gen_op_sqrts, rb, rc); + gen_fsqrts(rb, rc); break; case 0x14: /* ITOFF */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; -#if 0 // TODO - gen_itf(ctx, &gen_op_itoff, ra, rc); -#else - goto invalid_opc; -#endif + if (likely(rc != 31)) { + if (ra != 31) { + TCGv_i32 tmp = tcg_temp_new_i32(); + tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]); + gen_helper_memory_to_f(cpu_fir[rc], tmp); + tcg_temp_free_i32(tmp); + } else + tcg_gen_movi_i64(cpu_fir[rc], 0); + } break; case 0x24: /* ITOFT */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; - gen_itf(ctx, &gen_op_itoft, ra, rc); + if (likely(rc != 31)) { + if (ra != 31) + tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]); + else + tcg_gen_movi_i64(cpu_fir[rc], 0); + } break; case 0x2A: /* SQRTG */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; - gen_farith2(ctx, &gen_op_sqrtg, rb, rc); + gen_fsqrtg(rb, rc); break; case 0x02B: /* SQRTT */ if 
(!(ctx->amask & AMASK_FIX)) goto invalid_opc; - gen_farith2(ctx, &gen_op_sqrtt, rb, rc); + gen_fsqrtt(rb, rc); break; default: goto invalid_opc; @@ -1426,79 +1445,79 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) switch (fpfn) { /* f11 & 0x3F */ case 0x00: /* ADDF */ - gen_farith3(ctx, &gen_op_addf, ra, rb, rc); + gen_faddf(ra, rb, rc); break; case 0x01: /* SUBF */ - gen_farith3(ctx, &gen_op_subf, ra, rb, rc); + gen_fsubf(ra, rb, rc); break; case 0x02: /* MULF */ - gen_farith3(ctx, &gen_op_mulf, ra, rb, rc); + gen_fmulf(ra, rb, rc); break; case 0x03: /* DIVF */ - gen_farith3(ctx, &gen_op_divf, ra, rb, rc); + gen_fdivf(ra, rb, rc); break; case 0x1E: /* CVTDG */ #if 0 // TODO - gen_farith2(ctx, &gen_op_cvtdg, rb, rc); + gen_fcvtdg(rb, rc); #else goto invalid_opc; #endif break; case 0x20: /* ADDG */ - gen_farith3(ctx, &gen_op_addg, ra, rb, rc); + gen_faddg(ra, rb, rc); break; case 0x21: /* SUBG */ - gen_farith3(ctx, &gen_op_subg, ra, rb, rc); + gen_fsubg(ra, rb, rc); break; case 0x22: /* MULG */ - gen_farith3(ctx, &gen_op_mulg, ra, rb, rc); + gen_fmulg(ra, rb, rc); break; case 0x23: /* DIVG */ - gen_farith3(ctx, &gen_op_divg, ra, rb, rc); + gen_fdivg(ra, rb, rc); break; case 0x25: /* CMPGEQ */ - gen_farith3(ctx, &gen_op_cmpgeq, ra, rb, rc); + gen_fcmpgeq(ra, rb, rc); break; case 0x26: /* CMPGLT */ - gen_farith3(ctx, &gen_op_cmpglt, ra, rb, rc); + gen_fcmpglt(ra, rb, rc); break; case 0x27: /* CMPGLE */ - gen_farith3(ctx, &gen_op_cmpgle, ra, rb, rc); + gen_fcmpgle(ra, rb, rc); break; case 0x2C: /* CVTGF */ - gen_farith2(ctx, &gen_op_cvtgf, rb, rc); + gen_fcvtgf(rb, rc); break; case 0x2D: /* CVTGD */ #if 0 // TODO - gen_farith2(ctx, &gen_op_cvtgd, rb, rc); + gen_fcvtgd(rb, rc); #else goto invalid_opc; #endif break; case 0x2F: /* CVTGQ */ - gen_farith2(ctx, &gen_op_cvtgq, rb, rc); + gen_fcvtgq(rb, rc); break; case 0x3C: /* CVTQF */ - gen_farith2(ctx, &gen_op_cvtqf, rb, rc); + gen_fcvtqf(rb, rc); break; case 0x3E: /* CVTQG */ - gen_farith2(ctx, &gen_op_cvtqg, rb, rc); + gen_fcvtqg(rb, rc); break; default: goto invalid_opc; @@ -1510,73 +1529,73 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) switch (fpfn) { /* f11 & 0x3F */ case 0x00: /* ADDS */ - gen_farith3(ctx, &gen_op_adds, ra, rb, rc); + gen_fadds(ra, rb, rc); break; case 0x01: /* SUBS */ - gen_farith3(ctx, &gen_op_subs, ra, rb, rc); + gen_fsubs(ra, rb, rc); break; case 0x02: /* MULS */ - gen_farith3(ctx, &gen_op_muls, ra, rb, rc); + gen_fmuls(ra, rb, rc); break; case 0x03: /* DIVS */ - gen_farith3(ctx, &gen_op_divs, ra, rb, rc); + gen_fdivs(ra, rb, rc); break; case 0x20: /* ADDT */ - gen_farith3(ctx, &gen_op_addt, ra, rb, rc); + gen_faddt(ra, rb, rc); break; case 0x21: /* SUBT */ - gen_farith3(ctx, &gen_op_subt, ra, rb, rc); + gen_fsubt(ra, rb, rc); break; case 0x22: /* MULT */ - gen_farith3(ctx, &gen_op_mult, ra, rb, rc); + gen_fmult(ra, rb, rc); break; case 0x23: /* DIVT */ - gen_farith3(ctx, &gen_op_divt, ra, rb, rc); + gen_fdivt(ra, rb, rc); break; case 0x24: /* CMPTUN */ - gen_farith3(ctx, &gen_op_cmptun, ra, rb, rc); + gen_fcmptun(ra, rb, rc); break; case 0x25: /* CMPTEQ */ - gen_farith3(ctx, &gen_op_cmpteq, ra, rb, rc); + gen_fcmpteq(ra, rb, rc); break; case 0x26: /* CMPTLT */ - gen_farith3(ctx, &gen_op_cmptlt, ra, rb, rc); + gen_fcmptlt(ra, rb, rc); break; case 0x27: /* CMPTLE */ - gen_farith3(ctx, &gen_op_cmptle, ra, rb, rc); + gen_fcmptle(ra, rb, rc); break; case 0x2C: /* XXX: incorrect */ - if (fn11 == 0x2AC) { + if (fn11 == 0x2AC || fn11 == 0x6AC) { /* CVTST */ - 
gen_farith2(ctx, &gen_op_cvtst, rb, rc); + gen_fcvtst(rb, rc); } else { /* CVTTS */ - gen_farith2(ctx, &gen_op_cvtts, rb, rc); + gen_fcvtts(rb, rc); } break; case 0x2F: /* CVTTQ */ - gen_farith2(ctx, &gen_op_cvttq, rb, rc); + gen_fcvttq(rb, rc); break; case 0x3C: /* CVTQS */ - gen_farith2(ctx, &gen_op_cvtqs, rb, rc); + gen_fcvtqs(rb, rc); break; case 0x3E: /* CVTQT */ - gen_farith2(ctx, &gen_op_cvtqt, rb, rc); + gen_fcvtqt(rb, rc); break; default: goto invalid_opc; @@ -1586,76 +1605,76 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) switch (fn11) { case 0x010: /* CVTLQ */ - gen_farith2(ctx, &gen_op_cvtlq, rb, rc); + gen_fcvtlq(rb, rc); break; case 0x020: - /* CPYS */ - if (ra == rb) { - if (ra == 31 && rc == 31) { - /* FNOP */ - gen_op_nop(); - } else { + if (likely(rc != 31)) { + if (ra == rb) /* FMOV */ - gen_load_fir(ctx, rb, 0); - gen_store_fir(ctx, rc, 0); - } - } else { - gen_farith3(ctx, &gen_op_cpys, ra, rb, rc); + tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]); + else + /* CPYS */ + gen_fcpys(ra, rb, rc); } break; case 0x021: /* CPYSN */ - gen_farith2(ctx, &gen_op_cpysn, rb, rc); + gen_fcpysn(ra, rb, rc); break; case 0x022: /* CPYSE */ - gen_farith2(ctx, &gen_op_cpyse, rb, rc); + gen_fcpyse(ra, rb, rc); break; case 0x024: /* MT_FPCR */ - gen_load_fir(ctx, ra, 0); - gen_op_store_fpcr(); + if (likely(ra != 31)) + gen_helper_store_fpcr(cpu_fir[ra]); + else { + TCGv tmp = tcg_const_i64(0); + gen_helper_store_fpcr(tmp); + tcg_temp_free(tmp); + } break; case 0x025: /* MF_FPCR */ - gen_op_load_fpcr(); - gen_store_fir(ctx, ra, 0); + if (likely(ra != 31)) + gen_helper_load_fpcr(cpu_fir[ra]); break; case 0x02A: /* FCMOVEQ */ - gen_fcmov(ctx, &gen_op_cmpfeq, ra, rb, rc); + gen_fcmpfeq(ra, rb, rc); break; case 0x02B: /* FCMOVNE */ - gen_fcmov(ctx, &gen_op_cmpfne, ra, rb, rc); + gen_fcmpfne(ra, rb, rc); break; case 0x02C: /* FCMOVLT */ - gen_fcmov(ctx, &gen_op_cmpflt, ra, rb, rc); + gen_fcmpflt(ra, rb, rc); break; case 0x02D: /* FCMOVGE */ - gen_fcmov(ctx, &gen_op_cmpfge, ra, rb, rc); + gen_fcmpfge(ra, rb, rc); break; case 0x02E: /* FCMOVLE */ - gen_fcmov(ctx, &gen_op_cmpfle, ra, rb, rc); + gen_fcmpfle(ra, rb, rc); break; case 0x02F: /* FCMOVGT */ - gen_fcmov(ctx, &gen_op_cmpfgt, ra, rb, rc); + gen_fcmpfgt(ra, rb, rc); break; case 0x030: /* CVTQL */ - gen_farith2(ctx, &gen_op_cvtql, rb, rc); + gen_fcvtql(rb, rc); break; case 0x130: /* CVTQL/V */ - gen_farith2(ctx, &gen_op_cvtqlv, rb, rc); + gen_fcvtqlv(rb, rc); break; case 0x530: /* CVTQL/SV */ - gen_farith2(ctx, &gen_op_cvtqlsv, rb, rc); + gen_fcvtqlsv(rb, rc); break; default: goto invalid_opc; @@ -1692,12 +1711,12 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) case 0xC000: /* RPCC */ if (ra != 31) - tcg_gen_helper_1_0(helper_load_pcc, cpu_ir[ra]); + gen_helper_load_pcc(cpu_ir[ra]); break; case 0xE000: /* RC */ if (ra != 31) - tcg_gen_helper_1_0(helper_rc, cpu_ir[ra]); + gen_helper_rc(cpu_ir[ra]); break; case 0xE800: /* ECB */ @@ -1711,7 +1730,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) case 0xF000: /* RS */ if (ra != 31) - tcg_gen_helper_1_0(helper_rs, cpu_ir[ra]); + gen_helper_rs(cpu_ir[ra]); break; case 0xF800: /* WH64 */ @@ -1728,18 +1747,20 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) #else if (!ctx->pal_mode) goto invalid_opc; - gen_op_mfpr(insn & 0xFF); - if (ra != 31) - tcg_gen_mov_i64(cpu_ir[ra], cpu_T[0]); + if (ra != 31) { + TCGv tmp = tcg_const_i32(insn & 0xFF); + gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]); + 
tcg_temp_free(tmp); + } break; #endif case 0x1A: - if (ra != 31) - tcg_gen_movi_i64(cpu_ir[ra], ctx->pc); if (rb != 31) tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3); else tcg_gen_movi_i64(cpu_pc, 0); + if (ra != 31) + tcg_gen_movi_i64(cpu_ir[ra], ctx->pc); /* Those four jumps only differ by the branch prediction hint */ switch (fn2) { case 0x0: @@ -1764,94 +1785,94 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) #else if (!ctx->pal_mode) goto invalid_opc; - if (rb != 31) - tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]); - else - tcg_gen_movi_i64(cpu_T[0], 0); - tcg_gen_movi_i64(cpu_T[1], disp12); - tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_T[1]); - switch ((insn >> 12) & 0xF) { - case 0x0: - /* Longword physical access */ - gen_op_ldl_raw(); - break; - case 0x1: - /* Quadword physical access */ - gen_op_ldq_raw(); - break; - case 0x2: - /* Longword physical access with lock */ - gen_op_ldl_l_raw(); - break; - case 0x3: - /* Quadword physical access with lock */ - gen_op_ldq_l_raw(); - break; - case 0x4: - /* Longword virtual PTE fetch */ - gen_op_ldl_kernel(); - break; - case 0x5: - /* Quadword virtual PTE fetch */ - gen_op_ldq_kernel(); - break; - case 0x6: - /* Invalid */ - goto invalid_opc; - case 0x7: - /* Invalid */ - goto invalid_opc; - case 0x8: - /* Longword virtual access */ - gen_op_ld_phys_to_virt(); - gen_op_ldl_raw(); - break; - case 0x9: - /* Quadword virtual access */ - gen_op_ld_phys_to_virt(); - gen_op_ldq_raw(); - break; - case 0xA: - /* Longword virtual access with protection check */ - gen_ldl(ctx); - break; - case 0xB: - /* Quadword virtual access with protection check */ - gen_ldq(ctx); - break; - case 0xC: - /* Longword virtual access with altenate access mode */ - gen_op_set_alt_mode(); - gen_op_ld_phys_to_virt(); - gen_op_ldl_raw(); - gen_op_restore_mode(); - break; - case 0xD: - /* Quadword virtual access with altenate access mode */ - gen_op_set_alt_mode(); - gen_op_ld_phys_to_virt(); - gen_op_ldq_raw(); - gen_op_restore_mode(); - break; - case 0xE: - /* Longword virtual access with alternate access mode and - * protection checks - */ - gen_op_set_alt_mode(); - gen_op_ldl_data(); - gen_op_restore_mode(); - break; - case 0xF: - /* Quadword virtual access with alternate access mode and - * protection checks - */ - gen_op_set_alt_mode(); - gen_op_ldq_data(); - gen_op_restore_mode(); - break; + if (ra != 31) { + TCGv addr = tcg_temp_new(); + if (rb != 31) + tcg_gen_addi_i64(addr, cpu_ir[rb], disp12); + else + tcg_gen_movi_i64(addr, disp12); + switch ((insn >> 12) & 0xF) { + case 0x0: + /* Longword physical access (hw_ldl/p) */ + gen_helper_ldl_raw(cpu_ir[ra], addr); + break; + case 0x1: + /* Quadword physical access (hw_ldq/p) */ + gen_helper_ldq_raw(cpu_ir[ra], addr); + break; + case 0x2: + /* Longword physical access with lock (hw_ldl_l/p) */ + gen_helper_ldl_l_raw(cpu_ir[ra], addr); + break; + case 0x3: + /* Quadword physical access with lock (hw_ldq_l/p) */ + gen_helper_ldq_l_raw(cpu_ir[ra], addr); + break; + case 0x4: + /* Longword virtual PTE fetch (hw_ldl/v) */ + tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0); + break; + case 0x5: + /* Quadword virtual PTE fetch (hw_ldq/v) */ + tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0); + break; + case 0x6: + /* Incpu_ir[ra]id */ + goto invalid_opc; + case 0x7: + /* Incpu_ir[ra]id */ + goto invalid_opc; + case 0x8: + /* Longword virtual access (hw_ldl) */ + gen_helper_st_virt_to_phys(addr, addr); + gen_helper_ldl_raw(cpu_ir[ra], addr); + break; + case 0x9: + /* Quadword virtual access (hw_ldq) */ + 
gen_helper_st_virt_to_phys(addr, addr); + gen_helper_ldq_raw(cpu_ir[ra], addr); + break; + case 0xA: + /* Longword virtual access with protection check (hw_ldl/w) */ + tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0); + break; + case 0xB: + /* Quadword virtual access with protection check (hw_ldq/w) */ + tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0); + break; + case 0xC: + /* Longword virtual access with alt access mode (hw_ldl/a)*/ + gen_helper_set_alt_mode(); + gen_helper_st_virt_to_phys(addr, addr); + gen_helper_ldl_raw(cpu_ir[ra], addr); + gen_helper_restore_mode(); + break; + case 0xD: + /* Quadword virtual access with alt access mode (hw_ldq/a) */ + gen_helper_set_alt_mode(); + gen_helper_st_virt_to_phys(addr, addr); + gen_helper_ldq_raw(cpu_ir[ra], addr); + gen_helper_restore_mode(); + break; + case 0xE: + /* Longword virtual access with alternate access mode and + * protection checks (hw_ldl/wa) + */ + gen_helper_set_alt_mode(); + gen_helper_ldl_data(cpu_ir[ra], addr); + gen_helper_restore_mode(); + break; + case 0xF: + /* Quadword virtual access with alternate access mode and + * protection checks (hw_ldq/wa) + */ + gen_helper_set_alt_mode(); + gen_helper_ldq_data(cpu_ir[ra], addr); + gen_helper_restore_mode(); + break; + } + tcg_temp_free(addr); } - if (ra != 31) - tcg_gen_mov_i64(cpu_ir[ra], cpu_T[1]); break; #endif case 0x1C: @@ -1886,7 +1907,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) if (islit) tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit)); else - tcg_gen_helper_1_1(helper_ctpop, cpu_ir[rc], cpu_ir[rb]); + gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]); } break; case 0x31: @@ -1904,7 +1925,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) if (islit) tcg_gen_movi_i64(cpu_ir[rc], clz64(lit)); else - tcg_gen_helper_1_1(helper_ctlz, cpu_ir[rc], cpu_ir[rb]); + gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]); } break; case 0x33: @@ -1915,7 +1936,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) if (islit) tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit)); else - tcg_gen_helper_1_1(helper_cttz, cpu_ir[rc], cpu_ir[rb]); + gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]); } break; case 0x34: @@ -2006,13 +2027,29 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) /* FTOIT */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; - gen_fti(ctx, &gen_op_ftoit, ra, rb); + if (likely(rc != 31)) { + if (ra != 31) + tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]); + else + tcg_gen_movi_i64(cpu_ir[rc], 0); + } break; case 0x78: /* FTOIS */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; - gen_fti(ctx, &gen_op_ftois, ra, rb); + if (rc != 31) { + TCGv_i32 tmp1 = tcg_temp_new_i32(); + if (ra != 31) + gen_helper_s_to_memory(tmp1, cpu_fir[ra]); + else { + TCGv tmp2 = tcg_const_i64(0); + gen_helper_s_to_memory(tmp1, tmp2); + tcg_temp_free(tmp2); + } + tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1); + tcg_temp_free_i32(tmp1); + } break; default: goto invalid_opc; @@ -2025,12 +2062,18 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) #else if (!ctx->pal_mode) goto invalid_opc; - if (ra != 31) - tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]); - else - tcg_gen_movi_i64(cpu_T[0], 0); - gen_op_mtpr(insn & 0xFF); - ret = 2; + else { + TCGv tmp1 = tcg_const_i32(insn & 0xFF); + if (ra != 31) + gen_helper_mtpr(tmp1, cpu_ir[ra]); + else { + TCGv tmp2 = tcg_const_i64(0); + gen_helper_mtpr(tmp1, tmp2); + tcg_temp_free(tmp2); + } + tcg_temp_free(tmp1); + ret = 2; + } break; #endif case 0x1E: @@ -2042,15 +2085,17 @@ static always_inline int 
translate_one (DisasContext *ctx, uint32_t insn)
             goto invalid_opc;
         if (rb == 31) {
             /* "Old" alpha */
-            gen_op_hw_rei();
+            gen_helper_hw_rei();
         } else {
-            if (ra != 31)
-                tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]);
-            else
-                tcg_gen_movi_i64(cpu_T[0], 0);
-            tcg_gen_movi_i64(cpu_T[1], (((int64_t)insn << 51) >> 51));
-            tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
-            gen_op_hw_ret();
+            TCGv tmp;
+
+            if (ra != 31) {
+                tmp = tcg_temp_new();
+                tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
+            } else
+                tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
+            gen_helper_hw_ret(tmp);
+            tcg_temp_free(tmp);
         }
         ret = 2;
         break;
@@ -2062,206 +2107,154 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
 #else
         if (!ctx->pal_mode)
             goto invalid_opc;
-        if (ra != 31)
-            tcg_gen_addi_i64(cpu_T[0], cpu_ir[rb], disp12);
-        else
-            tcg_gen_movi_i64(cpu_T[0], disp12);
-        if (ra != 31)
-            tcg_gen_mov_i64(cpu_T[1], cpu_ir[ra]);
-        else
-            tcg_gen_movi_i64(cpu_T[1], 0);
-        switch ((insn >> 12) & 0xF) {
-        case 0x0:
-            /* Longword physical access */
-            gen_op_stl_raw();
-            break;
-        case 0x1:
-            /* Quadword physical access */
-            gen_op_stq_raw();
-            break;
-        case 0x2:
-            /* Longword physical access with lock */
-            gen_op_stl_c_raw();
-            break;
-        case 0x3:
-            /* Quadword physical access with lock */
-            gen_op_stq_c_raw();
-            break;
-        case 0x4:
-            /* Longword virtual access */
-            gen_op_st_phys_to_virt();
-            gen_op_stl_raw();
-            break;
-        case 0x5:
-            /* Quadword virtual access */
-            gen_op_st_phys_to_virt();
-            gen_op_stq_raw();
-            break;
-        case 0x6:
-            /* Invalid */
-            goto invalid_opc;
-        case 0x7:
-            /* Invalid */
-            goto invalid_opc;
-        case 0x8:
-            /* Invalid */
-            goto invalid_opc;
-        case 0x9:
-            /* Invalid */
-            goto invalid_opc;
-        case 0xA:
-            /* Invalid */
-            goto invalid_opc;
-        case 0xB:
-            /* Invalid */
-            goto invalid_opc;
-        case 0xC:
-            /* Longword virtual access with alternate access mode */
-            gen_op_set_alt_mode();
-            gen_op_st_phys_to_virt();
-            gen_op_ldl_raw();
-            gen_op_restore_mode();
-            break;
-        case 0xD:
-            /* Quadword virtual access with alternate access mode */
-            gen_op_set_alt_mode();
-            gen_op_st_phys_to_virt();
-            gen_op_ldq_raw();
-            gen_op_restore_mode();
-            break;
-        case 0xE:
-            /* Invalid */
-            goto invalid_opc;
-        case 0xF:
-            /* Invalid */
-            goto invalid_opc;
+        else {
+            TCGv addr, val;
+            addr = tcg_temp_new();
+            if (rb != 31)
+                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
+            else
+                tcg_gen_movi_i64(addr, disp12);
+            if (ra != 31)
+                val = cpu_ir[ra];
+            else {
+                val = tcg_temp_new();
+                tcg_gen_movi_i64(val, 0);
+            }
+            switch ((insn >> 12) & 0xF) {
+            case 0x0:
+                /* Longword physical access */
+                gen_helper_stl_raw(val, addr);
+                break;
+            case 0x1:
+                /* Quadword physical access */
+                gen_helper_stq_raw(val, addr);
+                break;
+            case 0x2:
+                /* Longword physical access with lock */
+                gen_helper_stl_c_raw(val, val, addr);
+                break;
+            case 0x3:
+                /* Quadword physical access with lock */
+                gen_helper_stq_c_raw(val, val, addr);
+                break;
+            case 0x4:
+                /* Longword virtual access */
+                gen_helper_st_virt_to_phys(addr, addr);
+                gen_helper_stl_raw(val, addr);
+                break;
+            case 0x5:
+                /* Quadword virtual access */
+                gen_helper_st_virt_to_phys(addr, addr);
+                gen_helper_stq_raw(val, addr);
+                break;
+            case 0x6:
+                /* Invalid */
+                goto invalid_opc;
+            case 0x7:
+                /* Invalid */
+                goto invalid_opc;
+            case 0x8:
+                /* Invalid */
+                goto invalid_opc;
+            case 0x9:
+                /* Invalid */
+                goto invalid_opc;
+            case 0xA:
+                /* Invalid */
+                goto invalid_opc;
+            case 0xB:
+                /* Invalid */
+                goto invalid_opc;
+            case 0xC:
+                /* Longword virtual access with alternate access mode */
+                gen_helper_set_alt_mode();
+                gen_helper_st_virt_to_phys(addr, addr);
+                gen_helper_stl_raw(val, addr);
+                gen_helper_restore_mode();
+                break;
+            case 0xD:
+                /* Quadword virtual access with alternate access mode */
+                gen_helper_set_alt_mode();
+                gen_helper_st_virt_to_phys(addr, addr);
+                gen_helper_stl_raw(val, addr);
+                gen_helper_restore_mode();
+                break;
+            case 0xE:
+                /* Invalid */
+                goto invalid_opc;
+            case 0xF:
+                /* Invalid */
+                goto invalid_opc;
+            }
+            if (ra == 31)
+                tcg_temp_free(val);
+            tcg_temp_free(addr);
         }
-        ret = 2;
         break;
 #endif
     case 0x20:
         /* LDF */
-#if 0 // TODO
-        gen_load_fmem(ctx, &gen_ldf, ra, rb, disp16);
-#else
-        goto invalid_opc;
-#endif
+        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
         break;
     case 0x21:
         /* LDG */
-#if 0 // TODO
-        gen_load_fmem(ctx, &gen_ldg, ra, rb, disp16);
-#else
-        goto invalid_opc;
-#endif
+        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
         break;
     case 0x22:
         /* LDS */
-        gen_load_fmem(ctx, &gen_lds, ra, rb, disp16);
+        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
         break;
     case 0x23:
         /* LDT */
-        gen_load_fmem(ctx, &gen_ldt, ra, rb, disp16);
+        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
     case 0x24:
         /* STF */
-#if 0 // TODO
-        gen_store_fmem(ctx, &gen_stf, ra, rb, disp16);
-#else
-        goto invalid_opc;
-#endif
+        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
         break;
     case 0x25:
         /* STG */
-#if 0 // TODO
-        gen_store_fmem(ctx, &gen_stg, ra, rb, disp16);
-#else
-        goto invalid_opc;
-#endif
+        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
         break;
     case 0x26:
         /* STS */
-        gen_store_fmem(ctx, &gen_sts, ra, rb, disp16);
+        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
         break;
     case 0x27:
         /* STT */
-        gen_store_fmem(ctx, &gen_stt, ra, rb, disp16);
+        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
         break;
     case 0x28:
         /* LDL */
-        if (likely(ra != 31)) {
-            TCGv addr = tcg_temp_new(TCG_TYPE_I64);
-            if (rb != 31)
-                tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
-            else
-                tcg_gen_movi_i64(addr, disp16);
-            tcg_gen_qemu_ld32s(cpu_ir[ra], addr, ctx->mem_idx);
-            tcg_temp_free(addr);
-        }
+        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
         break;
     case 0x29:
         /* LDQ */
-        if (likely(ra != 31)) {
-            TCGv addr = tcg_temp_new(TCG_TYPE_I64);
-            if (rb != 31)
-                tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
-            else
-                tcg_gen_movi_i64(addr, disp16);
-            tcg_gen_qemu_ld64(cpu_ir[ra], addr, ctx->mem_idx);
-            tcg_temp_free(addr);
-        }
+        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
         break;
     case 0x2A:
         /* LDL_L */
-        gen_load_mem(ctx, &gen_ldl_l, ra, rb, disp16, 0);
+        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
         break;
     case 0x2B:
         /* LDQ_L */
-        gen_load_mem(ctx, &gen_ldq_l, ra, rb, disp16, 0);
+        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
         break;
     case 0x2C:
         /* STL */
-        {
-            TCGv addr = tcg_temp_new(TCG_TYPE_I64);
-            if (rb != 31)
-                tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
-            else
-                tcg_gen_movi_i64(addr, disp16);
-            if (ra != 31)
-                tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
-            else {
-                TCGv zero = tcg_const_i64(0);
-                tcg_gen_qemu_st32(zero, addr, ctx->mem_idx);
-                tcg_temp_free(zero);
-            }
-            tcg_temp_free(addr);
-        }
+        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
         break;
     case 0x2D:
         /* STQ */
-        {
-            TCGv addr = tcg_temp_new(TCG_TYPE_I64);
-            if (rb != 31)
-                tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
-            else
-                tcg_gen_movi_i64(addr, disp16);
-            if (ra != 31)
-                tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
-            else {
-                TCGv zero = tcg_const_i64(0);
-                tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
-                tcg_temp_free(zero);
-            }
-            tcg_temp_free(addr);
-        }
+        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
         break;
     case 0x2E:
         /* STL_C */
-        gen_store_mem(ctx, &gen_stl_c, ra, rb, disp16, 0);
+        gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
         break;
     case 0x2F:
         /* STQ_C */
-        gen_store_mem(ctx, &gen_stq_c, ra, rb, disp16, 0);
+        gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
         break;
     case 0x30:
         /* BR */
@@ -2270,19 +2263,10 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
         tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
         ret = 1;
         break;
-    case 0x31:
-        /* FBEQ */
-        gen_fbcond(ctx, &gen_op_cmpfeq, ra, disp16);
-        ret = 1;
-        break;
-    case 0x32:
-        /* FBLT */
-        gen_fbcond(ctx, &gen_op_cmpflt, ra, disp16);
-        ret = 1;
-        break;
-    case 0x33:
-        /* FBLE */
-        gen_fbcond(ctx, &gen_op_cmpfle, ra, disp16);
+    case 0x31: /* FBEQ */
+    case 0x32: /* FBLT */
+    case 0x33: /* FBLE */
+        gen_fbcond(ctx, opc, ra, disp16);
         ret = 1;
         break;
     case 0x34:
@@ -2292,59 +2276,50 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
         tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
         ret = 1;
         break;
-    case 0x35:
-        /* FBNE */
-        gen_fbcond(ctx, &gen_op_cmpfne, ra, disp16);
-        ret = 1;
-        break;
-    case 0x36:
-        /* FBGE */
-        gen_fbcond(ctx, &gen_op_cmpfge, ra, disp16);
-        ret = 1;
-        break;
-    case 0x37:
-        /* FBGT */
-        gen_fbcond(ctx, &gen_op_cmpfgt, ra, disp16);
+    case 0x35: /* FBNE */
+    case 0x36: /* FBGE */
+    case 0x37: /* FBGT */
+        gen_fbcond(ctx, opc, ra, disp16);
         ret = 1;
         break;
     case 0x38:
         /* BLBC */
-        gen_bcond(ctx, TCG_COND_EQ, ra, disp16, 1);
+        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
         ret = 1;
         break;
     case 0x39:
         /* BEQ */
-        gen_bcond(ctx, TCG_COND_EQ, ra, disp16, 0);
+        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
         ret = 1;
         break;
     case 0x3A:
         /* BLT */
-        gen_bcond(ctx, TCG_COND_LT, ra, disp16, 0);
+        gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
         ret = 1;
         break;
     case 0x3B:
         /* BLE */
-        gen_bcond(ctx, TCG_COND_LE, ra, disp16, 0);
+        gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
         ret = 1;
         break;
     case 0x3C:
         /* BLBS */
-        gen_bcond(ctx, TCG_COND_NE, ra, disp16, 1);
+        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
         ret = 1;
         break;
     case 0x3D:
         /* BNE */
-        gen_bcond(ctx, TCG_COND_NE, ra, disp16, 0);
+        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
         ret = 1;
         break;
     case 0x3E:
         /* BGE */
-        gen_bcond(ctx, TCG_COND_GE, ra, disp16, 0);
+        gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
         ret = 1;
         break;
     case 0x3F:
         /* BGT */
-        gen_bcond(ctx, TCG_COND_GT, ra, disp16, 0);
+        gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
         ret = 1;
         break;
     invalid_opc:
@@ -2367,6 +2342,7 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
     target_ulong pc_start;
     uint32_t insn;
     uint16_t *gen_opc_end;
+    CPUBreakpoint *bp;
     int j, lj = -1;
     int ret;
     int num_insns;
@@ -2389,9 +2365,9 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
 
     gen_icount_start();
     for (ret = 0; ret == 0;) {
-        if (env->nb_breakpoints > 0) {
-            for(j = 0; j < env->nb_breakpoints; j++) {
-                if (env->breakpoints[j] == ctx.pc) {
+        if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
+            TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+                if (bp->pc == ctx.pc) {
                     gen_excp(&ctx, EXCP_DEBUG, 0);
                     break;
                 }
@@ -2403,26 +2379,22 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
                 lj++;
                 while (lj < j)
                     gen_opc_instr_start[lj++] = 0;
-                gen_opc_pc[lj] = ctx.pc;
-                gen_opc_instr_start[lj] = 1;
-                gen_opc_icount[lj] = num_insns;
             }
+            gen_opc_pc[lj] = ctx.pc;
+            gen_opc_instr_start[lj] = 1;
+            gen_opc_icount[lj] = num_insns;
         }
         if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
 #if defined ALPHA_DEBUG_DISAS
         insn_count++;
-        if (logfile != NULL) {
-            fprintf(logfile, "pc " TARGET_FMT_lx " mem_idx %d\n",
-                    ctx.pc, ctx.mem_idx);
-        }
+        LOG_DISAS("pc " TARGET_FMT_lx " mem_idx %d\n",
+                  ctx.pc, ctx.mem_idx);
 #endif
         insn = ldl_code(ctx.pc);
 #if defined ALPHA_DEBUG_DISAS
         insn_count++;
-        if (logfile != NULL) {
-            fprintf(logfile, "opcode %08x %d\n", insn, insn_count);
-        }
+        LOG_DISAS("opcode %08x %d\n", insn, insn_count);
 #endif
         num_insns++;
         ctx.pc += 4;
@@ -2433,10 +2405,15 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
          * generation
          */
        if (((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) ||
-            (env->singlestep_enabled) ||
             num_insns >= max_insns) {
             break;
         }
+
+        if (env->singlestep_enabled) {
+            gen_excp(&ctx, EXCP_DEBUG, 0);
+            break;
+        }
+
 #if defined (DO_SINGLE_STEP)
         break;
 #endif
@@ -2445,7 +2422,7 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
         tcg_gen_movi_i64(cpu_pc, ctx.pc);
     }
 #if defined (DO_TB_FLUSH)
-    tcg_gen_helper_0_0(helper_tb_flush);
+    gen_helper_tb_flush();
 #endif
     if (tb->cflags & CF_LAST_IO)
         gen_io_end();
@@ -2463,13 +2440,11 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
         tb->icount = num_insns;
     }
 #if defined ALPHA_DEBUG_DISAS
-    if (loglevel & CPU_LOG_TB_CPU) {
-        cpu_dump_state(env, logfile, fprintf, 0);
-    }
-    if (loglevel & CPU_LOG_TB_IN_ASM) {
-        fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
-        target_disas(logfile, pc_start, ctx.pc - pc_start, 1);
-        fprintf(logfile, "\n");
+    log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
+    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+        qemu_log("IN: %s\n", lookup_symbol(pc_start));
+        log_target_disas(pc_start, ctx.pc - pc_start, 1);
+        qemu_log("\n");
     }
 #endif
 }
@@ -2490,8 +2465,6 @@ CPUAlphaState * cpu_alpha_init (const char *cpu_model)
     uint64_t hwpcb;
 
     env = qemu_mallocz(sizeof(CPUAlphaState));
-    if (!env)
-        return NULL;
     cpu_exec_init(env);
     alpha_translate_init();
     tlb_flush(env, 1);