Refactor and enhance break/watchpoint API (Jan Kiszka)
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 96b3699..085c7a1 100644
 #include "cpu.h"
 #include "exec-all.h"
 #include "disas.h"
-#include "helper.h"
 #include "tcg-op.h"
 #include "qemu-common.h"
 
+#include "helper.h"
+#define GEN_HELPER 1
+#include "helper.h"
+
 #define CPU_SINGLE_STEP 0x1
 #define CPU_BRANCH_STEP 0x2
 #define GDBSTUB_SINGLE_STEP 0x4
@@ -44,7 +47,7 @@
 /* Code translation helpers                                                  */
 
 /* global register indexes */
-static TCGv cpu_env;
+static TCGv_ptr cpu_env;
 static char cpu_reg_names[10*3 + 22*4 /* GPR */
 #if !defined(TARGET_PPC64)
     + 10*4 + 22*5 /* SPE GPRh */
@@ -56,22 +59,24 @@ static TCGv cpu_gpr[32];
 #if !defined(TARGET_PPC64)
 static TCGv cpu_gprh[32];
 #endif
-static TCGv cpu_fpr[32];
-static TCGv cpu_avrh[32], cpu_avrl[32];
-static TCGv cpu_crf[8];
+static TCGv_i64 cpu_fpr[32];
+static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
+static TCGv_i32 cpu_crf[8];
 static TCGv cpu_nip;
 static TCGv cpu_ctr;
 static TCGv cpu_lr;
+static TCGv cpu_xer;
+static TCGv_i32 cpu_fpscr;
 
 /* dyngen register indexes */
 static TCGv cpu_T[3];
 #if defined(TARGET_PPC64)
 #define cpu_T64 cpu_T
 #else
-static TCGv cpu_T64[3];
+static TCGv_i64 cpu_T64[3];
 #endif
-static TCGv cpu_FT[3];
-static TCGv cpu_AVRh[3], cpu_AVRl[3];
+static TCGv_i64 cpu_FT[3];
+static TCGv_i64 cpu_AVRh[3], cpu_AVRl[3];
 
 #include "gen-icount.h"
 
@@ -84,100 +89,107 @@ void ppc_translate_init(void)
     if (done_init)
         return;
 
-    cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
+    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
 #if TARGET_LONG_BITS > HOST_LONG_BITS
-    cpu_T[0] = tcg_global_mem_new(TCG_TYPE_TL,
-                                  TCG_AREG0, offsetof(CPUState, t0), "T0");
-    cpu_T[1] = tcg_global_mem_new(TCG_TYPE_TL,
-                                  TCG_AREG0, offsetof(CPUState, t1), "T1");
-    cpu_T[2] = tcg_global_mem_new(TCG_TYPE_TL,
-                                  TCG_AREG0, offsetof(CPUState, t2), "T2");
+    cpu_T[0] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, t0), "T0");
+    cpu_T[1] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, t1), "T1");
+    cpu_T[2] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, t2), "T2");
 #else
-    cpu_T[0] = tcg_global_reg_new(TCG_TYPE_TL, TCG_AREG1, "T0");
-    cpu_T[1] = tcg_global_reg_new(TCG_TYPE_TL, TCG_AREG2, "T1");
-    cpu_T[2] = tcg_global_reg_new(TCG_TYPE_TL, TCG_AREG3, "T2");
+    cpu_T[0] = tcg_global_reg_new(TCG_AREG1, "T0");
+    cpu_T[1] = tcg_global_reg_new(TCG_AREG2, "T1");
+#ifdef HOST_I386
+    /* XXX: This is a temporary workaround for i386.
+     *      On i386 qemu_st32 runs out of registers.
+     *      The proper fix is to remove cpu_T.
+     */
+    cpu_T[2] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, t2), "T2");
+#else
+    cpu_T[2] = tcg_global_reg_new(TCG_AREG3, "T2");
+#endif
 #endif
 #if !defined(TARGET_PPC64)
-    cpu_T64[0] = tcg_global_mem_new(TCG_TYPE_I64,
-                                    TCG_AREG0, offsetof(CPUState, t0_64),
-                                    "T0_64");
-    cpu_T64[1] = tcg_global_mem_new(TCG_TYPE_I64,
-                                    TCG_AREG0, offsetof(CPUState, t1_64),
-                                    "T1_64");
-    cpu_T64[2] = tcg_global_mem_new(TCG_TYPE_I64,
-                                    TCG_AREG0, offsetof(CPUState, t2_64),
-                                    "T2_64");
-#endif
-
-    cpu_FT[0] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
-                                   offsetof(CPUState, ft0), "FT0");
-    cpu_FT[1] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
-                                   offsetof(CPUState, ft1), "FT1");
-    cpu_FT[2] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
-                                   offsetof(CPUState, ft2), "FT2");
-
-    cpu_AVRh[0] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
+    cpu_T64[0] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, t0_64),
+                                        "T0_64");
+    cpu_T64[1] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, t1_64),
+                                        "T1_64");
+    cpu_T64[2] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, t2_64),
+                                        "T2_64");
+#endif
+
+    cpu_FT[0] = tcg_global_mem_new_i64(TCG_AREG0,
+                                       offsetof(CPUState, ft0), "FT0");
+    cpu_FT[1] = tcg_global_mem_new_i64(TCG_AREG0,
+                                       offsetof(CPUState, ft1), "FT1");
+    cpu_FT[2] = tcg_global_mem_new_i64(TCG_AREG0,
+                                       offsetof(CPUState, ft2), "FT2");
+
+    cpu_AVRh[0] = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, avr0.u64[0]), "AVR0H");
-    cpu_AVRl[0] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
+    cpu_AVRl[0] = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, avr0.u64[1]), "AVR0L");
-    cpu_AVRh[1] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
+    cpu_AVRh[1] = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, avr1.u64[0]), "AVR1H");
-    cpu_AVRl[1] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
+    cpu_AVRl[1] = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, avr1.u64[1]), "AVR1L");
-    cpu_AVRh[2] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
+    cpu_AVRh[2] = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, avr2.u64[0]), "AVR2H");
-    cpu_AVRl[2] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
+    cpu_AVRl[2] = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, avr2.u64[1]), "AVR2L");
 
     p = cpu_reg_names;
 
     for (i = 0; i < 8; i++) {
         sprintf(p, "crf%d", i);
-        cpu_crf[i] = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
-                                        offsetof(CPUState, crf[i]), p);
+        cpu_crf[i] = tcg_global_mem_new_i32(TCG_AREG0,
+                                            offsetof(CPUState, crf[i]), p);
         p += 5;
     }
 
     for (i = 0; i < 32; i++) {
         sprintf(p, "r%d", i);
-        cpu_gpr[i] = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0,
+        cpu_gpr[i] = tcg_global_mem_new(TCG_AREG0,
                                         offsetof(CPUState, gpr[i]), p);
         p += (i < 10) ? 3 : 4;
 #if !defined(TARGET_PPC64)
         sprintf(p, "r%dH", i);
-        cpu_gprh[i] = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
-                                         offsetof(CPUState, gprh[i]), p);
+        cpu_gprh[i] = tcg_global_mem_new_i32(TCG_AREG0,
+                                             offsetof(CPUState, gprh[i]), p);
         p += (i < 10) ? 4 : 5;
 #endif
 
         sprintf(p, "fp%d", i);
-        cpu_fpr[i] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
-                                        offsetof(CPUState, fpr[i]), p);
+        cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
+                                            offsetof(CPUState, fpr[i]), p);
         p += (i < 10) ? 4 : 5;
 
         sprintf(p, "avr%dH", i);
-        cpu_avrh[i] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
+        cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUState, avr[i].u64[0]), p);
         p += (i < 10) ? 6 : 7;
 
         sprintf(p, "avr%dL", i);
-        cpu_avrl[i] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0,
+        cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUState, avr[i].u64[1]), p);
         p += (i < 10) ? 6 : 7;
     }
 
-    cpu_nip = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0,
+    cpu_nip = tcg_global_mem_new(TCG_AREG0,
                                  offsetof(CPUState, nip), "nip");
 
-    cpu_ctr = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0,
+    cpu_ctr = tcg_global_mem_new(TCG_AREG0,
                                  offsetof(CPUState, ctr), "ctr");
 
-    cpu_lr = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0,
+    cpu_lr = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUState, lr), "lr");
 
+    cpu_xer = tcg_global_mem_new(TCG_AREG0,
+                                 offsetof(CPUState, xer), "xer");
+
+    cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
+                                       offsetof(CPUState, fpscr), "fpscr");
+
     /* register helpers */
-#undef DEF_HELPER
-#define DEF_HELPER(ret, name, params) tcg_register_helper(name, #name);
+#define GEN_HELPER 2
 #include "helper.h"
 
     done_init = 1;
@@ -226,17 +238,6 @@ struct opc_handler_t {
 #endif
 };
 
-static always_inline void gen_set_Rc0 (DisasContext *ctx)
-{
-#if defined(TARGET_PPC64)
-    if (ctx->sf_mode)
-        gen_op_cmpi_64(0);
-    else
-#endif
-        gen_op_cmpi(0);
-    gen_op_set_Rc0();
-}
-
 static always_inline void gen_reset_fpstatus (void)
 {
 #ifdef CONFIG_SOFTFLOAT
@@ -252,13 +253,16 @@ static always_inline void gen_compute_fprf (int set_fprf, int set_rc)
         *gen_fprf_ptr++ = gen_opc_ptr;
 #endif
         gen_op_compute_fprf(1);
-        if (unlikely(set_rc))
-            tcg_gen_andi_i32(cpu_crf[1], cpu_T[0], 0xf);
+        if (unlikely(set_rc)) {
+            tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_T[0]);
+            tcg_gen_andi_i32(cpu_crf[1], cpu_crf[1], 0xf);
+        }
         gen_op_float_check_status();
     } else if (unlikely(set_rc)) {
         /* We always need to compute fpcc */
         gen_op_compute_fprf(0);
-        tcg_gen_andi_i32(cpu_crf[1], cpu_T[0], 0xf);
+        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_T[0]);
+        tcg_gen_andi_i32(cpu_crf[1], cpu_crf[1], 0xf);
         if (set_fprf)
             gen_op_float_check_status();
     }
@@ -704,328 +708,312 @@ static opc_handler_t invalid_handler = {
     .handler = gen_invalid,
 };
 
-/***                           Integer arithmetic                          ***/
-#define __GEN_INT_ARITH2(name, opc1, opc2, opc3, inval, type)                 \
-GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                              \
-{                                                                             \
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);                       \
-    tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);                       \
-    gen_op_##name();                                                          \
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);                       \
-    if (unlikely(Rc(ctx->opcode) != 0))                                       \
-        gen_set_Rc0(ctx);                                                     \
-}
-
-#define __GEN_INT_ARITH2_O(name, opc1, opc2, opc3, inval, type)               \
-GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                              \
-{                                                                             \
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);                       \
-    tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);                       \
-    gen_op_##name();                                                          \
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);                       \
-    if (unlikely(Rc(ctx->opcode) != 0))                                       \
-        gen_set_Rc0(ctx);                                                     \
-}
-
-#define __GEN_INT_ARITH1(name, opc1, opc2, opc3, type)                        \
-GEN_HANDLER(name, opc1, opc2, opc3, 0x0000F800, type)                         \
-{                                                                             \
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);                       \
-    gen_op_##name();                                                          \
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);                       \
-    if (unlikely(Rc(ctx->opcode) != 0))                                       \
-        gen_set_Rc0(ctx);                                                     \
-}
-#define __GEN_INT_ARITH1_O(name, opc1, opc2, opc3, type)                      \
-GEN_HANDLER(name, opc1, opc2, opc3, 0x0000F800, type)                         \
-{                                                                             \
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);                       \
-    gen_op_##name();                                                          \
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);                       \
-    if (unlikely(Rc(ctx->opcode) != 0))                                       \
-        gen_set_Rc0(ctx);                                                     \
-}
-
-/* Two operands arithmetic functions */
-#define GEN_INT_ARITH2(name, opc1, opc2, opc3, type)                          \
-__GEN_INT_ARITH2(name, opc1, opc2, opc3, 0x00000000, type)                    \
-__GEN_INT_ARITH2_O(name##o, opc1, opc2, opc3 | 0x10, 0x00000000, type)
+/***                           Integer comparison                          ***/
 
-/* Two operands arithmetic functions with no overflow allowed */
-#define GEN_INT_ARITHN(name, opc1, opc2, opc3, type)                          \
-__GEN_INT_ARITH2(name, opc1, opc2, opc3, 0x00000400, type)
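+/* Set CR field crf from comparing arg0 with arg1 (signed when s != 0): XER[SO] is copied into the SO bit, then exactly one of LT, GT or EQ is set. */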
+static always_inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
+{
+    int l1, l2, l3;
 
-/* One operand arithmetic functions */
-#define GEN_INT_ARITH1(name, opc1, opc2, opc3, type)                          \
-__GEN_INT_ARITH1(name, opc1, opc2, opc3, type)                                \
-__GEN_INT_ARITH1_O(name##o, opc1, opc2, opc3 | 0x10, type)
+    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_xer);
+    tcg_gen_shri_i32(cpu_crf[crf], cpu_crf[crf], XER_SO);
+    tcg_gen_andi_i32(cpu_crf[crf], cpu_crf[crf], 1);
 
-#if defined(TARGET_PPC64)
-#define __GEN_INT_ARITH2_64(name, opc1, opc2, opc3, inval, type)              \
-GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                              \
-{                                                                             \
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);                       \
-    tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);                       \
-    if (ctx->sf_mode)                                                         \
-        gen_op_##name##_64();                                                 \
-    else                                                                      \
-        gen_op_##name();                                                      \
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);                       \
-    if (unlikely(Rc(ctx->opcode) != 0))                                       \
-        gen_set_Rc0(ctx);                                                     \
+    l1 = gen_new_label();
+    l2 = gen_new_label();
+    l3 = gen_new_label();
+    if (s) {
+        tcg_gen_brcond_tl(TCG_COND_LT, arg0, arg1, l1);
+        tcg_gen_brcond_tl(TCG_COND_GT, arg0, arg1, l2);
+    } else {
+        tcg_gen_brcond_tl(TCG_COND_LTU, arg0, arg1, l1);
+        tcg_gen_brcond_tl(TCG_COND_GTU, arg0, arg1, l2);
+    }
+    tcg_gen_ori_i32(cpu_crf[crf], cpu_crf[crf], 1 << CRF_EQ);
+    tcg_gen_br(l3);
+    gen_set_label(l1);
+    tcg_gen_ori_i32(cpu_crf[crf], cpu_crf[crf], 1 << CRF_LT);
+    tcg_gen_br(l3);
+    gen_set_label(l2);
+    tcg_gen_ori_i32(cpu_crf[crf], cpu_crf[crf], 1 << CRF_GT);
+    gen_set_label(l3);
 }
 
-#define __GEN_INT_ARITH2_O_64(name, opc1, opc2, opc3, inval, type)            \
-GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                              \
-{                                                                             \
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);                       \
-    tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);                       \
-    if (ctx->sf_mode)                                                         \
-        gen_op_##name##_64();                                                 \
-    else                                                                      \
-        gen_op_##name();                                                      \
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);                       \
-    if (unlikely(Rc(ctx->opcode) != 0))                                       \
-        gen_set_Rc0(ctx);                                                     \
+static always_inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
+{
+    TCGv t0 = tcg_const_local_tl(arg1);
+    gen_op_cmp(arg0, t0, s, crf);
+    tcg_temp_free(t0);
 }
 
-#define __GEN_INT_ARITH1_64(name, opc1, opc2, opc3, type)                     \
-GEN_HANDLER(name, opc1, opc2, opc3, 0x0000F800, type)                         \
-{                                                                             \
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);                       \
-    if (ctx->sf_mode)                                                         \
-        gen_op_##name##_64();                                                 \
-    else                                                                      \
-        gen_op_##name();                                                      \
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);                       \
-    if (unlikely(Rc(ctx->opcode) != 0))                                       \
-        gen_set_Rc0(ctx);                                                     \
-}
-#define __GEN_INT_ARITH1_O_64(name, opc1, opc2, opc3, type)                   \
-GEN_HANDLER(name, opc1, opc2, opc3, 0x0000F800, type)                         \
-{                                                                             \
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);                       \
-    if (ctx->sf_mode)                                                         \
-        gen_op_##name##_64();                                                 \
-    else                                                                      \
-        gen_op_##name();                                                      \
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);                       \
-    if (unlikely(Rc(ctx->opcode) != 0))                                       \
-        gen_set_Rc0(ctx);                                                     \
+#if defined(TARGET_PPC64)
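+/* 32-bit comparison in 64-bit mode: sign- or zero-extend the low words, then reuse gen_op_cmp. */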
+static always_inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
+{
+    TCGv t0, t1;
+    t0 = tcg_temp_local_new();
+    t1 = tcg_temp_local_new();
+    if (s) {
+        tcg_gen_ext32s_tl(t0, arg0);
+        tcg_gen_ext32s_tl(t1, arg1);
+    } else {
+        tcg_gen_ext32u_tl(t0, arg0);
+        tcg_gen_ext32u_tl(t1, arg1);
+    }
+    gen_op_cmp(t0, t1, s, crf);
+    tcg_temp_free(t1);
+    tcg_temp_free(t0);
 }
 
-/* Two operands arithmetic functions */
-#define GEN_INT_ARITH2_64(name, opc1, opc2, opc3, type)                       \
-__GEN_INT_ARITH2_64(name, opc1, opc2, opc3, 0x00000000, type)                 \
-__GEN_INT_ARITH2_O_64(name##o, opc1, opc2, opc3 | 0x10, 0x00000000, type)
-
-/* Two operands arithmetic functions with no overflow allowed */
-#define GEN_INT_ARITHN_64(name, opc1, opc2, opc3, type)                       \
-__GEN_INT_ARITH2_64(name, opc1, opc2, opc3, 0x00000400, type)
-
-/* One operand arithmetic functions */
-#define GEN_INT_ARITH1_64(name, opc1, opc2, opc3, type)                       \
-__GEN_INT_ARITH1_64(name, opc1, opc2, opc3, type)                             \
-__GEN_INT_ARITH1_O_64(name##o, opc1, opc2, opc3 | 0x10, type)
-#else
-#define GEN_INT_ARITH2_64 GEN_INT_ARITH2
-#define GEN_INT_ARITHN_64 GEN_INT_ARITHN
-#define GEN_INT_ARITH1_64 GEN_INT_ARITH1
-#endif
-
-/* add    add.    addo    addo.    */
-static always_inline void gen_op_add (void)
+static always_inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
 {
-    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+    TCGv t0 = tcg_const_local_tl(arg1);
+    gen_op_cmp32(arg0, t0, s, crf);
+    tcg_temp_free(t0);
 }
-static always_inline void gen_op_addo (void)
+#endif
+
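+/* Set CR0 from a signed comparison of reg against zero; outside 64-bit mode only the low 32 bits are compared. */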
+static always_inline void gen_set_Rc0 (DisasContext *ctx, TCGv reg)
 {
-    tcg_gen_mov_tl(cpu_T[2], cpu_T[0]);
-    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
-    gen_op_check_addo();
-}
 #if defined(TARGET_PPC64)
-#define gen_op_add_64 gen_op_add
-static always_inline void gen_op_addo_64 (void)
-{
-    tcg_gen_mov_tl(cpu_T[2], cpu_T[0]);
-    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
-    gen_op_check_addo_64();
-}
+    if (!(ctx->sf_mode))
+        gen_op_cmpi32(reg, 0, 1, 0);
+    else
 #endif
-GEN_INT_ARITH2_64 (add,    0x1F, 0x0A, 0x08, PPC_INTEGER);
-/* addc   addc.   addco   addco.   */
-static always_inline void gen_op_addc (void)
-{
-    tcg_gen_mov_tl(cpu_T[2], cpu_T[0]);
-    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
-    gen_op_check_addc();
+        gen_op_cmpi(reg, 0, 1, 0);
 }
-static always_inline void gen_op_addco (void)
+
+/* cmp */
+GEN_HANDLER(cmp, 0x1F, 0x00, 0x00, 0x00400000, PPC_INTEGER)
 {
-    tcg_gen_mov_tl(cpu_T[2], cpu_T[0]);
-    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
-    gen_op_check_addc();
-    gen_op_check_addo();
-}
 #if defined(TARGET_PPC64)
-static always_inline void gen_op_addc_64 (void)
-{
-    tcg_gen_mov_tl(cpu_T[2], cpu_T[0]);
-    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
-    gen_op_check_addc_64();
-}
-static always_inline void gen_op_addco_64 (void)
-{
-    tcg_gen_mov_tl(cpu_T[2], cpu_T[0]);
-    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
-    gen_op_check_addc_64();
-    gen_op_check_addo_64();
-}
+    if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
+        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+                     1, crfD(ctx->opcode));
+    else
 #endif
-GEN_INT_ARITH2_64 (addc,   0x1F, 0x0A, 0x00, PPC_INTEGER);
-/* adde   adde.   addeo   addeo.   */
-static always_inline void gen_op_addeo (void)
-{
-    tcg_gen_mov_tl(cpu_T[2], cpu_T[0]);
-    gen_op_adde();
-    gen_op_check_addo();
-}
-#if defined(TARGET_PPC64)
-static always_inline void gen_op_addeo_64 (void)
-{
-    tcg_gen_mov_tl(cpu_T[2], cpu_T[0]);
-    gen_op_adde_64();
-    gen_op_check_addo_64();
+        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+                   1, crfD(ctx->opcode));
 }
-#endif
-GEN_INT_ARITH2_64 (adde,   0x1F, 0x0A, 0x04, PPC_INTEGER);
-/* addme  addme.  addmeo  addmeo.  */
-static always_inline void gen_op_addme (void)
+
+/* cmpi */
+GEN_HANDLER(cmpi, 0x0B, 0xFF, 0xFF, 0x00400000, PPC_INTEGER)
 {
-    tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
-    gen_op_add_me();
-}
 #if defined(TARGET_PPC64)
-static always_inline void gen_op_addme_64 (void)
-{
-    tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
-    gen_op_add_me_64();
-}
+    if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
+        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
+                      1, crfD(ctx->opcode));
+    else
 #endif
-GEN_INT_ARITH1_64 (addme,  0x1F, 0x0A, 0x07, PPC_INTEGER);
-/* addze  addze.  addzeo  addzeo.  */
-static always_inline void gen_op_addze (void)
-{
-    tcg_gen_mov_tl(cpu_T[2], cpu_T[0]);
-    gen_op_add_ze();
-    gen_op_check_addc();
+        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
+                    1, crfD(ctx->opcode));
 }
-static always_inline void gen_op_addzeo (void)
+
+/* cmpl */
+GEN_HANDLER(cmpl, 0x1F, 0x00, 0x01, 0x00400000, PPC_INTEGER)
 {
-    tcg_gen_mov_tl(cpu_T[2], cpu_T[0]);
-    gen_op_add_ze();
-    gen_op_check_addc();
-    gen_op_check_addo();
-}
 #if defined(TARGET_PPC64)
-static always_inline void gen_op_addze_64 (void)
-{
-    tcg_gen_mov_tl(cpu_T[2], cpu_T[0]);
-    gen_op_add_ze();
-    gen_op_check_addc_64();
-}
-static always_inline void gen_op_addzeo_64 (void)
-{
-    tcg_gen_mov_tl(cpu_T[2], cpu_T[0]);
-    gen_op_add_ze();
-    gen_op_check_addc_64();
-    gen_op_check_addo_64();
-}
+    if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
+        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+                     0, crfD(ctx->opcode));
+    else
 #endif
-GEN_INT_ARITH1_64 (addze,  0x1F, 0x0A, 0x06, PPC_INTEGER);
-/* divw   divw.   divwo   divwo.   */
-GEN_INT_ARITH2 (divw,   0x1F, 0x0B, 0x0F, PPC_INTEGER);
-/* divwu  divwu.  divwuo  divwuo.  */
-GEN_INT_ARITH2 (divwu,  0x1F, 0x0B, 0x0E, PPC_INTEGER);
-/* mulhw  mulhw.                   */
-GEN_INT_ARITHN (mulhw,  0x1F, 0x0B, 0x02, PPC_INTEGER);
-/* mulhwu mulhwu.                  */
-GEN_INT_ARITHN (mulhwu, 0x1F, 0x0B, 0x00, PPC_INTEGER);
-/* mullw  mullw.  mullwo  mullwo.  */
-GEN_INT_ARITH2 (mullw,  0x1F, 0x0B, 0x07, PPC_INTEGER);
-/* neg    neg.    nego    nego.    */
-GEN_INT_ARITH1_64 (neg,    0x1F, 0x08, 0x03, PPC_INTEGER);
-/* subf   subf.   subfo   subfo.   */
-static always_inline void gen_op_subf (void)
-{
-    tcg_gen_sub_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
+        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+                   0, crfD(ctx->opcode));
 }
-static always_inline void gen_op_subfo (void)
+
+/* cmpli */
+GEN_HANDLER(cmpli, 0x0A, 0xFF, 0xFF, 0x00400000, PPC_INTEGER)
 {
-    tcg_gen_not_tl(cpu_T[2], cpu_T[0]);
-    tcg_gen_sub_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
-    gen_op_check_addo();
-}
 #if defined(TARGET_PPC64)
-#define gen_op_subf_64 gen_op_subf
-static always_inline void gen_op_subfo_64 (void)
-{
-    tcg_gen_not_i64(cpu_T[2], cpu_T[0]);
-    tcg_gen_sub_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
-    gen_op_check_addo_64();
-}
+    if (!(ctx->sf_mode && (ctx->opcode & 0x00200000)))
+        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
+                      0, crfD(ctx->opcode));
+    else
 #endif
-GEN_INT_ARITH2_64 (subf,   0x1F, 0x08, 0x01, PPC_INTEGER);
-/* subfc  subfc.  subfco  subfco.  */
-static always_inline void gen_op_subfc (void)
-{
-    tcg_gen_sub_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
-    gen_op_check_subfc();
+        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
+                    0, crfD(ctx->opcode));
 }
-static always_inline void gen_op_subfco (void)
+
+/* isel (PowerPC 2.03 specification) */
+GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL)
 {
-    tcg_gen_not_tl(cpu_T[2], cpu_T[0]);
-    tcg_gen_sub_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
-    gen_op_check_subfc();
-    gen_op_check_addo();
+    int l1, l2;
+    uint32_t bi = rC(ctx->opcode);
+    uint32_t mask;
+    TCGv_i32 t0;
+
+    l1 = gen_new_label();
+    l2 = gen_new_label();
+
+    mask = 1 << (3 - (bi & 0x03));
+    t0 = tcg_temp_new_i32();
+    tcg_gen_andi_i32(t0, cpu_crf[bi >> 2], mask);
+    tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
+    if (rA(ctx->opcode) == 0)
+        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
+    else
+        tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+    gen_set_label(l2);
+    tcg_temp_free_i32(t0);
 }
-#if defined(TARGET_PPC64)
-static always_inline void gen_op_subfc_64 (void)
+
+/***                           Integer arithmetic                          ***/
+
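+/* Set XER[OV] and XER[SO] when the signed add (sub == 0) or subtract (sub == 1) that produced arg0 from arg1 and arg2 overflowed; only the low 32 bits are considered outside 64-bit mode. */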
+static always_inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0, TCGv arg1, TCGv arg2, int sub)
 {
-    tcg_gen_sub_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
-    gen_op_check_subfc_64();
+    int l1;
+    TCGv t0;
+
+    l1 = gen_new_label();
+    /* Start with XER OV disabled, the most likely case */
+    tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+    t0 = tcg_temp_local_new();
+    tcg_gen_xor_tl(t0, arg0, arg1);
+#if defined(TARGET_PPC64)
+    if (!ctx->sf_mode)
+        tcg_gen_ext32s_tl(t0, t0);
+#endif
+    if (sub)
+        tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0, l1);
+    else
+        tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1);
+    tcg_gen_xor_tl(t0, arg1, arg2);
+#if defined(TARGET_PPC64)
+    if (!ctx->sf_mode)
+        tcg_gen_ext32s_tl(t0, t0);
+#endif
+    if (sub)
+        tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1);
+    else
+        tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0, l1);
+    tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+    gen_set_label(l1);
+    tcg_temp_free(t0);
 }
-static always_inline void gen_op_subfco_64 (void)
+
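+/* Set XER[CA] on a carry out of an addition (result arg1 below operand arg2, unsigned) or on the absence of a borrow in a subtraction; only the low 32 bits are compared outside 64-bit mode. */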
+static always_inline void gen_op_arith_compute_ca(DisasContext *ctx, TCGv arg1, TCGv arg2, int sub)
 {
-    tcg_gen_not_i64(cpu_T[2], cpu_T[0]);
-    tcg_gen_sub_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
-    gen_op_check_subfc_64();
-    gen_op_check_addo_64();
-}
+    int l1 = gen_new_label();
+
+#if defined(TARGET_PPC64)
+    if (!(ctx->sf_mode)) {
+        TCGv t0, t1;
+        t0 = tcg_temp_new();
+        t1 = tcg_temp_new();
+
+        tcg_gen_ext32u_tl(t0, arg1);
+        tcg_gen_ext32u_tl(t1, arg2);
+        if (sub) {
+            tcg_gen_brcond_tl(TCG_COND_GTU, t0, t1, l1);
+        } else {
+            tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
+        }
+        tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
+        gen_set_label(l1);
+        tcg_temp_free(t0);
+        tcg_temp_free(t1);
+    } else
 #endif
-GEN_INT_ARITH2_64 (subfc,  0x1F, 0x08, 0x00, PPC_INTEGER);
-/* subfe  subfe.  subfeo  subfeo.  */
-static always_inline void gen_op_subfeo (void)
-{
-    tcg_gen_not_tl(cpu_T[2], cpu_T[0]);
-    gen_op_subfe();
-    gen_op_check_addo();
+    {
+        if (sub) {
+            tcg_gen_brcond_tl(TCG_COND_GTU, arg1, arg2, l1);
+        } else {
+            tcg_gen_brcond_tl(TCG_COND_GEU, arg1, arg2, l1);
+        }
+        tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
+        gen_set_label(l1);
+    }
 }
-#if defined(TARGET_PPC64)
-#define gen_op_subfe_64 gen_op_subfe
-static always_inline void gen_op_subfeo_64 (void)
+
+/* Common add function */
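+/* ret = arg1 + arg2, plus XER[CA] when add_ca; XER[CA] and XER[OV]/[SO] are updated when requested, and CR0 when Rc is set. */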
+static always_inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2,
+                                           int add_ca, int compute_ca, int compute_ov)
 {
-    tcg_gen_not_i64(cpu_T[2], cpu_T[0]);
-    gen_op_subfe_64();
-    gen_op_check_addo_64();
+    TCGv t0, t1;
+
+    if ((!compute_ca && !compute_ov) ||
+        (!TCGV_EQUAL(ret,arg1) && !TCGV_EQUAL(ret, arg2)))  {
+        t0 = ret;
+    } else {
+        t0 = tcg_temp_local_new();
+    }
+
+    if (add_ca) {
+        t1 = tcg_temp_local_new();
+        tcg_gen_andi_tl(t1, cpu_xer, (1 << XER_CA));
+        tcg_gen_shri_tl(t1, t1, XER_CA);
+    }
+
+    if (compute_ca && compute_ov) {
+        /* Start with XER CA and OV disabled, the most likely case */
+        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~((1 << XER_CA) | (1 << XER_OV)));
+    } else if (compute_ca) {
+        /* Start with XER CA disabled, the most likely case */
+        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+    } else if (compute_ov) {
+        /* Start with XER OV disabled, the most likely case */
+        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+    }
+
+    tcg_gen_add_tl(t0, arg1, arg2);
+
+    if (compute_ca) {
+        gen_op_arith_compute_ca(ctx, t0, arg1, 0);
+    }
+    if (add_ca) {
+        tcg_gen_add_tl(t0, t0, t1);
+        gen_op_arith_compute_ca(ctx, t0, t1, 0);
+        tcg_temp_free(t1);
+    }
+    if (compute_ov) {
+        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
+    }
+
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, t0);
+
+    if (!TCGV_EQUAL(t0, ret)) {
+        tcg_gen_mov_tl(ret, t0);
+        tcg_temp_free(t0);
+    }
 }
-#endif
-GEN_INT_ARITH2_64 (subfe,  0x1F, 0x08, 0x04, PPC_INTEGER);
-/* subfme subfme. subfmeo subfmeo. */
-GEN_INT_ARITH1_64 (subfme, 0x1F, 0x08, 0x07, PPC_INTEGER);
-/* subfze subfze. subfzeo subfzeo. */
-GEN_INT_ARITH1_64 (subfze, 0x1F, 0x08, 0x06, PPC_INTEGER);
+/* Add functions with two operands */
+#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov)         \
+GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER)                  \
+{                                                                             \
+    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
+                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
+                     add_ca, compute_ca, compute_ov);                         \
+}
+/* Add functions with one operand and one immediate */
+#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val,                        \
+                                add_ca, compute_ca, compute_ov)               \
+GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER)                  \
+{                                                                             \
+    TCGv t0 = tcg_const_local_tl(const_val);                                  \
+    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
+                     cpu_gpr[rA(ctx->opcode)], t0,                            \
+                     add_ca, compute_ca, compute_ov);                         \
+    tcg_temp_free(t0);                                                        \
+}
+
+/* add  add.  addo  addo. */
+GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
+GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
+/* addc  addc.  addco  addco. */
+GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
+GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
+/* adde  adde.  addeo  addeo. */
+GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
+GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
+/* addme  addme.  addmeo  addmeo.  */
+GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
+GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
+/* addze  addze.  addzeo  addzeo. */
+GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
+GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
 /* addi */
 GEN_HANDLER(addi, 0x0E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
 {
@@ -1033,54 +1021,40 @@ GEN_HANDLER(addi, 0x0E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
 
     if (rA(ctx->opcode) == 0) {
         /* li case */
-        tcg_gen_movi_tl(cpu_T[0], simm);
+        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
     } else {
-        tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
-        if (likely(simm != 0))
-            tcg_gen_addi_tl(cpu_T[0], cpu_T[0], simm);
+        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], simm);
     }
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
 }
-/* addic */
-GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
+/* addic  addic.*/
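+/* ret = arg1 + SIMM with XER[CA] updated; compute_Rc0 selects the record form (addic.), which also sets CR0. */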
+static always_inline void gen_op_addic (DisasContext *ctx, TCGv ret, TCGv arg1,
+                                        int compute_Rc0)
 {
     target_long simm = SIMM(ctx->opcode);
 
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
+    /* Start with XER CA disabled, the most likely case */
+    tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+
     if (likely(simm != 0)) {
-        tcg_gen_mov_tl(cpu_T[2], cpu_T[0]);
-        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], simm);
-#if defined(TARGET_PPC64)
-        if (ctx->sf_mode)
-            gen_op_check_addc_64();
-        else
-#endif
-            gen_op_check_addc();
+        TCGv t0 = tcg_temp_local_new();
+        tcg_gen_addi_tl(t0, arg1, simm);
+        gen_op_arith_compute_ca(ctx, t0, arg1, 0);
+        tcg_gen_mov_tl(ret, t0);
+        tcg_temp_free(t0);
     } else {
-        gen_op_clear_xer_ca();
+        tcg_gen_mov_tl(ret, arg1);
     }
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+    if (compute_Rc0) {
+        gen_set_Rc0(ctx, ret);
+    }
+}
+GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
+{
+    gen_op_addic(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0);
 }
-/* addic. */
 GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
 {
-    target_long simm = SIMM(ctx->opcode);
-
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
-    if (likely(simm != 0)) {
-        tcg_gen_mov_tl(cpu_T[2], cpu_T[0]);
-        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], simm);
-#if defined(TARGET_PPC64)
-        if (ctx->sf_mode)
-            gen_op_check_addc_64();
-        else
-#endif
-            gen_op_check_addc();
-    } else {
-        gen_op_clear_xer_ca();
-    }
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
-    gen_set_Rc0(ctx);
+    gen_op_addic(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1);
 }
 /* addis */
 GEN_HANDLER(addis, 0x0F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
@@ -1089,177 +1063,431 @@ GEN_HANDLER(addis, 0x0F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
 
     if (rA(ctx->opcode) == 0) {
         /* lis case */
-        tcg_gen_movi_tl(cpu_T[0], simm << 16);
+        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
     } else {
-        tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
-        if (likely(simm != 0))
-            tcg_gen_addi_tl(cpu_T[0], cpu_T[0], simm << 16);
+        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], simm << 16);
     }
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
 }
-/* mulli */
-GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
+
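+/* 32-bit divide: division by zero and INT32_MIN / -1 yield 0 for unsigned divides or the dividend's sign bit replicated for signed ones, and set XER[OV]/[SO] when compute_ov is requested. */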
+static always_inline void gen_op_arith_divw (DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2,
+                                             int sign, int compute_ov)
 {
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
-    gen_op_mulli(SIMM(ctx->opcode));
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+    int l1 = gen_new_label();
+    int l2 = gen_new_label();
+    TCGv_i32 t0 = tcg_temp_local_new_i32();
+    TCGv_i32 t1 = tcg_temp_local_new_i32();
+
+    tcg_gen_trunc_tl_i32(t0, arg1);
+    tcg_gen_trunc_tl_i32(t1, arg2);
+    tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1);
+    if (sign) {
+        int l3 = gen_new_label();
+        tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3);
+        tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1);
+        gen_set_label(l3);
+        tcg_gen_div_i32(t0, t0, t1);
+    } else {
+        tcg_gen_divu_i32(t0, t0, t1);
+    }
+    if (compute_ov) {
+        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+    }
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+    if (sign) {
+        tcg_gen_sari_i32(t0, t0, 31);
+    } else {
+        tcg_gen_movi_i32(t0, 0);
+    }
+    if (compute_ov) {
+        tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+    }
+    gen_set_label(l2);
+    tcg_gen_extu_i32_tl(ret, t0);
+    tcg_temp_free_i32(t0);
+    tcg_temp_free_i32(t1);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, ret);
 }
-/* subfic */
-GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
-{
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
+/* Div functions */
+#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
+GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER)                  \
+{                                                                             \
+    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
+                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
+                     sign, compute_ov);                                       \
+}
+/* divwu  divwu.  divwuo  divwuo.   */
+GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
+GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
+/* divw  divw.  divwo  divwo.   */
+GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
+GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
 #if defined(TARGET_PPC64)
-    if (ctx->sf_mode)
-        gen_op_subfic_64(SIMM(ctx->opcode));
-    else
-#endif
-        gen_op_subfic(SIMM(ctx->opcode));
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
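+/* 64-bit counterpart of gen_op_arith_divw; the special cases are division by zero and INT64_MIN / -1. */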
+static always_inline void gen_op_arith_divd (DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2,
+                                             int sign, int compute_ov)
+{
+    int l1 = gen_new_label();
+    int l2 = gen_new_label();
+
+    tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1);
+    if (sign) {
+        int l3 = gen_new_label();
+        tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3);
+        tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1);
+        gen_set_label(l3);
+        tcg_gen_div_i64(ret, arg1, arg2);
+    } else {
+        tcg_gen_divu_i64(ret, arg1, arg2);
+    }
+    if (compute_ov) {
+        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+    }
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+    if (sign) {
+        tcg_gen_sari_i64(ret, arg1, 63);
+    } else {
+        tcg_gen_movi_i64(ret, 0);
+    }
+    if (compute_ov) {
+        tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+    }
+    gen_set_label(l2);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, ret);
 }
+#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
+GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)                      \
+{                                                                             \
+    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
+                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
+                      sign, compute_ov);                                      \
+}
+/* divdu  divdu.  divduo  divduo.   */
+GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
+GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
+/* divd  divd.  divdo  divdo.   */
+GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
+GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);
+#endif
+
+/* mulhw  mulhw. */
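+/* The 32x32 multiply is performed in 64-bit temporaries and the high 32 bits of the product are written to rD. */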
+GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER)
+{
+    TCGv_i64 t0, t1;
 
+    t0 = tcg_temp_new_i64();
+    t1 = tcg_temp_new_i64();
 #if defined(TARGET_PPC64)
-/* mulhd  mulhd.                   */
-GEN_INT_ARITHN (mulhd,  0x1F, 0x09, 0x02, PPC_64B);
-/* mulhdu mulhdu.                  */
-GEN_INT_ARITHN (mulhdu, 0x1F, 0x09, 0x00, PPC_64B);
-/* mulld  mulld.  mulldo  mulldo.  */
-GEN_INT_ARITH2 (mulld,  0x1F, 0x09, 0x07, PPC_64B);
-/* divd   divd.   divdo   divdo.   */
-GEN_INT_ARITH2 (divd,   0x1F, 0x09, 0x0F, PPC_64B);
-/* divdu  divdu.  divduo  divduo.  */
-GEN_INT_ARITH2 (divdu,  0x1F, 0x09, 0x0E, PPC_64B);
-#endif
+    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
+    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
+    tcg_gen_mul_i64(t0, t0, t1);
+    tcg_gen_shri_i64(cpu_gpr[rD(ctx->opcode)], t0, 32);
+#else
+    tcg_gen_ext_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+    tcg_gen_ext_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+    tcg_gen_mul_i64(t0, t0, t1);
+    tcg_gen_shri_i64(t0, t0, 32);
+    tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+/* mulhwu  mulhwu.  */
+GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER)
+{
+    TCGv_i64 t0, t1;
 
-/***                           Integer comparison                          ***/
+    t0 = tcg_temp_new_i64();
+    t1 = tcg_temp_new_i64();
 #if defined(TARGET_PPC64)
-#define GEN_CMP(name, opc, type)                                              \
-GEN_HANDLER(name, 0x1F, 0x00, opc, 0x00400000, type)                          \
-{                                                                             \
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);                       \
-    tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);                       \
-    if (ctx->sf_mode && (ctx->opcode & 0x00200000))                           \
-        gen_op_##name##_64();                                                 \
-    else                                                                      \
-        gen_op_##name();                                                      \
-    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_T[0], 0xf);              \
+    tcg_gen_ext32u_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+    tcg_gen_ext32u_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+    tcg_gen_mul_i64(t0, t0, t1);
+    tcg_gen_shri_i64(cpu_gpr[rD(ctx->opcode)], t0, 32);
+#else
+    tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+    tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+    tcg_gen_mul_i64(t0, t0, t1);
+    tcg_gen_shri_i64(t0, t0, 32);
+    tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+/* mullw  mullw. */
+GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER)
+{
+    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+                   cpu_gpr[rB(ctx->opcode)]);
+    tcg_gen_ext32s_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)]);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
 }
+/* mullwo  mullwo. */
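+/* Overflow is detected by comparing the 64-bit product against its sign-extended low 32 bits. */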
+GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER)
+{
+    int l1;
+    TCGv_i64 t0, t1;
+
+    t0 = tcg_temp_new_i64();
+    t1 = tcg_temp_new_i64();
+    l1 = gen_new_label();
+    /* Start with XER OV disabled, the most likely case */
+    tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+#if defined(TARGET_PPC64)
+    tcg_gen_ext32s_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+    tcg_gen_ext32s_i64(t1, cpu_gpr[rB(ctx->opcode)]);
 #else
-#define GEN_CMP(name, opc, type)                                              \
-GEN_HANDLER(name, 0x1F, 0x00, opc, 0x00400000, type)                          \
+    tcg_gen_ext_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+    tcg_gen_ext_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+#endif
+    tcg_gen_mul_i64(t0, t0, t1);
+#if defined(TARGET_PPC64)
+    tcg_gen_ext32s_i64(cpu_gpr[rD(ctx->opcode)], t0);
+    tcg_gen_brcond_i64(TCG_COND_EQ, t0, cpu_gpr[rD(ctx->opcode)], l1);
+#else
+    tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0);
+    tcg_gen_ext32s_i64(t1, t0);
+    tcg_gen_brcond_i64(TCG_COND_EQ, t0, t1, l1);
+#endif
+    tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+    gen_set_label(l1);
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+/* mulli */
+GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
+{
+    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+                    SIMM(ctx->opcode));
+}
+#if defined(TARGET_PPC64)
+#define GEN_INT_ARITH_MUL_HELPER(name, opc3)                                  \
+GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)                      \
 {                                                                             \
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);                       \
-    tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);                       \
-    gen_op_##name();                                                          \
-    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_T[0], 0xf);              \
+    gen_helper_##name (cpu_gpr[rD(ctx->opcode)],                              \
+                       cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);   \
+    if (unlikely(Rc(ctx->opcode) != 0))                                       \
+        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
 }
+/* mulhdu  mulhdu. */
+GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00);
+/* mulhd  mulhd. */
+GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02);
+/* mulld  mulld. */
+GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B)
+{
+    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+                   cpu_gpr[rB(ctx->opcode)]);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+/* mulldo  mulldo. */
+GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17);
 #endif
 
-/* cmp */
-GEN_CMP(cmp, 0x00, PPC_INTEGER);
-/* cmpi */
-GEN_HANDLER(cmpi, 0x0B, 0xFF, 0xFF, 0x00400000, PPC_INTEGER)
+/* neg neg. nego nego. */
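+/* Negating the most negative representable value leaves it unchanged; in that case nego also sets XER[OV] and XER[SO]. */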
+static always_inline void gen_op_arith_neg (DisasContext *ctx, TCGv ret, TCGv arg1, int ov_check)
 {
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
+    int l1 = gen_new_label();
+    int l2 = gen_new_label();
+    TCGv t0 = tcg_temp_local_new();
 #if defined(TARGET_PPC64)
-    if (ctx->sf_mode && (ctx->opcode & 0x00200000))
-        gen_op_cmpi_64(SIMM(ctx->opcode));
-    else
+    if (ctx->sf_mode) {
+        tcg_gen_mov_tl(t0, arg1);
+        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, INT64_MIN, l1);
+    } else
 #endif
-        gen_op_cmpi(SIMM(ctx->opcode));
-    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_T[0], 0xf);
+    {
+        tcg_gen_ext32s_tl(t0, arg1);
+        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, INT32_MIN, l1);
+    }
+    tcg_gen_neg_tl(ret, arg1);
+    if (ov_check) {
+        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+    }
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+    tcg_gen_mov_tl(ret, t0);
+    if (ov_check) {
+        tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+    }
+    gen_set_label(l2);
+    tcg_temp_free(t0);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, ret);
 }
-/* cmpl */
-GEN_CMP(cmpl, 0x01, PPC_INTEGER);
-/* cmpli */
-GEN_HANDLER(cmpli, 0x0A, 0xFF, 0xFF, 0x00400000, PPC_INTEGER)
+GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER)
 {
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
-#if defined(TARGET_PPC64)
-    if (ctx->sf_mode && (ctx->opcode & 0x00200000))
-        gen_op_cmpli_64(UIMM(ctx->opcode));
-    else
-#endif
-        gen_op_cmpli(UIMM(ctx->opcode));
-    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_T[0], 0xf);
+    gen_op_arith_neg(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0);
+}
+GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER)
+{
+    gen_op_arith_neg(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1);
 }
 
-/* isel (PowerPC 2.03 specification) */
-GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL)
+/* Common subf function */
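+/* ret = arg2 - arg1, computed as ~arg1 + arg2 + XER[CA] when add_ca; XER[CA], XER[OV]/[SO] and CR0 are updated as requested. */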
+static always_inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2,
+                                            int add_ca, int compute_ca, int compute_ov)
 {
-    uint32_t bi = rC(ctx->opcode);
-    uint32_t mask;
+    TCGv t0, t1;
 
-    if (rA(ctx->opcode) == 0) {
-        tcg_gen_movi_tl(cpu_T[0], 0);
+    if ((!compute_ca && !compute_ov) ||
+        (!TCGV_EQUAL(ret, arg1) && !TCGV_EQUAL(ret, arg2)))  {
+        t0 = ret;
     } else {
-        tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rA(ctx->opcode)]);
+        t0 = tcg_temp_local_new();
     }
-    tcg_gen_mov_tl(cpu_T[2], cpu_gpr[rB(ctx->opcode)]);
-    mask = 1 << (3 - (bi & 0x03));
-    tcg_gen_mov_i32(cpu_T[0], cpu_crf[bi >> 2]);
-    gen_op_test_true(mask);
-    gen_op_isel();
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+
+    if (add_ca) {
+        t1 = tcg_temp_local_new();
+        tcg_gen_andi_tl(t1, cpu_xer, (1 << XER_CA));
+        tcg_gen_shri_tl(t1, t1, XER_CA);
+    }
+
+    if (compute_ca && compute_ov) {
+        /* Start with XER CA and OV disabled, the most likely case */
+        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~((1 << XER_CA) | (1 << XER_OV)));
+    } else if (compute_ca) {
+        /* Start with XER CA disabled, the most likely case */
+        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+    } else if (compute_ov) {
+        /* Start with XER OV disabled, the most likely case */
+        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+    }
+
+    if (add_ca) {
+        tcg_gen_not_tl(t0, arg1);
+        tcg_gen_add_tl(t0, t0, arg2);
+        gen_op_arith_compute_ca(ctx, t0, arg2, 0);
+        tcg_gen_add_tl(t0, t0, t1);
+        gen_op_arith_compute_ca(ctx, t0, t1, 0);
+        tcg_temp_free(t1);
+    } else {
+        tcg_gen_sub_tl(t0, arg2, arg1);
+        if (compute_ca) {
+            gen_op_arith_compute_ca(ctx, t0, arg2, 1);
+        }
+    }
+    if (compute_ov) {
+        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
+    }
+
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, t0);
+
+    if (!TCGV_EQUAL(t0, ret)) {
+        tcg_gen_mov_tl(ret, t0);
+        tcg_temp_free(t0);
+    }
+}
+/* Sub functions with two operands */
+#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
+GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER)                  \
+{                                                                             \
+    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
+                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
+                      add_ca, compute_ca, compute_ov);                        \
+}
+/* Sub functions with one operand and one immediate */
+#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
+                                add_ca, compute_ca, compute_ov)               \
+GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER)                  \
+{                                                                             \
+    TCGv t0 = tcg_const_local_tl(const_val);                                  \
+    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
+                      cpu_gpr[rA(ctx->opcode)], t0,                           \
+                      add_ca, compute_ca, compute_ov);                        \
+    tcg_temp_free(t0);                                                        \
+}
+/* subf  subf.  subfo  subfo. */
+GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
+GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
+/* subfc  subfc.  subfco  subfco. */
+GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
+GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
+/* subfe  subfe.  subfeo  subfeo. */
+GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
+GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
+/* subfme  subfme.  subfmeo  subfmeo.  */
+GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
+/* subfze  subfze.  subfzeo  subfzeo. */
+GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
+/* subfic */
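+/* rD = SIMM - rA with XER[CA] updated; subfic has no record form, so CR0 is left untouched. */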
+GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
+{
+    /* Start with XER CA disabled, the most likely case */
+    tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+    TCGv t0 = tcg_temp_local_new();
+    TCGv t1 = tcg_const_local_tl(SIMM(ctx->opcode));
+    tcg_gen_sub_tl(t0, t1, cpu_gpr[rA(ctx->opcode)]);
+    gen_op_arith_compute_ca(ctx, t0, t1, 1);
+    tcg_temp_free(t1);
+    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+    tcg_temp_free(t0);
 }
 
 /***                            Integer logical                            ***/
-#define __GEN_LOGICAL2(name, opc2, opc3, type)                                \
-GEN_HANDLER(name, 0x1F, opc2, opc3, 0x00000000, type)                         \
+#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
+GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type)                          \
 {                                                                             \
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);                       \
-    tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);                       \
-    gen_op_##name();                                                          \
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);                       \
+    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
+       cpu_gpr[rB(ctx->opcode)]);                                             \
     if (unlikely(Rc(ctx->opcode) != 0))                                       \
-        gen_set_Rc0(ctx);                                                     \
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
 }
-#define GEN_LOGICAL2(name, opc, type)                                         \
-__GEN_LOGICAL2(name, 0x1C, opc, type)
 
-#define GEN_LOGICAL1(name, opc, type)                                         \
+#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
 GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type)                          \
 {                                                                             \
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);                       \
-    gen_op_##name();                                                          \
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);                       \
+    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
     if (unlikely(Rc(ctx->opcode) != 0))                                       \
-        gen_set_Rc0(ctx);                                                     \
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
 }
 
 /* and & and. */
-GEN_LOGICAL2(and, 0x00, PPC_INTEGER);
+GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
 /* andc & andc. */
-GEN_LOGICAL2(andc, 0x01, PPC_INTEGER);
+GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
 /* andi. */
 GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
 {
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], UIMM(ctx->opcode));
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
-    gen_set_Rc0(ctx);
+    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode));
+    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
 }
 /* andis. */
 GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
 {
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], UIMM(ctx->opcode) << 16);
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
-    gen_set_Rc0(ctx);
+    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode) << 16);
+    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
 }
-
 /* cntlzw */
-GEN_LOGICAL1(cntlzw, 0x00, PPC_INTEGER);
+GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER)
+{
+    gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
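The helper call replaces the old gen_op_cntlzw path; a plain C sketch of the count-leading-zeros semantics the helper is assumed to implement (ref_cntlzw is illustrative only):

    static uint32_t ref_cntlzw(uint32_t rs)
    {
        uint32_t n;
        for (n = 0; n < 32; n++) {
            if (rs & (0x80000000u >> n))
                break;
        }
        return n;               /* 32 when rs == 0 */
    }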
 /* eqv & eqv. */
-GEN_LOGICAL2(eqv, 0x08, PPC_INTEGER);
+GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
 /* extsb & extsb. */
-GEN_LOGICAL1(extsb, 0x1D, PPC_INTEGER);
+GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
 /* extsh & extsh. */
-GEN_LOGICAL1(extsh, 0x1C, PPC_INTEGER);
+GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
 /* nand & nand. */
-GEN_LOGICAL2(nand, 0x0E, PPC_INTEGER);
+GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
 /* nor & nor. */
-GEN_LOGICAL2(nor, 0x03, PPC_INTEGER);
-
+GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
 /* or & or. */
 GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER)
 {
@@ -1270,55 +1498,54 @@ GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER)
     rb = rB(ctx->opcode);
     /* Optimisation for mr. ri case */
     if (rs != ra || rs != rb) {
-        tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rs]);
-        if (rs != rb) {
-            tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rb]);
-            gen_op_or();
-        }
-        tcg_gen_mov_tl(cpu_gpr[ra], cpu_T[0]);
+        if (rs != rb)
+            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
+        else
+            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
         if (unlikely(Rc(ctx->opcode) != 0))
-            gen_set_Rc0(ctx);
+            gen_set_Rc0(ctx, cpu_gpr[ra]);
     } else if (unlikely(Rc(ctx->opcode) != 0)) {
-        tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rs]);
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_gpr[rs]);
 #if defined(TARGET_PPC64)
     } else {
+        int prio = 0;
+
         switch (rs) {
         case 1:
             /* Set process priority to low */
-            gen_op_store_pri(2);
+            prio = 2;
             break;
         case 6:
             /* Set process priority to medium-low */
-            gen_op_store_pri(3);
+            prio = 3;
             break;
         case 2:
             /* Set process priority to normal */
-            gen_op_store_pri(4);
+            prio = 4;
             break;
 #if !defined(CONFIG_USER_ONLY)
         case 31:
             if (ctx->supervisor > 0) {
                 /* Set process priority to very low */
-                gen_op_store_pri(1);
+                prio = 1;
             }
             break;
         case 5:
             if (ctx->supervisor > 0) {
                 /* Set process priority to medium-high */
-                gen_op_store_pri(5);
+                prio = 5;
             }
             break;
         case 3:
             if (ctx->supervisor > 0) {
                 /* Set process priority to high */
-                gen_op_store_pri(6);
+                prio = 6;
             }
             break;
         case 7:
             if (ctx->supervisor > 1) {
                 /* Set process priority to very high */
-                gen_op_store_pri(7);
+                prio = 7;
             }
             break;
 #endif
@@ -1326,26 +1553,29 @@ GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER)
             /* nop */
             break;
         }
+        if (prio) {
+            TCGv t0 = tcg_temp_new();
+            tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, spr[SPR_PPR]));
+            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
+            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
+            tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, spr[SPR_PPR]));
+            tcg_temp_free(t0);
+        }
 #endif
     }
 }
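The TCG sequence above updates the thread-priority field of SPR_PPR; the same update as a host-side sketch, with the bit positions taken from the mask and shift used above (ref_set_ppr_prio is illustrative only):

    /* the 3-bit priority lives in PPR bits 52..50 (mask 0x001C000000000000) */
    static uint64_t ref_set_ppr_prio(uint64_t ppr, unsigned prio)
    {
        ppr &= ~0x001C000000000000ULL;           /* clear the priority field */
        ppr |= ((uint64_t)(prio & 0x7)) << 50;   /* insert the new priority */
        return ppr;
    }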
-
 /* orc & orc. */
-GEN_LOGICAL2(orc, 0x0C, PPC_INTEGER);
+GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
 /* xor & xor. */
 GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER)
 {
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
     /* Optimisation for "set to zero" case */
-    if (rS(ctx->opcode) != rB(ctx->opcode)) {
-        tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
-        gen_op_xor();
-    } else {
-        tcg_gen_movi_tl(cpu_T[0], 0);
-    }
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+    if (rS(ctx->opcode) != rB(ctx->opcode))
+        tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+    else
+        tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
 }
 /* ori */
 GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
@@ -1357,10 +1587,7 @@ GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
         /* XXX: should handle special NOPs for POWER series */
         return;
     }
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    if (likely(uimm != 0))
-        gen_op_ori(uimm);
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
 }
 /* oris */
 GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
@@ -1371,10 +1598,7 @@ GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
         /* NOP */
         return;
     }
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    if (likely(uimm != 0))
-        gen_op_ori(uimm << 16);
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
 }
 /* xori */
 GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
@@ -1385,12 +1609,8 @@ GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
         /* NOP */
         return;
     }
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    if (likely(uimm != 0))
-        gen_op_xori(uimm);
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
 }
-
 /* xoris */
 GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
 {
@@ -1400,70 +1620,69 @@ GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
         /* NOP */
         return;
     }
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    if (likely(uimm != 0))
-        gen_op_xori(uimm << 16);
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
 }
-
 /* popcntb : PowerPC 2.03 specification */
 GEN_HANDLER(popcntb, 0x1F, 0x03, 0x03, 0x0000F801, PPC_POPCNTB)
 {
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
 #if defined(TARGET_PPC64)
     if (ctx->sf_mode)
-        gen_op_popcntb_64();
+        gen_helper_popcntb_64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
     else
 #endif
-        gen_op_popcntb();
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+        gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
 }
 
 #if defined(TARGET_PPC64)
 /* extsw & extsw. */
-GEN_LOGICAL1(extsw, 0x1E, PPC_64B);
+GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
 /* cntlzd */
-GEN_LOGICAL1(cntlzd, 0x01, PPC_64B);
+GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B)
+{
+    gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
 #endif
 
 /***                             Integer rotate                            ***/
 /* rlwimi & rlwimi. */
 GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
 {
-    target_ulong mask;
     uint32_t mb, me, sh;
 
     mb = MB(ctx->opcode);
     me = ME(ctx->opcode);
     sh = SH(ctx->opcode);
-    if (likely(sh == 0)) {
-        if (likely(mb == 0 && me == 31)) {
-            tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-            goto do_store;
-        } else if (likely(mb == 31 && me == 0)) {
-            tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
-            goto do_store;
-        }
-        tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-        tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rA(ctx->opcode)]);
-        goto do_mask;
-    }
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rA(ctx->opcode)]);
-    gen_op_rotli32_T0(SH(ctx->opcode));
- do_mask:
+    if (likely(sh == 0 && mb == 0 && me == 31)) {
+        tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+    } else {
+        target_ulong mask;
+        TCGv t1;
+        TCGv t0 = tcg_temp_new();
 #if defined(TARGET_PPC64)
-    mb += 32;
-    me += 32;
-#endif
-    mask = MASK(mb, me);
-    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], mask);
-    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], ~mask);
-    gen_op_or();
- do_store:
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+        TCGv_i32 t2 = tcg_temp_new_i32();
+        tcg_gen_trunc_i64_i32(t2, cpu_gpr[rS(ctx->opcode)]);
+        tcg_gen_rotli_i32(t2, t2, sh);
+        tcg_gen_extu_i32_i64(t0, t2);
+        tcg_temp_free_i32(t2);
+#else
+        tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
+#endif
+#if defined(TARGET_PPC64)
+        mb += 32;
+        me += 32;
+#endif
+        mask = MASK(mb, me);
+        t1 = tcg_temp_new();
+        tcg_gen_andi_tl(t0, t0, mask);
+        tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
+        tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+        tcg_temp_free(t0);
+        tcg_temp_free(t1);
+    }
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
 }
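As a reference for the rotate-mask-insert sequence above, a 32-bit-view sketch of what rlwimi computes, assuming MASK(mb, me) yields ones from IBM bit mb through bit me (ref_rlwimi is illustrative only):

    static uint32_t ref_rlwimi(uint32_t rs, uint32_t ra, unsigned sh, uint32_t mask)
    {
        uint32_t r = sh ? ((rs << sh) | (rs >> (32 - sh))) : rs;  /* rotl32 */
        return (r & mask) | (ra & ~mask);        /* insert under the mask */
    }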
 /* rlwinm & rlwinm. */
 GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
@@ -1473,56 +1692,81 @@ GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
     sh = SH(ctx->opcode);
     mb = MB(ctx->opcode);
     me = ME(ctx->opcode);
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    if (likely(sh == 0)) {
-        goto do_mask;
-    }
-    if (likely(mb == 0)) {
-        if (likely(me == 31)) {
-            gen_op_rotli32_T0(sh);
-            goto do_store;
-        } else if (likely(me == (31 - sh))) {
-            gen_op_sli_T0(sh);
-            goto do_store;
-        }
-    } else if (likely(me == 31)) {
-        if (likely(sh == (32 - mb))) {
-            gen_op_srli_T0(mb);
-            goto do_store;
+
+    if (likely(mb == 0 && me == (31 - sh))) {
+        if (likely(sh == 0)) {
+            tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+        } else {
+            TCGv t0 = tcg_temp_new();
+            tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
+            tcg_gen_shli_tl(t0, t0, sh);
+            tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
+            tcg_temp_free(t0);
         }
-    }
-    gen_op_rotli32_T0(sh);
- do_mask:
+    } else if (likely(sh != 0 && me == 31 && sh == (32 - mb))) {
+        TCGv t0 = tcg_temp_new();
+        tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
+        tcg_gen_shri_tl(t0, t0, mb);
+        tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
+        tcg_temp_free(t0);
+    } else {
+        TCGv t0 = tcg_temp_new();
 #if defined(TARGET_PPC64)
-    mb += 32;
-    me += 32;
+        TCGv_i32 t1 = tcg_temp_new_i32();
+        tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
+        tcg_gen_rotli_i32(t1, t1, sh);
+        tcg_gen_extu_i32_i64(t0, t1);
+        tcg_temp_free_i32(t1);
+#else
+        tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
 #endif
-    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], MASK(mb, me));
- do_store:
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+#if defined(TARGET_PPC64)
+        mb += 32;
+        me += 32;
+#endif
+        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
+        tcg_temp_free(t0);
+    }
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
 }
 /* rlwnm & rlwnm. */
 GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
 {
     uint32_t mb, me;
+    TCGv t0;
+#if defined(TARGET_PPC64)
+    TCGv_i32 t1, t2;
+#endif
 
     mb = MB(ctx->opcode);
     me = ME(ctx->opcode);
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
-    gen_op_rotl32_T0_T1();
+    t0 = tcg_temp_new();
+    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
+#if defined(TARGET_PPC64)
+    t1 = tcg_temp_new_i32();
+    t2 = tcg_temp_new_i32();
+    tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]);
+    tcg_gen_trunc_i64_i32(t2, t0);
+    tcg_gen_rotl_i32(t1, t1, t2);
+    tcg_gen_extu_i32_i64(t0, t1);
+    tcg_temp_free_i32(t1);
+    tcg_temp_free_i32(t2);
+#else
+    tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0);
+#endif
     if (unlikely(mb != 0 || me != 31)) {
 #if defined(TARGET_PPC64)
         mb += 32;
         me += 32;
 #endif
-        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], MASK(mb, me));
+        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
+    } else {
+        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
     }
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+    tcg_temp_free(t0);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
 }
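The rotate handlers above all rely on the MASK(mb, me) convention; a sketch of that convention for the 64-bit case, assuming IBM bit numbering (bit 0 is the most significant bit) with wrap-around when mb > me — this mirrors, but is not copied from, the translate.c MASK macro:

    static uint64_t ref_mask64(unsigned mb, unsigned me)
    {
        uint64_t hi = ~(uint64_t)0 >> mb;         /* ones from IBM bit mb downwards */
        uint64_t lo = ~(uint64_t)0 << (63 - me);  /* ones from IBM bit 0 through me */
        return (mb <= me) ? (hi & lo) : (hi | lo);
    }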
 
 #if defined(TARGET_PPC64)
@@ -1560,31 +1804,22 @@ GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000,   \
 static always_inline void gen_rldinm (DisasContext *ctx, uint32_t mb,
                                       uint32_t me, uint32_t sh)
 {
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    if (likely(sh == 0)) {
-        goto do_mask;
-    }
-    if (likely(mb == 0)) {
-        if (likely(me == 63)) {
-            gen_op_rotli64_T0(sh);
-            goto do_store;
-        } else if (likely(me == (63 - sh))) {
-            gen_op_sli_T0(sh);
-            goto do_store;
-        }
-    } else if (likely(me == 63)) {
-        if (likely(sh == (64 - mb))) {
-            gen_op_srli_T0_64(mb);
-            goto do_store;
+    if (likely(sh != 0 && mb == 0 && me == (63 - sh))) {
+        tcg_gen_shli_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
+    } else if (likely(sh != 0 && me == 63 && sh == (64 - mb))) {
+        tcg_gen_shri_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], mb);
+    } else {
+        TCGv t0 = tcg_temp_new();
+        tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+        if (likely(mb == 0 && me == 63)) {
+            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+        } else {
+            tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
         }
+        tcg_temp_free(t0);
     }
-    gen_op_rotli64_T0(sh);
- do_mask:
-    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], MASK(mb, me));
- do_store:
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
 }
 /* rldicl - rldicl. */
 static always_inline void gen_rldicl (DisasContext *ctx, int mbn, int shn)
@@ -1620,15 +1855,21 @@ GEN_PPC64_R4(rldic, 0x1E, 0x04);
 static always_inline void gen_rldnm (DisasContext *ctx, uint32_t mb,
                                      uint32_t me)
 {
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
-    gen_op_rotl64_T0_T1();
+    TCGv t0;
+
+    mb = MB(ctx->opcode);
+    me = ME(ctx->opcode);
+    t0 = tcg_temp_new();
+    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3f);
+    tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
     if (unlikely(mb != 0 || me != 63)) {
-        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], MASK(mb, me));
+        tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
+    } else {
+        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
     }
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+    tcg_temp_free(t0);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
 }
 
 /* rldcl - rldcl. */
@@ -1652,87 +1893,170 @@ GEN_PPC64_R2(rldcr, 0x1E, 0x09);
 /* rldimi - rldimi. */
 static always_inline void gen_rldimi (DisasContext *ctx, int mbn, int shn)
 {
-    uint64_t mask;
     uint32_t sh, mb, me;
 
     sh = SH(ctx->opcode) | (shn << 5);
     mb = MB(ctx->opcode) | (mbn << 5);
     me = 63 - sh;
-    if (likely(sh == 0)) {
-        if (likely(mb == 0)) {
-            tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-            goto do_store;
-        }
-        tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-        tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rA(ctx->opcode)]);
-        goto do_mask;
+    if (unlikely(sh == 0 && mb == 0)) {
+        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+    } else {
+        TCGv t0, t1;
+        target_ulong mask;
+
+        t0 = tcg_temp_new();
+        tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+        t1 = tcg_temp_new();
+        mask = MASK(mb, me);
+        tcg_gen_andi_tl(t0, t0, mask);
+        tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
+        tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+        tcg_temp_free(t0);
+        tcg_temp_free(t1);
     }
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rA(ctx->opcode)]);
-    gen_op_rotli64_T0(sh);
- do_mask:
-    mask = MASK(mb, me);
-    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], mask);
-    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], ~mask);
-    gen_op_or();
- do_store:
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
 }
 GEN_PPC64_R4(rldimi, 0x1E, 0x06);
 #endif
 
 /***                             Integer shift                             ***/
 /* slw & slw. */
-__GEN_LOGICAL2(slw, 0x18, 0x00, PPC_INTEGER);
+GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER)
+{
+    TCGv t0;
+    int l1, l2;
+    l1 = gen_new_label();
+    l2 = gen_new_label();
+
+    t0 = tcg_temp_local_new();
+    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3f);
+    tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0x20, l1);
+    tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], t0);
+    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+    gen_set_label(l2);
+    tcg_temp_free(t0);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
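The label-based sequence above implements the usual slw rule; srw, sld and srd below follow the same pattern with the obvious direction and width changes. As a straight-line sketch (ref_slw is illustrative, not generated code):

    static uint64_t ref_slw(uint64_t rs, uint64_t rb)
    {
        unsigned sh = rb & 0x3f;       /* six shift-amount bits */
        if (sh > 31)
            return 0;                  /* a shift of 32..63 clears the result */
        return (uint32_t)(rs << sh);   /* 32-bit shift, zero-extended */
    }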
 /* sraw & sraw. */
-__GEN_LOGICAL2(sraw, 0x18, 0x18, PPC_INTEGER);
+GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER)
+{
+    gen_helper_sraw(cpu_gpr[rA(ctx->opcode)],
+                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
 /* srawi & srawi. */
 GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER)
 {
-    int mb, me;
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    if (SH(ctx->opcode) != 0) {
-        tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
-        mb = 32 - SH(ctx->opcode);
-        me = 31;
-#if defined(TARGET_PPC64)
-        mb += 32;
-        me += 32;
-#endif
-        gen_op_srawi(SH(ctx->opcode), MASK(mb, me));
+    int sh = SH(ctx->opcode);
+    if (sh != 0) {
+        int l1, l2;
+        TCGv t0;
+        l1 = gen_new_label();
+        l2 = gen_new_label();
+        t0 = tcg_temp_local_new();
+        tcg_gen_ext32s_tl(t0, cpu_gpr[rS(ctx->opcode)]);
+        tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1);
+        tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1ULL << sh) - 1);
+        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+        tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
+        tcg_gen_br(l2);
+        gen_set_label(l1);
+        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+        gen_set_label(l2);
+        tcg_gen_ext32s_tl(t0, cpu_gpr[rS(ctx->opcode)]);
+        tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], t0, sh);
+        tcg_temp_free(t0);
+    } else {
+        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
     }
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
 }
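The branches above encode the srawi carry rule (sradi further down follows the same rule at 64-bit width); as a compact sketch, assuming the host's right shift of a negative int is arithmetic (ref_srawi is illustrative only):

    static int64_t ref_srawi(int32_t rs, unsigned sh, int *ca)
    {
        *ca = sh != 0 && rs < 0 && ((uint32_t)rs & ((1u << sh) - 1)) != 0;
        return (int64_t)(rs >> sh);    /* sign-extended, arithmetically shifted */
    }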
 /* srw & srw. */
-__GEN_LOGICAL2(srw, 0x18, 0x10, PPC_INTEGER);
-
+GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER)
+{
+    TCGv t0, t1;
+    int l1, l2;
+    l1 = gen_new_label();
+    l2 = gen_new_label();
+
+    t0 = tcg_temp_local_new();
+    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3f);
+    tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0x20, l1);
+    tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+    t1 = tcg_temp_new();
+    tcg_gen_ext32u_tl(t1, cpu_gpr[rS(ctx->opcode)]);
+    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t1, t0);
+    tcg_temp_free(t1);
+    gen_set_label(l2);
+    tcg_temp_free(t0);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
 #if defined(TARGET_PPC64)
 /* sld & sld. */
-__GEN_LOGICAL2(sld, 0x1B, 0x00, PPC_64B);
+GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B)
+{
+    TCGv t0;
+    int l1, l2;
+    l1 = gen_new_label();
+    l2 = gen_new_label();
+
+    t0 = tcg_temp_local_new();
+    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x7f);
+    tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0x40, l1);
+    tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], t0);
+    gen_set_label(l2);
+    tcg_temp_free(t0);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
 /* srad & srad. */
-__GEN_LOGICAL2(srad, 0x1A, 0x18, PPC_64B);
+GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B)
+{
+    gen_helper_srad(cpu_gpr[rA(ctx->opcode)],
+                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
 /* sradi & sradi. */
 static always_inline void gen_sradi (DisasContext *ctx, int n)
 {
-    uint64_t mask;
-    int sh, mb, me;
-
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
-    sh = SH(ctx->opcode) + (n << 5);
+    int sh = SH(ctx->opcode) + (n << 5);
     if (sh != 0) {
-        tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
-        mb = 64 - SH(ctx->opcode);
-        me = 63;
-        mask = MASK(mb, me);
-        gen_op_sradi(sh, mask >> 32, mask);
+        int l1, l2;
+        TCGv t0;
+        l1 = gen_new_label();
+        l2 = gen_new_label();
+        t0 = tcg_temp_local_new();
+        tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rS(ctx->opcode)], 0, l1);
+        tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1ULL << sh) - 1);
+        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+        tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA);
+        tcg_gen_br(l2);
+        gen_set_label(l1);
+        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+        gen_set_label(l2);
+        tcg_temp_free(t0);
+        tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
+    } else {
+        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+        tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
     }
-    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
 }
 GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B)
 {
@@ -1743,7 +2067,25 @@ GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B)
     gen_sradi(ctx, 1);
 }
 /* srd & srd. */
-__GEN_LOGICAL2(srd, 0x1B, 0x10, PPC_64B);
+GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B)
+{
+    TCGv t0;
+    int l1, l2;
+    l1 = gen_new_label();
+    l2 = gen_new_label();
+
+    t0 = tcg_temp_local_new();
+    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x7f);
+    tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0x40, l1);
+    tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], t0);
+    gen_set_label(l2);
+    tcg_temp_free(t0);
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
 #endif
 
 /***                       Floating-Point arithmetic                       ***/
@@ -1943,8 +2285,7 @@ GEN_HANDLER(fcmpo, 0x3F, 0x00, 0x01, 0x00600001, PPC_FLOAT)
     tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rA(ctx->opcode)]);
     tcg_gen_mov_i64(cpu_FT[1], cpu_fpr[rB(ctx->opcode)]);
     gen_reset_fpstatus();
-    gen_op_fcmpo();
-    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_T[0], 0xf);
+    gen_helper_fcmpo(cpu_crf[crfD(ctx->opcode)]);
     gen_op_float_check_status();
 }
 
@@ -1958,8 +2299,7 @@ GEN_HANDLER(fcmpu, 0x3F, 0x00, 0x00, 0x00600001, PPC_FLOAT)
     tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rA(ctx->opcode)]);
     tcg_gen_mov_i64(cpu_FT[1], cpu_fpr[rB(ctx->opcode)]);
     gen_reset_fpstatus();
-    gen_op_fcmpu();
-    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_T[0], 0xf);
+    gen_helper_fcmpu(cpu_crf[crfD(ctx->opcode)]);
     gen_op_float_check_status();
 }
 
@@ -2000,8 +2340,8 @@ GEN_HANDLER(mcrfs, 0x3F, 0x00, 0x02, 0x0063F801, PPC_FLOAT)
     }
     gen_optimize_fprf();
     bfa = 4 * (7 - crfS(ctx->opcode));
-    gen_op_load_fpscr_T0(bfa);
-    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_T[0], 0xf);
+    tcg_gen_shri_i32(cpu_crf[crfD(ctx->opcode)], cpu_fpscr, bfa);
+    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
     gen_op_fpscr_resetbit(~(0xF << bfa));
 }
 
@@ -2034,8 +2374,7 @@ GEN_HANDLER(mtfsb0, 0x3F, 0x06, 0x02, 0x001FF800, PPC_FLOAT)
     if (likely(crb != 30 && crb != 29))
         gen_op_fpscr_resetbit(~(1 << crb));
     if (unlikely(Rc(ctx->opcode) != 0)) {
-        gen_op_load_fpcc();
-        gen_op_set_Rc0();
+        tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
     }
 }
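Here and in the following mtfs* handlers, the Rc=1 path copies the top of FPSCR into CR1; a sketch of that copy, assuming the usual QEMU layout where FX/FEX/VX/OX are the four most significant FPSCR bits and FPSCR_OX names the lowest of them (i.e. bit 28) — ref_cr1_from_fpscr is illustrative only:

    static uint32_t ref_cr1_from_fpscr(uint32_t fpscr)
    {
        return fpscr >> 28;    /* leaves FX, FEX, VX, OX in bits 3..0 */
    }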
 
@@ -2055,8 +2394,7 @@ GEN_HANDLER(mtfsb1, 0x3F, 0x06, 0x01, 0x001FF800, PPC_FLOAT)
     if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI))
         gen_op_fpscr_setbit(crb);
     if (unlikely(Rc(ctx->opcode) != 0)) {
-        gen_op_load_fpcc();
-        gen_op_set_Rc0();
+        tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
     }
     /* We can raise a deferred exception */
     gen_op_float_check_status();
@@ -2074,8 +2412,7 @@ GEN_HANDLER(mtfsf, 0x3F, 0x07, 0x16, 0x02010000, PPC_FLOAT)
     gen_reset_fpstatus();
     gen_op_store_fpscr(FM(ctx->opcode));
     if (unlikely(Rc(ctx->opcode) != 0)) {
-        gen_op_load_fpcc();
-        gen_op_set_Rc0();
+        tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
     }
     /* We can raise a deferred exception */
     gen_op_float_check_status();
@@ -2097,8 +2434,7 @@ GEN_HANDLER(mtfsfi, 0x3F, 0x06, 0x04, 0x006f0800, PPC_FLOAT)
     gen_reset_fpstatus();
     gen_op_store_fpscr(1 << sh);
     if (unlikely(Rc(ctx->opcode) != 0)) {
-        gen_op_load_fpcc();
-        gen_op_set_Rc0();
+        tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
     }
     /* We can raise a deferred exception */
     gen_op_float_check_status();
@@ -2189,7 +2525,7 @@ static always_inline void gen_qemu_ld##width##_ppc64(TCGv t0, TCGv t1, int flags
     if (likely(flags & 2))                                                       \
         tcg_gen_qemu_ld##width(t0, t1, flags >> 2);                              \
     else {                                                                       \
-        TCGv addr = tcg_temp_new(TCG_TYPE_TL);                                   \
+        TCGv addr = tcg_temp_new();                                   \
         tcg_gen_ext32u_tl(addr, t1);                                             \
         tcg_gen_qemu_ld##width(t0, addr, flags >> 2);                            \
         tcg_temp_free(addr);                                                     \
@@ -2209,7 +2545,7 @@ static always_inline void gen_qemu_st##width##_ppc64(TCGv t0, TCGv t1, int flags
     if (likely(flags & 2))                                                       \
         tcg_gen_qemu_st##width(t0, t1, flags >> 2);                              \
     else {                                                                       \
-        TCGv addr = tcg_temp_new(TCG_TYPE_TL);                                   \
+        TCGv addr = tcg_temp_new();                                   \
         tcg_gen_ext32u_tl(addr, t1);                                             \
         tcg_gen_qemu_st##width(t0, addr, flags >> 2);                            \
         tcg_temp_free(addr);                                                     \
@@ -2220,135 +2556,137 @@ GEN_QEMU_ST_PPC64(16)
 GEN_QEMU_ST_PPC64(32)
 GEN_QEMU_ST_PPC64(64)
 
-static always_inline void gen_qemu_ld8u(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_ld8u(TCGv arg0, TCGv arg1, int flags)
 {
-    gen_qemu_ld8u_ppc64(t0, t1, flags);
+    gen_qemu_ld8u_ppc64(arg0, arg1, flags);
 }
 
-static always_inline void gen_qemu_ld8s(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_ld8s(TCGv arg0, TCGv arg1, int flags)
 {
-    gen_qemu_ld8s_ppc64(t0, t1, flags);
+    gen_qemu_ld8s_ppc64(arg0, arg1, flags);
 }
 
-static always_inline void gen_qemu_ld16u(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_ld16u(TCGv arg0, TCGv arg1, int flags)
 {
     if (unlikely(flags & 1)) {
-        TCGv t0_32;
-        gen_qemu_ld16u_ppc64(t0, t1, flags);
-        t0_32 = tcg_temp_new(TCG_TYPE_I32);
-        tcg_gen_trunc_tl_i32(t0_32, t0);
-        tcg_gen_bswap16_i32(t0_32, t0_32);
-        tcg_gen_extu_i32_tl(t0, t0_32);
-        tcg_temp_free(t0_32);
+        TCGv_i32 t0;
+        gen_qemu_ld16u_ppc64(arg0, arg1, flags);
+        t0 = tcg_temp_new_i32();
+        tcg_gen_trunc_tl_i32(t0, arg0);
+        tcg_gen_bswap16_i32(t0, t0);
+        tcg_gen_extu_i32_tl(arg0, t0);
+        tcg_temp_free_i32(t0);
     } else
-        gen_qemu_ld16u_ppc64(t0, t1, flags);
+        gen_qemu_ld16u_ppc64(arg0, arg1, flags);
 }
 
-static always_inline void gen_qemu_ld16s(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_ld16s(TCGv arg0, TCGv arg1, int flags)
 {
     if (unlikely(flags & 1)) {
-        TCGv t0_32;
-        gen_qemu_ld16u_ppc64(t0, t1, flags);
-        t0_32 = tcg_temp_new(TCG_TYPE_I32);
-        tcg_gen_trunc_tl_i32(t0_32, t0);
-        tcg_gen_bswap16_i32(t0_32, t0_32);
-        tcg_gen_extu_i32_tl(t0, t0_32);
-        tcg_gen_ext16s_tl(t0, t0);
-        tcg_temp_free(t0_32);
+        TCGv_i32 t0;
+        gen_qemu_ld16u_ppc64(arg0, arg1, flags);
+        t0 = tcg_temp_new_i32();
+        tcg_gen_trunc_tl_i32(t0, arg0);
+        tcg_gen_bswap16_i32(t0, t0);
+        tcg_gen_extu_i32_tl(arg0, t0);
+        tcg_gen_ext16s_tl(arg0, arg0);
+        tcg_temp_free_i32(t0);
     } else
-        gen_qemu_ld16s_ppc64(t0, t1, flags);
+        gen_qemu_ld16s_ppc64(arg0, arg1, flags);
 }
 
-static always_inline void gen_qemu_ld32u(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_ld32u(TCGv arg0, TCGv arg1, int flags)
 {
     if (unlikely(flags & 1)) {
-        TCGv t0_32;
-        gen_qemu_ld32u_ppc64(t0, t1, flags);
-        t0_32 = tcg_temp_new(TCG_TYPE_I32);
-        tcg_gen_trunc_tl_i32(t0_32, t0);
-        tcg_gen_bswap_i32(t0_32, t0_32);
-        tcg_gen_extu_i32_tl(t0, t0_32);
-        tcg_temp_free(t0_32);
+        TCGv_i32 t0;
+        gen_qemu_ld32u_ppc64(arg0, arg1, flags);
+        t0 = tcg_temp_new_i32();
+        tcg_gen_trunc_tl_i32(t0, arg0);
+        tcg_gen_bswap_i32(t0, t0);
+        tcg_gen_extu_i32_tl(arg0, t0);
+        tcg_temp_free_i32(t0);
     } else
-        gen_qemu_ld32u_ppc64(t0, t1, flags);
+        gen_qemu_ld32u_ppc64(arg0, arg1, flags);
 }
 
-static always_inline void gen_qemu_ld32s(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_ld32s(TCGv arg0, TCGv arg1, int flags)
 {
     if (unlikely(flags & 1)) {
-        TCGv t0_32;
-        gen_qemu_ld32u_ppc64(t0, t1, flags);
-        t0_32 = tcg_temp_new(TCG_TYPE_I32);
-        tcg_gen_trunc_tl_i32(t0_32, t0);
-        tcg_gen_bswap_i32(t0_32, t0_32);
-        tcg_gen_ext_i32_tl(t0, t0_32);
-        tcg_temp_free(t0_32);
+        TCGv_i32 t0;
+        gen_qemu_ld32u_ppc64(arg0, arg1, flags);
+        t0 = tcg_temp_new_i32();
+        tcg_gen_trunc_tl_i32(t0, arg0);
+        tcg_gen_bswap_i32(t0, t0);
+        tcg_gen_ext_i32_tl(arg0, t0);
+        tcg_temp_free_i32(t0);
     } else
-        gen_qemu_ld32s_ppc64(t0, t1, flags);
+        gen_qemu_ld32s_ppc64(arg0, arg1, flags);
 }
 
-static always_inline void gen_qemu_ld64(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_ld64(TCGv arg0, TCGv arg1, int flags)
 {
-    gen_qemu_ld64_ppc64(t0, t1, flags);
+    gen_qemu_ld64_ppc64(arg0, arg1, flags);
     if (unlikely(flags & 1))
-       tcg_gen_bswap_i64(t0, t0);
+        tcg_gen_bswap_i64(arg0, arg0);
 }
 
-static always_inline void gen_qemu_st8(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_st8(TCGv arg0, TCGv arg1, int flags)
 {
-    gen_qemu_st8_ppc64(t0, t1, flags);
+    gen_qemu_st8_ppc64(arg0, arg1, flags);
 }
 
-static always_inline void gen_qemu_st16(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_st16(TCGv arg0, TCGv arg1, int flags)
 {
     if (unlikely(flags & 1)) {
-        TCGv temp1, temp2;
-        temp1 = tcg_temp_new(TCG_TYPE_I32);
-        tcg_gen_trunc_tl_i32(temp1, t0);
-        tcg_gen_ext16u_i32(temp1, temp1);
-        tcg_gen_bswap16_i32(temp1, temp1);
-        temp2 = tcg_temp_new(TCG_TYPE_I64);
-        tcg_gen_extu_i32_tl(temp2, temp1);
-        tcg_temp_free(temp1);
-        gen_qemu_st16_ppc64(temp2, t1, flags);
-        tcg_temp_free(temp2);
+        TCGv_i32 t0;
+        TCGv_i64 t1;
+        t0 = tcg_temp_new_i32();
+        tcg_gen_trunc_tl_i32(t0, arg0);
+        tcg_gen_ext16u_i32(t0, t0);
+        tcg_gen_bswap16_i32(t0, t0);
+        t1 = tcg_temp_new_i64();
+        tcg_gen_extu_i32_tl(t1, t0);
+        tcg_temp_free_i32(t0);
+        gen_qemu_st16_ppc64(t1, arg1, flags);
+        tcg_temp_free_i64(t1);
     } else
-        gen_qemu_st16_ppc64(t0, t1, flags);
+        gen_qemu_st16_ppc64(arg0, arg1, flags);
 }
 
-static always_inline void gen_qemu_st32(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_st32(TCGv arg0, TCGv arg1, int flags)
 {
     if (unlikely(flags & 1)) {
-        TCGv temp1, temp2;
-        temp1 = tcg_temp_new(TCG_TYPE_I32);
-        tcg_gen_trunc_tl_i32(temp1, t0);
-        tcg_gen_bswap_i32(temp1, temp1);
-        temp2 = tcg_temp_new(TCG_TYPE_I64);
-        tcg_gen_extu_i32_tl(temp2, temp1);
-        tcg_temp_free(temp1);
-        gen_qemu_st32_ppc64(temp2, t1, flags);
-        tcg_temp_free(temp2);
+        TCGv_i32 t0;
+        TCGv_i64 t1;
+        t0 = tcg_temp_new_i32();
+        tcg_gen_trunc_tl_i32(t0, arg0);
+        tcg_gen_bswap_i32(t0, t0);
+        t1 = tcg_temp_new_i64();
+        tcg_gen_extu_i32_tl(t1, t0);
+        tcg_temp_free_i32(t0);
+        gen_qemu_st32_ppc64(t1, arg1, flags);
+        tcg_temp_free_i64(t1);
     } else
-        gen_qemu_st32_ppc64(t0, t1, flags);
+        gen_qemu_st32_ppc64(arg0, arg1, flags);
 }
 
-static always_inline void gen_qemu_st64(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_st64(TCGv arg0, TCGv arg1, int flags)
 {
     if (unlikely(flags & 1)) {
-        TCGv temp = tcg_temp_new(TCG_TYPE_I64);
-        tcg_gen_bswap_i64(temp, t0);
-        gen_qemu_st64_ppc64(temp, t1, flags);
-       tcg_temp_free(temp);
+        TCGv_i64 t0 = tcg_temp_new_i64();
+        tcg_gen_bswap_i64(t0, arg0);
+        gen_qemu_st64_ppc64(t0, arg1, flags);
+        tcg_temp_free_i64(t0);
     } else
-        gen_qemu_st64_ppc64(t0, t1, flags);
+        gen_qemu_st64_ppc64(arg0, arg1, flags);
 }
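A hedged guess at how the flags word threaded through these gen_qemu_* wrappers is packed on the 64-bit build, inferred from the tests above (bit 0: byte-swap for little-endian mode, bit 1: full 64-bit effective addresses, remaining bits: the softmmu index handed to tcg_gen_qemu_ld/st); make_mem_access_flags is illustrative and does not exist in the patch:

    static inline int make_mem_access_flags(int mem_idx, int le_mode, int sf_mode)
    {
        return (mem_idx << 2) | (sf_mode ? 2 : 0) | (le_mode ? 1 : 0);
    }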
 
 
 #else /* defined(TARGET_PPC64) */
 #define GEN_QEMU_LD_PPC32(width)                                                 \
-static always_inline void gen_qemu_ld##width##_ppc32(TCGv t0, TCGv t1, int flags)\
+static always_inline void gen_qemu_ld##width##_ppc32(TCGv arg0, TCGv arg1, int flags)\
 {                                                                                \
-    tcg_gen_qemu_ld##width(t0, t1, flags >> 1);                                  \
+    tcg_gen_qemu_ld##width(arg0, arg1, flags >> 1);                                  \
 }
 GEN_QEMU_LD_PPC32(8u)
 GEN_QEMU_LD_PPC32(8s)
@@ -2356,96 +2694,76 @@ GEN_QEMU_LD_PPC32(16u)
 GEN_QEMU_LD_PPC32(16s)
 GEN_QEMU_LD_PPC32(32u)
 GEN_QEMU_LD_PPC32(32s)
-GEN_QEMU_LD_PPC32(64)
 
 #define GEN_QEMU_ST_PPC32(width)                                                 \
-static always_inline void gen_qemu_st##width##_ppc32(TCGv t0, TCGv t1, int flags)\
+static always_inline void gen_qemu_st##width##_ppc32(TCGv arg0, TCGv arg1, int flags)\
 {                                                                                \
-    tcg_gen_qemu_st##width(t0, t1, flags >> 1);                                  \
+    tcg_gen_qemu_st##width(arg0, arg1, flags >> 1);                                  \
 }
 GEN_QEMU_ST_PPC32(8)
 GEN_QEMU_ST_PPC32(16)
 GEN_QEMU_ST_PPC32(32)
-GEN_QEMU_ST_PPC32(64)
 
-static always_inline void gen_qemu_ld8u(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_ld8u(TCGv arg0, TCGv arg1, int flags)
 {
-    gen_qemu_ld8u_ppc32(t0, t1, flags >> 1);
+    gen_qemu_ld8u_ppc32(arg0, arg1, flags >> 1);
 }
 
-static always_inline void gen_qemu_ld8s(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_ld8s(TCGv arg0, TCGv arg1, int flags)
 {
-    gen_qemu_ld8s_ppc32(t0, t1, flags >> 1);
+    gen_qemu_ld8s_ppc32(arg0, arg1, flags >> 1);
 }
 
-static always_inline void gen_qemu_ld16u(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_ld16u(TCGv arg0, TCGv arg1, int flags)
 {
-    gen_qemu_ld16u_ppc32(t0, t1, flags >> 1);
+    gen_qemu_ld16u_ppc32(arg0, arg1, flags >> 1);
     if (unlikely(flags & 1))
-        tcg_gen_bswap16_i32(t0, t0);
+        tcg_gen_bswap16_i32(arg0, arg0);
 }
 
-static always_inline void gen_qemu_ld16s(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_ld16s(TCGv arg0, TCGv arg1, int flags)
 {
     if (unlikely(flags & 1)) {
-        gen_qemu_ld16u_ppc32(t0, t1, flags);
-        tcg_gen_bswap16_i32(t0, t0);
-        tcg_gen_ext16s_i32(t0, t0);
+        gen_qemu_ld16u_ppc32(arg0, arg1, flags);
+        tcg_gen_bswap16_i32(arg0, arg0);
+        tcg_gen_ext16s_i32(arg0, arg0);
     } else
-        gen_qemu_ld16s_ppc32(t0, t1, flags);
-}
-
-static always_inline void gen_qemu_ld32u(TCGv t0, TCGv t1, int flags)
-{
-    gen_qemu_ld32u_ppc32(t0, t1, flags);
-    if (unlikely(flags & 1))
-        tcg_gen_bswap_i32(t0, t0);
+        gen_qemu_ld16s_ppc32(arg0, arg1, flags);
 }
 
-static always_inline void gen_qemu_ld64(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_ld32u(TCGv arg0, TCGv arg1, int flags)
 {
-    gen_qemu_ld64_ppc32(t0, t1, flags);
+    gen_qemu_ld32u_ppc32(arg0, arg1, flags);
     if (unlikely(flags & 1))
-        tcg_gen_bswap_i64(t0, t0);
+        tcg_gen_bswap_i32(arg0, arg0);
 }
 
-static always_inline void gen_qemu_st8(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_st8(TCGv arg0, TCGv arg1, int flags)
 {
-    gen_qemu_st8_ppc32(t0, t1, flags >> 1);
+    gen_qemu_st8_ppc32(arg0, arg1, flags);
 }
 
-static always_inline void gen_qemu_st16(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_st16(TCGv arg0, TCGv arg1, int flags)
 {
     if (unlikely(flags & 1)) {
-        TCGv temp = tcg_temp_new(TCG_TYPE_I32);
-        tcg_gen_ext16u_i32(temp, t0);
+        TCGv_i32 temp = tcg_temp_new_i32();
+        tcg_gen_ext16u_i32(temp, arg0);
         tcg_gen_bswap16_i32(temp, temp);
-        gen_qemu_st16_ppc32(temp, t1, flags >> 1);
-       tcg_temp_free(temp);
-    } else
-        gen_qemu_st16_ppc32(t0, t1, flags >> 1);
-}
-
-static always_inline void gen_qemu_st32(TCGv t0, TCGv t1, int flags)
-{
-    if (unlikely(flags & 1)) {
-        TCGv temp = tcg_temp_new(TCG_TYPE_I32);
-        tcg_gen_bswap_i32(temp, t0);
-        gen_qemu_st32_ppc32(temp, t1, flags >> 1);
-       tcg_temp_free(temp);
+        gen_qemu_st16_ppc32(temp, arg1, flags);
+        tcg_temp_free_i32(temp);
     } else
-        gen_qemu_st32_ppc32(t0, t1, flags >> 1);
+        gen_qemu_st16_ppc32(arg0, arg1, flags);
 }
 
-static always_inline void gen_qemu_st64(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_st32(TCGv arg0, TCGv arg1, int flags)
 {
     if (unlikely(flags & 1)) {
-        TCGv temp = tcg_temp_new(TCG_TYPE_I64);
-        tcg_gen_bswap_i64(temp, t0);
-        gen_qemu_st64_ppc32(temp, t1, flags >> 1);
-       tcg_temp_free(temp);
+        TCGv_i32 temp = tcg_temp_new_i32();
+        tcg_gen_bswap_i32(temp, arg0);
+        gen_qemu_st32_ppc32(temp, arg1, flags);
+        tcg_temp_free_i32(temp);
     } else
-        gen_qemu_st64_ppc32(t0, t1, flags >> 1);
+        gen_qemu_st32_ppc32(arg0, arg1, flags);
 }
 
 #endif
@@ -2453,7 +2771,7 @@ static always_inline void gen_qemu_st64(TCGv t0, TCGv t1, int flags)
 #define GEN_LD(width, opc, type)                                              \
 GEN_HANDLER(l##width, opc, 0xFF, 0xFF, 0x00000000, type)                      \
 {                                                                             \
-    TCGv EA = tcg_temp_new(TCG_TYPE_TL);                                      \
+    TCGv EA = tcg_temp_new();                                      \
     gen_addr_imm_index(EA, ctx, 0);                                           \
     gen_qemu_ld##width(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx);           \
     tcg_temp_free(EA);                                                        \
@@ -2468,7 +2786,7 @@ GEN_HANDLER(l##width##u, opc, 0xFF, 0xFF, 0x00000000, type)                   \
         GEN_EXCP_INVAL(ctx);                                                  \
         return;                                                               \
     }                                                                         \
-    EA = tcg_temp_new(TCG_TYPE_TL);                                           \
+    EA = tcg_temp_new();                                           \
     if (type == PPC_64B)                                                      \
         gen_addr_imm_index(EA, ctx, 0x03);                                    \
     else                                                                      \
@@ -2487,7 +2805,7 @@ GEN_HANDLER(l##width##ux, 0x1F, opc2, opc3, 0x00000001, type)                 \
         GEN_EXCP_INVAL(ctx);                                                  \
         return;                                                               \
     }                                                                         \
-    EA = tcg_temp_new(TCG_TYPE_TL);                                           \
+    EA = tcg_temp_new();                                           \
     gen_addr_reg_index(EA, ctx);                                              \
     gen_qemu_ld##width(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx);           \
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
@@ -2497,7 +2815,7 @@ GEN_HANDLER(l##width##ux, 0x1F, opc2, opc3, 0x00000001, type)                 \
 #define GEN_LDX(width, opc2, opc3, type)                                      \
 GEN_HANDLER(l##width##x, 0x1F, opc2, opc3, 0x00000001, type)                  \
 {                                                                             \
-    TCGv EA = tcg_temp_new(TCG_TYPE_TL);                                      \
+    TCGv EA = tcg_temp_new();                                      \
     gen_addr_reg_index(EA, ctx);                                              \
     gen_qemu_ld##width(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx);           \
     tcg_temp_free(EA);                                                        \
@@ -2536,7 +2854,7 @@ GEN_HANDLER(ld, 0x3A, 0xFF, 0xFF, 0x00000000, PPC_64B)
             return;
         }
     }
-    EA = tcg_temp_new(TCG_TYPE_TL);
+    EA = tcg_temp_new();
     gen_addr_imm_index(EA, ctx, 0x03);
     if (ctx->opcode & 0x02) {
         /* lwa (lwau is undefined) */
@@ -2574,7 +2892,7 @@ GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX)
         GEN_EXCP(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
         return;
     }
-    EA = tcg_temp_new(TCG_TYPE_TL);
+    EA = tcg_temp_new();
     gen_addr_imm_index(EA, ctx, 0x0F);
     gen_qemu_ld64(cpu_gpr[rd], EA, ctx->mem_idx);
     tcg_gen_addi_tl(EA, EA, 8);
@@ -2588,7 +2906,7 @@ GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX)
 #define GEN_ST(width, opc, type)                                              \
 GEN_HANDLER(st##width, opc, 0xFF, 0xFF, 0x00000000, type)                     \
 {                                                                             \
-    TCGv EA = tcg_temp_new(TCG_TYPE_TL);                                      \
+    TCGv EA = tcg_temp_new();                                      \
     gen_addr_imm_index(EA, ctx, 0);                                           \
     gen_qemu_st##width(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx);       \
     tcg_temp_free(EA);                                                        \
@@ -2602,7 +2920,7 @@ GEN_HANDLER(st##width##u, opc, 0xFF, 0xFF, 0x00000000, type)                  \
         GEN_EXCP_INVAL(ctx);                                                  \
         return;                                                               \
     }                                                                         \
-    EA = tcg_temp_new(TCG_TYPE_TL);                                           \
+    EA = tcg_temp_new();                                           \
     if (type == PPC_64B)                                                      \
         gen_addr_imm_index(EA, ctx, 0x03);                                    \
     else                                                                      \
@@ -2620,7 +2938,7 @@ GEN_HANDLER(st##width##ux, 0x1F, opc2, opc3, 0x00000001, type)                \
         GEN_EXCP_INVAL(ctx);                                                  \
         return;                                                               \
     }                                                                         \
-    EA = tcg_temp_new(TCG_TYPE_TL);                                           \
+    EA = tcg_temp_new();                                           \
     gen_addr_reg_index(EA, ctx);                                              \
     gen_qemu_st##width(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx);           \
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
@@ -2630,7 +2948,7 @@ GEN_HANDLER(st##width##ux, 0x1F, opc2, opc3, 0x00000001, type)                \
 #define GEN_STX(width, opc2, opc3, type)                                      \
 GEN_HANDLER(st##width##x, 0x1F, opc2, opc3, 0x00000001, type)                 \
 {                                                                             \
-    TCGv EA = tcg_temp_new(TCG_TYPE_TL);                                      \
+    TCGv EA = tcg_temp_new();                                      \
     gen_addr_reg_index(EA, ctx);                                              \
     gen_qemu_st##width(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx);           \
     tcg_temp_free(EA);                                                        \
@@ -2675,7 +2993,7 @@ GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B)
             GEN_EXCP(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
             return;
         }
-        EA = tcg_temp_new(TCG_TYPE_TL);
+        EA = tcg_temp_new();
         gen_addr_imm_index(EA, ctx, 0x03);
         gen_qemu_st64(cpu_gpr[rs], EA, ctx->mem_idx);
         tcg_gen_addi_tl(EA, EA, 8);
@@ -2690,7 +3008,7 @@ GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B)
                 return;
             }
         }
-        EA = tcg_temp_new(TCG_TYPE_TL);
+        EA = tcg_temp_new();
         gen_addr_imm_index(EA, ctx, 0x03);
         gen_qemu_st64(cpu_gpr[rs], EA, ctx->mem_idx);
         if (Rc(ctx->opcode))
@@ -2703,45 +3021,53 @@ GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B)
 /* lhbrx */
 void always_inline gen_qemu_ld16ur(TCGv t0, TCGv t1, int flags)
 {
-    TCGv temp = tcg_temp_new(TCG_TYPE_I32);
-    gen_qemu_ld16u(temp, t1, flags);
+    TCGv_i32 temp = tcg_temp_new_i32();
+    gen_qemu_ld16u(t0, t1, flags);
+    tcg_gen_trunc_tl_i32(temp, t0);
     tcg_gen_bswap16_i32(temp, temp);
     tcg_gen_extu_i32_tl(t0, temp);
-    tcg_temp_free(temp);
+    tcg_temp_free_i32(temp);
 }
 GEN_LDX(16ur, 0x16, 0x18, PPC_INTEGER);
 
 /* lwbrx */
 void always_inline gen_qemu_ld32ur(TCGv t0, TCGv t1, int flags)
 {
-    TCGv temp = tcg_temp_new(TCG_TYPE_I32);
-    gen_qemu_ld32u(temp, t1, flags);
+    TCGv_i32 temp = tcg_temp_new_i32();
+    gen_qemu_ld32u(t0, t1, flags);
+    tcg_gen_trunc_tl_i32(temp, t0);
     tcg_gen_bswap_i32(temp, temp);
     tcg_gen_extu_i32_tl(t0, temp);
-    tcg_temp_free(temp);
+    tcg_temp_free_i32(temp);
 }
 GEN_LDX(32ur, 0x16, 0x10, PPC_INTEGER);
 
 /* sthbrx */
 void always_inline gen_qemu_st16r(TCGv t0, TCGv t1, int flags)
 {
-    TCGv temp = tcg_temp_new(TCG_TYPE_I32);
+    TCGv_i32 temp = tcg_temp_new_i32();
+    TCGv t2 = tcg_temp_new();
     tcg_gen_trunc_tl_i32(temp, t0);
     tcg_gen_ext16u_i32(temp, temp);
     tcg_gen_bswap16_i32(temp, temp);
-    gen_qemu_st16(temp, t1, flags);
-    tcg_temp_free(temp);
+    tcg_gen_extu_i32_tl(t2, temp);
+    tcg_temp_free_i32(temp);
+    gen_qemu_st16(t2, t1, flags);
+    tcg_temp_free(t2);
 }
 GEN_STX(16r, 0x16, 0x1C, PPC_INTEGER);
 
 /* stwbrx */
 void always_inline gen_qemu_st32r(TCGv t0, TCGv t1, int flags)
 {
-    TCGv temp = tcg_temp_new(TCG_TYPE_I32);
+    TCGv_i32 temp = tcg_temp_new_i32();
+    TCGv t2 = tcg_temp_new();
     tcg_gen_trunc_tl_i32(temp, t0);
     tcg_gen_bswap_i32(temp, temp);
-    gen_qemu_st32(temp, t1, flags);
-    tcg_temp_free(temp);
+    tcg_gen_extu_i32_tl(t2, temp);
+    tcg_temp_free_i32(temp);
+    gen_qemu_st32(t2, t1, flags);
+    tcg_temp_free(t2);
 }
 GEN_STX(32r, 0x16, 0x14, PPC_INTEGER);
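All four byte-reversed accesses above reduce to a 16- or 32-bit byte swap wrapped around the ordinary load or store; the 32-bit case as a host-side sketch (ref_bswap32 is illustrative only):

    static uint32_t ref_bswap32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
               ((v << 8) & 0x00ff0000u) | (v << 24);
    }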
 
@@ -2852,7 +3178,7 @@ GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING)
     if (ra == 0) {
         ra = rb;
     }
-    gen_op_load_xer_bc();
+    tcg_gen_andi_tl(cpu_T[1], cpu_xer, 0x7F);
     op_ldstsx(lswx, rD(ctx->opcode), ra, rb);
 }
 
@@ -2876,7 +3202,7 @@ GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING)
     /* NIP cannot be restored if the memory exception comes from a helper */
     gen_update_nip(ctx, ctx->nip - 4);
     gen_addr_reg_index(cpu_T[0], ctx);
-    gen_op_load_xer_bc();
+    tcg_gen_andi_tl(cpu_T[1], cpu_xer, 0x7F);
     op_ldsts(stsw, rS(ctx->opcode));
 }
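In both lswx and stswx the andi with 0x7F simply extracts the string byte count from the low seven bits of XER; as a one-line sketch (ref_xer_bc is illustrative only):

    static inline unsigned ref_xer_bc(uint64_t xer)
    {
        return xer & 0x7f;
    }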
 
@@ -3118,28 +3444,20 @@ static always_inline void gen_goto_tb (DisasContext *ctx, int n,
 {
     TranslationBlock *tb;
     tb = ctx->tb;
+#if defined(TARGET_PPC64)
+    if (!ctx->sf_mode)
+        dest = (uint32_t) dest;
+#endif
     if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
         likely(!ctx->singlestep_enabled)) {
         tcg_gen_goto_tb(n);
-        tcg_gen_movi_tl(cpu_T[1], dest);
-#if defined(TARGET_PPC64)
-        if (ctx->sf_mode)
-            tcg_gen_andi_tl(cpu_nip, cpu_T[1], ~3);
-        else
-#endif
-            tcg_gen_andi_tl(cpu_nip, cpu_T[1], (uint32_t)~3);
+        tcg_gen_movi_tl(cpu_nip, dest & ~3);
         tcg_gen_exit_tb((long)tb + n);
     } else {
-        tcg_gen_movi_tl(cpu_T[1], dest);
-#if defined(TARGET_PPC64)
-        if (ctx->sf_mode)
-            tcg_gen_andi_tl(cpu_nip, cpu_T[1], ~3);
-        else
-#endif
-            tcg_gen_andi_tl(cpu_nip, cpu_T[1], (uint32_t)~3);
+        tcg_gen_movi_tl(cpu_nip, dest & ~3);
         if (unlikely(ctx->singlestep_enabled)) {
             if ((ctx->singlestep_enabled &
-                 (CPU_BRANCH_STEP | CPU_SINGLE_STEP)) &&
+                (CPU_BRANCH_STEP | CPU_SINGLE_STEP)) &&
                 ctx->exception == POWERPC_EXCP_BRANCH) {
                 target_ulong tmp = ctx->nip;
                 ctx->nip = dest;
@@ -3158,11 +3476,11 @@ static always_inline void gen_goto_tb (DisasContext *ctx, int n,
 static always_inline void gen_setlr (DisasContext *ctx, target_ulong nip)
 {
 #if defined(TARGET_PPC64)
-    if (ctx->sf_mode != 0 && (nip >> 32))
-        gen_op_setlr_64(ctx->nip >> 32, ctx->nip);
+    if (ctx->sf_mode == 0)
+        tcg_gen_movi_tl(cpu_lr, (uint32_t)nip);
     else
 #endif
-        gen_op_setlr(ctx->nip);
+        tcg_gen_movi_tl(cpu_lr, nip);
 }
 
 /* b ba bl bla */
@@ -3182,10 +3500,6 @@ GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW)
         target = ctx->nip + li - 4;
     else
         target = li;
-#if defined(TARGET_PPC64)
-    if (!ctx->sf_mode)
-        target = (uint32_t)target;
-#endif
     if (LK(ctx->opcode))
         gen_setlr(ctx, ctx->nip);
     gen_goto_tb(ctx, 0, target);
@@ -3197,141 +3511,81 @@ GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW)
 
 static always_inline void gen_bcond (DisasContext *ctx, int type)
 {
-    target_ulong target = 0;
-    target_ulong li;
     uint32_t bo = BO(ctx->opcode);
-    uint32_t bi = BI(ctx->opcode);
-    uint32_t mask;
+    int l1 = gen_new_label();
+    TCGv target;
 
     ctx->exception = POWERPC_EXCP_BRANCH;
-    if ((bo & 0x4) == 0)
-        gen_op_dec_ctr();
-    switch(type) {
-    case BCOND_IM:
-        li = (target_long)((int16_t)(BD(ctx->opcode)));
-        if (likely(AA(ctx->opcode) == 0)) {
-            target = ctx->nip + li - 4;
-        } else {
-            target = li;
-        }
-#if defined(TARGET_PPC64)
-        if (!ctx->sf_mode)
-            target = (uint32_t)target;
-#endif
-        break;
-    case BCOND_CTR:
-        gen_op_movl_T1_ctr();
-        break;
-    default:
-    case BCOND_LR:
-        gen_op_movl_T1_lr();
-        break;
+    if (type == BCOND_LR || type == BCOND_CTR) {
+        target = tcg_temp_local_new();
+        if (type == BCOND_CTR)
+            tcg_gen_mov_tl(target, cpu_ctr);
+        else
+            tcg_gen_mov_tl(target, cpu_lr);
     }
     if (LK(ctx->opcode))
         gen_setlr(ctx, ctx->nip);
-    if (bo & 0x10) {
-        /* No CR condition */
-        switch (bo & 0x6) {
-        case 0:
-#if defined(TARGET_PPC64)
-            if (ctx->sf_mode)
-                gen_op_test_ctr_64();
-            else
-#endif
-                gen_op_test_ctr();
-            break;
-        case 2:
-#if defined(TARGET_PPC64)
-            if (ctx->sf_mode)
-                gen_op_test_ctrz_64();
-            else
-#endif
-                gen_op_test_ctrz();
-            break;
-        default:
-        case 4:
-        case 6:
-            if (type == BCOND_IM) {
-                gen_goto_tb(ctx, 0, target);
-                return;
-            } else {
+    if ((bo & 0x4) == 0) {
+        /* Decrement and test CTR */
+        TCGv temp = tcg_temp_new();
+        if (unlikely(type == BCOND_CTR)) {
+            GEN_EXCP_INVAL(ctx);
+            return;
+        }
+        tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
 #if defined(TARGET_PPC64)
-                if (ctx->sf_mode)
-                    tcg_gen_andi_tl(cpu_nip, cpu_T[1], ~3);
-                else
+        if (!ctx->sf_mode)
+            tcg_gen_ext32u_tl(temp, cpu_ctr);
+        else
 #endif
-                    tcg_gen_andi_tl(cpu_nip, cpu_T[1], (uint32_t)~3);
-                goto no_test;
-            }
-            break;
+            tcg_gen_mov_tl(temp, cpu_ctr);
+        if (bo & 0x2) {
+            tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
+        } else {
+            tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
         }
-    } else {
-        mask = 1 << (3 - (bi & 0x03));
-        tcg_gen_mov_i32(cpu_T[0], cpu_crf[bi >> 2]);
+        tcg_temp_free(temp);
+    }
+    if ((bo & 0x10) == 0) {
+        /* Test CR */
+        uint32_t bi = BI(ctx->opcode);
+        uint32_t mask = 1 << (3 - (bi & 0x03));
+        TCGv_i32 temp = tcg_temp_new_i32();
+
         if (bo & 0x8) {
-            switch (bo & 0x6) {
-            case 0:
-#if defined(TARGET_PPC64)
-                if (ctx->sf_mode)
-                    gen_op_test_ctr_true_64(mask);
-                else
-#endif
-                    gen_op_test_ctr_true(mask);
-                break;
-            case 2:
-#if defined(TARGET_PPC64)
-                if (ctx->sf_mode)
-                    gen_op_test_ctrz_true_64(mask);
-                else
-#endif
-                    gen_op_test_ctrz_true(mask);
-                break;
-            default:
-            case 4:
-            case 6:
-                gen_op_test_true(mask);
-                break;
-            }
+            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
+            tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
         } else {
-            switch (bo & 0x6) {
-            case 0:
-#if defined(TARGET_PPC64)
-                if (ctx->sf_mode)
-                    gen_op_test_ctr_false_64(mask);
-                else
-#endif
-                    gen_op_test_ctr_false(mask);
-                break;
-            case 2:
-#if defined(TARGET_PPC64)
-                if (ctx->sf_mode)
-                    gen_op_test_ctrz_false_64(mask);
-                else
-#endif
-                    gen_op_test_ctrz_false(mask);
-                break;
-            default:
-            case 4:
-            case 6:
-                gen_op_test_false(mask);
-                break;
-            }
+            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
+            tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
         }
+        tcg_temp_free_i32(temp);
     }
     if (type == BCOND_IM) {
-        int l1 = gen_new_label();
-        gen_op_jz_T0(l1);
-        gen_goto_tb(ctx, 0, target);
+        target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
+        if (likely(AA(ctx->opcode) == 0)) {
+            gen_goto_tb(ctx, 0, ctx->nip + li - 4);
+        } else {
+            gen_goto_tb(ctx, 0, li);
+        }
         gen_set_label(l1);
         gen_goto_tb(ctx, 1, ctx->nip);
     } else {
 #if defined(TARGET_PPC64)
-        if (ctx->sf_mode)
-            gen_op_btest_T1_64(ctx->nip >> 32, ctx->nip);
+        if (!(ctx->sf_mode))
+            tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
+        else
+#endif
+            tcg_gen_andi_tl(cpu_nip, target, ~3);
+        tcg_gen_exit_tb(0);
+        gen_set_label(l1);
+#if defined(TARGET_PPC64)
+        if (!(ctx->sf_mode))
+            tcg_gen_movi_tl(cpu_nip, (uint32_t)ctx->nip);
         else
 #endif
-            gen_op_btest_T1(ctx->nip);
-    no_test:
+            tcg_gen_movi_tl(cpu_nip, ctx->nip);
         tcg_gen_exit_tb(0);
     }
 }
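
The gen_bcond() rewrite folds the old gen_op_test_* helpers into inline TCG brcond/label sequences. As a reading aid, here is a minimal standalone C sketch (editor's illustration, not part of the patch) of the taken/not-taken decision those sequences encode; the flat 32-bit CR indexing used here is an assumption for brevity, while the patch keeps CR as eight 4-bit cpu_crf fields.

    #include <stdint.h>
    #include <stdio.h>

    /* BO bit 0x04 clear: decrement CTR and test it (0x02 selects == 0 vs != 0).
       BO bit 0x10 clear: also test CR bit BI (0x08 selects set vs clear). */
    static int bcond_taken(uint32_t bo, uint32_t bi, uint32_t cr, uint32_t *ctr)
    {
        int ctr_ok = 1, cond_ok = 1;

        if ((bo & 0x04) == 0) {
            *ctr -= 1;
            ctr_ok = (bo & 0x02) ? (*ctr == 0) : (*ctr != 0);
        }
        if ((bo & 0x10) == 0) {
            uint32_t crbit = (cr >> (31 - bi)) & 1;
            cond_ok = (bo & 0x08) ? (crbit != 0) : (crbit == 0);
        }
        return ctr_ok && cond_ok;
    }

    int main(void)
    {
        uint32_t ctr = 3;
        /* bdnz-style branch: decrement CTR, keep branching while it is non-zero */
        while (bcond_taken(0x10, 0, 0, &ctr))
            printf("loop, ctr=%u\n", ctr);
        return 0;
    }
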
@@ -3352,47 +3606,53 @@ GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW)
 }
 
 /***                      Condition register logical                       ***/
-#define GEN_CRLOGIC(op, opc)                                                  \
-GEN_HANDLER(cr##op, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER)                 \
+#define GEN_CRLOGIC(name, tcg_op, opc)                                        \
+GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER)                   \
 {                                                                             \
     uint8_t bitmask;                                                          \
     int sh;                                                                   \
-    tcg_gen_mov_i32(cpu_T[0], cpu_crf[crbA(ctx->opcode) >> 2]);               \
+    TCGv_i32 t0, t1;                                                          \
     sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03);             \
+    t0 = tcg_temp_new_i32();                                                  \
     if (sh > 0)                                                               \
-        gen_op_srli_T0(sh);                                                   \
+        tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh);            \
     else if (sh < 0)                                                          \
-        gen_op_sli_T0(-sh);                                                   \
-    tcg_gen_mov_i32(cpu_T[1], cpu_crf[crbB(ctx->opcode) >> 2]);               \
+        tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh);           \
+    else                                                                      \
+        tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]);                 \
+    t1 = tcg_temp_new_i32();                                                  \
     sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03);             \
     if (sh > 0)                                                               \
-        gen_op_srli_T1(sh);                                                   \
+        tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh);            \
     else if (sh < 0)                                                          \
-        gen_op_sli_T1(-sh);                                                   \
-    gen_op_##op();                                                            \
+        tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh);           \
+    else                                                                      \
+        tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]);                 \
+    tcg_op(t0, t0, t1);                                                       \
     bitmask = 1 << (3 - (crbD(ctx->opcode) & 0x03));                          \
-    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], bitmask);                             \
-    tcg_gen_andi_i32(cpu_T[1], cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask);    \
-    gen_op_or();                                                              \
-    tcg_gen_andi_i32(cpu_crf[crbD(ctx->opcode) >> 2], cpu_T[0], 0xf);         \
+    tcg_gen_andi_i32(t0, t0, bitmask);                                        \
+    tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask);          \
+    tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1);                  \
+    tcg_temp_free_i32(t0);                                                    \
+    tcg_temp_free_i32(t1);                                                    \
 }
 
 /* crand */
-GEN_CRLOGIC(and, 0x08);
+GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08);
 /* crandc */
-GEN_CRLOGIC(andc, 0x04);
+GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04);
 /* creqv */
-GEN_CRLOGIC(eqv, 0x09);
+GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09);
 /* crnand */
-GEN_CRLOGIC(nand, 0x07);
+GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07);
 /* crnor */
-GEN_CRLOGIC(nor, 0x01);
+GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01);
 /* cror */
-GEN_CRLOGIC(or, 0x0E);
+GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E);
 /* crorc */
-GEN_CRLOGIC(orc, 0x0D);
+GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D);
 /* crxor */
-GEN_CRLOGIC(xor, 0x06);
+GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06);
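
The rewritten GEN_CRLOGIC macro shifts whole 4-bit CR fields so the two source bits line up before applying the TCG op and re-inserting the result bit. A plain-C sketch (editor's illustration, not part of the patch) of the same net effect, extracting single bits instead of aligning fields:

    #include <stdint.h>
    #include <stdio.h>

    /* Eight 4-bit CR fields, bit 3 being the leftmost bit of each field. */
    static void cr_logic(uint8_t crf[8], int bd, int ba, int bb,
                         uint32_t (*op)(uint32_t, uint32_t))
    {
        uint32_t a = (crf[ba >> 2] >> (3 - (ba & 3))) & 1;
        uint32_t b = (crf[bb >> 2] >> (3 - (bb & 3))) & 1;
        uint32_t r = op(a, b) & 1;
        uint32_t mask = 1u << (3 - (bd & 3));

        crf[bd >> 2] = (crf[bd >> 2] & ~mask) | (r ? mask : 0);
    }

    static uint32_t op_and(uint32_t a, uint32_t b) { return a & b; }

    int main(void)
    {
        uint8_t crf[8] = { 0x8, 0x4, 0, 0, 0, 0, 0, 0 };
        /* crand example: crbD = 8, crbA = 0, crbB = 5 (CR bit numbers 0..31) */
        cr_logic(crf, 8, 0, 5, op_and);
        printf("crf[2] = 0x%x\n", crf[2]);   /* 0x8 */
        return 0;
    }
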
 /* mcrf */
 GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER)
 {
@@ -3509,10 +3769,9 @@ GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B)
 /* mcrxr */
 GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC)
 {
-    gen_op_load_xer_cr();
-    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_T[0], 0xf);
-    gen_op_clear_xer_ov();
-    gen_op_clear_xer_ca();
+    tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], cpu_xer);
+    tcg_gen_shri_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], XER_CA);
+    tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_SO | 1 << XER_OV | 1 << XER_CA));
 }
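
The XER accesses here and in the lscbx/dlmzb hunks below manipulate cpu_xer directly instead of going through gen_op_load_xer_* helpers. A standalone sketch (editor's illustration; the XER_* values are assumptions matching the constants the patch references) of the bit layout involved:

    #include <stdint.h>
    #include <stdio.h>

    #define XER_SO  31   /* summary overflow */
    #define XER_OV  30   /* overflow */
    #define XER_CA  29   /* carry */
    #define XER_CMP 8    /* string-compare byte (POWER lscbx) */
    #define XER_BC  0    /* byte count, low 7 bits */

    int main(void)
    {
        uint32_t xer = (1u << XER_SO) | (1u << XER_CA) | (0x41u << XER_CMP) | 5;

        printf("bc  = %u\n", (xer >> XER_BC) & 0x7F);    /* 5 */
        printf("cmp = 0x%02x\n", (xer >> XER_CMP) & 0xFF);
        printf("crf = 0x%x\n", (xer >> XER_CA) & 0xF);   /* what mcrxr above stores */
        xer &= ~(1u << XER_SO | 1u << XER_OV | 1u << XER_CA);
        printf("xer = 0x%08x\n", xer);
        return 0;
    }
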
 
 /* mfcr */
@@ -3524,12 +3783,11 @@ GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC)
         crm = CRM(ctx->opcode);
         if (likely((crm ^ (crm - 1)) == 0)) {
             crn = ffs(crm);
-            tcg_gen_mov_i32(cpu_T[0], cpu_crf[7 - crn]);
+            tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
         }
     } else {
-        gen_op_load_cr();
+        gen_helper_load_cr(cpu_gpr[rD(ctx->opcode)]);
     }
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
 }
 
 /* mfmsr */
@@ -3621,14 +3879,18 @@ GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC)
 {
     uint32_t crm, crn;
 
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
     crm = CRM(ctx->opcode);
     if (likely((ctx->opcode & 0x00100000) || (crm ^ (crm - 1)) == 0)) {
+        TCGv_i32 temp = tcg_temp_new_i32();
         crn = ffs(crm);
-        gen_op_srli_T0(crn * 4);
-        tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_T[0], 0xf);
+        tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
+        tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4);
+        tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
+        tcg_temp_free_i32(temp);
     } else {
-        gen_op_store_cr(crm);
+        TCGv_i32 temp = tcg_const_i32(crm);
+        gen_helper_store_cr(cpu_gpr[rS(ctx->opcode)], temp);
+        tcg_temp_free_i32(temp);
     }
 }
 
@@ -3740,10 +4002,10 @@ GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000001, PPC_MISC)
 GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE)
 {
     /* XXX: specification says this is treated as a load by the MMU */
-    TCGv temp = tcg_temp_new(TCG_TYPE_TL);
-    gen_addr_reg_index(temp, ctx);
-    gen_qemu_ld8u(temp, temp, ctx->mem_idx);
-    tcg_temp_free(temp);
+    TCGv t0 = tcg_temp_new();
+    gen_addr_reg_index(t0, ctx);
+    gen_qemu_ld8u(t0, t0, ctx->mem_idx);
+    tcg_temp_free(t0);
 }
 
 /* dcbi (Supervisor only) */
@@ -3757,9 +4019,9 @@ GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE)
         GEN_EXCP_PRIVOPC(ctx);
         return;
     }
-    EA = tcg_temp_new(TCG_TYPE_TL);
+    EA = tcg_temp_new();
     gen_addr_reg_index(EA, ctx);
-    val = tcg_temp_new(TCG_TYPE_TL);
+    val = tcg_temp_new();
     /* XXX: specification says this should be treated as a store by the MMU */
     gen_qemu_ld8u(val, EA, ctx->mem_idx);
     gen_qemu_st8(val, EA, ctx->mem_idx);
@@ -3772,10 +4034,10 @@ GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE)
 GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE)
 {
     /* XXX: specification says this is treated as a load by the MMU */
-    TCGv temp = tcg_temp_new(TCG_TYPE_TL);
-    gen_addr_reg_index(temp, ctx);
-    gen_qemu_ld8u(temp, temp, ctx->mem_idx);
-    tcg_temp_free(temp);
+    TCGv t0 = tcg_temp_new();
+    gen_addr_reg_index(t0, ctx);
+    gen_qemu_ld8u(t0, t0, ctx->mem_idx);
+    tcg_temp_free(t0);
 }
 
 /* dcbt */
@@ -4180,7 +4442,7 @@ GEN_HANDLER(abs, 0x1F, 0x08, 0x0B, 0x0000F800, PPC_POWER_BR)
     gen_op_POWER_abs();
     tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* abso - abso. */
@@ -4190,7 +4452,7 @@ GEN_HANDLER(abso, 0x1F, 0x08, 0x1B, 0x0000F800, PPC_POWER_BR)
     gen_op_POWER_abso();
     tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* clcs */
@@ -4210,7 +4472,7 @@ GEN_HANDLER(div, 0x1F, 0x0B, 0x0A, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_div();
     tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* divo - divo. */
@@ -4221,7 +4483,7 @@ GEN_HANDLER(divo, 0x1F, 0x0B, 0x1A, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_divo();
     tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* divs - divs. */
@@ -4232,7 +4494,7 @@ GEN_HANDLER(divs, 0x1F, 0x0B, 0x0B, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_divs();
     tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* divso - divso. */
@@ -4243,7 +4505,7 @@ GEN_HANDLER(divso, 0x1F, 0x0B, 0x1B, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_divso();
     tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* doz - doz. */
@@ -4254,7 +4516,7 @@ GEN_HANDLER(doz, 0x1F, 0x08, 0x08, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_doz();
     tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* dozo - dozo. */
@@ -4265,7 +4527,7 @@ GEN_HANDLER(dozo, 0x1F, 0x08, 0x18, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_dozo();
     tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* dozi */
@@ -4310,12 +4572,14 @@ GEN_HANDLER(lscbx, 0x1F, 0x15, 0x08, 0x00000000, PPC_POWER_BR)
     }
     /* NIP cannot be restored if the memory exception comes from a helper */
     gen_update_nip(ctx, ctx->nip - 4);
-    gen_op_load_xer_bc();
-    gen_op_load_xer_cmp();
+    tcg_gen_andi_tl(cpu_T[1], cpu_xer, 0x7F);
+    tcg_gen_shri_tl(cpu_T[2], cpu_xer, XER_CMP);
+    tcg_gen_andi_tl(cpu_T[2], cpu_T[2], 0xFF);
     op_POWER_lscbx(rD(ctx->opcode), ra, rb);
-    gen_op_store_xer_bc();
+    tcg_gen_andi_tl(cpu_xer, cpu_xer, ~0x7F);
+    tcg_gen_or_tl(cpu_xer, cpu_xer, cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* maskg - maskg. */
@@ -4326,7 +4590,7 @@ GEN_HANDLER(maskg, 0x1F, 0x1D, 0x00, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_maskg();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* maskir - maskir. */
@@ -4338,7 +4602,7 @@ GEN_HANDLER(maskir, 0x1F, 0x1D, 0x10, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_maskir();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* mul - mul. */
@@ -4349,7 +4613,7 @@ GEN_HANDLER(mul, 0x1F, 0x0B, 0x03, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_mul();
     tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* mulo - mulo. */
@@ -4360,7 +4624,7 @@ GEN_HANDLER(mulo, 0x1F, 0x0B, 0x13, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_mulo();
     tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* nabs - nabs. */
@@ -4370,7 +4634,7 @@ GEN_HANDLER(nabs, 0x1F, 0x08, 0x0F, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_nabs();
     tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* nabso - nabso. */
@@ -4380,7 +4644,7 @@ GEN_HANDLER(nabso, 0x1F, 0x08, 0x1F, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_nabso();
     tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* rlmi - rlmi. */
@@ -4396,7 +4660,7 @@ GEN_HANDLER(rlmi, 0x16, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_rlmi(MASK(mb, me), ~MASK(mb, me));
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* rrib - rrib. */
@@ -4408,7 +4672,7 @@ GEN_HANDLER(rrib, 0x1F, 0x19, 0x10, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_rrib();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* sle - sle. */
@@ -4419,7 +4683,7 @@ GEN_HANDLER(sle, 0x1F, 0x19, 0x04, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_sle();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* sleq - sleq. */
@@ -4430,7 +4694,7 @@ GEN_HANDLER(sleq, 0x1F, 0x19, 0x06, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_sleq();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* sliq - sliq. */
@@ -4441,7 +4705,7 @@ GEN_HANDLER(sliq, 0x1F, 0x18, 0x05, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_sle();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* slliq - slliq. */
@@ -4452,7 +4716,7 @@ GEN_HANDLER(slliq, 0x1F, 0x18, 0x07, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_sleq();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* sllq - sllq. */
@@ -4463,7 +4727,7 @@ GEN_HANDLER(sllq, 0x1F, 0x18, 0x06, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_sllq();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* slq - slq. */
@@ -4474,7 +4738,7 @@ GEN_HANDLER(slq, 0x1F, 0x18, 0x04, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_slq();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* sraiq - sraiq. */
@@ -4485,7 +4749,7 @@ GEN_HANDLER(sraiq, 0x1F, 0x18, 0x1D, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_sraq();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* sraq - sraq. */
@@ -4496,7 +4760,7 @@ GEN_HANDLER(sraq, 0x1F, 0x18, 0x1C, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_sraq();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* sre - sre. */
@@ -4507,7 +4771,7 @@ GEN_HANDLER(sre, 0x1F, 0x19, 0x14, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_sre();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* srea - srea. */
@@ -4518,7 +4782,7 @@ GEN_HANDLER(srea, 0x1F, 0x19, 0x1C, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_srea();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* sreq */
@@ -4529,7 +4793,7 @@ GEN_HANDLER(sreq, 0x1F, 0x19, 0x16, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_sreq();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* sriq */
@@ -4540,7 +4804,7 @@ GEN_HANDLER(sriq, 0x1F, 0x18, 0x15, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_srq();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* srliq */
@@ -4552,7 +4816,7 @@ GEN_HANDLER(srliq, 0x1F, 0x18, 0x17, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_srlq();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* srlq */
@@ -4563,7 +4827,7 @@ GEN_HANDLER(srlq, 0x1F, 0x18, 0x16, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_srlq();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* srq */
@@ -4574,7 +4838,7 @@ GEN_HANDLER(srq, 0x1F, 0x18, 0x14, 0x00000000, PPC_POWER_BR)
     gen_op_POWER_srq();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
     if (unlikely(Rc(ctx->opcode) != 0))
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_T[0]);
 }
 
 /* PowerPC 602 specific instructions */
@@ -4914,8 +5178,11 @@ static always_inline void gen_405_mulladd_insn (DisasContext *ctx,
                                                 int opc2, int opc3,
                                                 int ra, int rb, int rt, int Rc)
 {
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[ra]);
-    tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rb]);
+    TCGv t0, t1;
+
+    t0 = tcg_temp_local_new();
+    t1 = tcg_temp_local_new();
+
     switch (opc3 & 0x0D) {
     case 0x05:
         /* macchw    - macchw.    - macchwo   - macchwo.   */
@@ -4923,13 +5190,17 @@ static always_inline void gen_405_mulladd_insn (DisasContext *ctx,
         /* nmacchw   - nmacchw.   - nmacchwo  - nmacchwo.  */
         /* nmacchws  - nmacchws.  - nmacchwso - nmacchwso. */
         /* mulchw - mulchw. */
-        gen_op_405_mulchw();
+        tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
+        tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
+        tcg_gen_ext16s_tl(t1, t1);
         break;
     case 0x04:
         /* macchwu   - macchwu.   - macchwuo  - macchwuo.  */
         /* macchwsu  - macchwsu.  - macchwsuo - macchwsuo. */
         /* mulchwu - mulchwu. */
-        gen_op_405_mulchwu();
+        tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
+        tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
+        tcg_gen_ext16u_tl(t1, t1);
         break;
     case 0x01:
         /* machhw    - machhw.    - machhwo   - machhwo.   */
@@ -4937,13 +5208,19 @@ static always_inline void gen_405_mulladd_insn (DisasContext *ctx,
         /* nmachhw   - nmachhw.   - nmachhwo  - nmachhwo.  */
         /* nmachhws  - nmachhws.  - nmachhwso - nmachhwso. */
         /* mulhhw - mulhhw. */
-        gen_op_405_mulhhw();
+        tcg_gen_sari_tl(t0, cpu_gpr[ra], 16);
+        tcg_gen_ext16s_tl(t0, t0);
+        tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
+        tcg_gen_ext16s_tl(t1, t1);
         break;
     case 0x00:
         /* machhwu   - machhwu.   - machhwuo  - machhwuo.  */
         /* machhwsu  - machhwsu.  - machhwsuo - machhwsuo. */
         /* mulhhwu - mulhhwu. */
-        gen_op_405_mulhhwu();
+        tcg_gen_shri_tl(t0, cpu_gpr[ra], 16);
+        tcg_gen_ext16u_tl(t0, t0);
+        tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
+        tcg_gen_ext16u_tl(t1, t1);
         break;
     case 0x0D:
         /* maclhw    - maclhw.    - maclhwo   - maclhwo.   */
@@ -4951,43 +5228,70 @@ static always_inline void gen_405_mulladd_insn (DisasContext *ctx,
         /* nmaclhw   - nmaclhw.   - nmaclhwo  - nmaclhwo.  */
         /* nmaclhws  - nmaclhws.  - nmaclhwso - nmaclhwso. */
         /* mullhw - mullhw. */
-        gen_op_405_mullhw();
+        tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
+        tcg_gen_ext16s_tl(t1, cpu_gpr[rb]);
         break;
     case 0x0C:
         /* maclhwu   - maclhwu.   - maclhwuo  - maclhwuo.  */
         /* maclhwsu  - maclhwsu.  - maclhwsuo - maclhwsuo. */
         /* mullhwu - mullhwu. */
-        gen_op_405_mullhwu();
+        tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
+        tcg_gen_ext16u_tl(t1, cpu_gpr[rb]);
         break;
     }
-    if (opc2 & 0x02) {
-        /* nmultiply-and-accumulate (0x0E) */
-        gen_op_neg();
-    }
     if (opc2 & 0x04) {
-        /* (n)multiply-and-accumulate (0x0C - 0x0E) */
-        tcg_gen_mov_tl(cpu_T[2], cpu_gpr[rt]);
-        tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
-        gen_op_405_add_T0_T2();
-    }
-    if (opc3 & 0x10) {
-        /* Check overflow */
-        if (opc3 & 0x01)
-            gen_op_check_addo();
-        else
-            gen_op_405_check_ovu();
-    }
-    if (opc3 & 0x02) {
-        /* Saturate */
-        if (opc3 & 0x01)
-            gen_op_405_check_sat();
-        else
-            gen_op_405_check_satu();
+        /* (n)multiply-and-accumulate (0x0C / 0x0E) */
+        tcg_gen_mul_tl(t1, t0, t1);
+        if (opc2 & 0x02) {
+            /* nmultiply-and-accumulate (0x0E) */
+            tcg_gen_sub_tl(t0, cpu_gpr[rt], t1);
+        } else {
+            /* multiply-and-accumulate (0x0C) */
+            tcg_gen_add_tl(t0, cpu_gpr[rt], t1);
+        }
+
+        if (opc3 & 0x12) {
+            /* Check overflow and/or saturate */
+            int l1 = gen_new_label();
+
+            if (opc3 & 0x10) {
+                /* Start with XER OV disabled, the most likely case */
+                tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+            }
+            if (opc3 & 0x01) {
+                /* Signed */
+                tcg_gen_xor_tl(t1, cpu_gpr[rt], t1);
+                tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+                tcg_gen_xor_tl(t1, cpu_gpr[rt], t0);
+                tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1);
+                if (opc3 & 0x02) {
+                    /* Saturate */
+                    tcg_gen_sari_tl(t0, cpu_gpr[rt], 31);
+                    tcg_gen_xori_tl(t0, t0, 0x7fffffff);
+                }
+            } else {
+                /* Unsigned */
+                tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
+                if (opc3 & 0x02) {
+                    /* Saturate */
+                    tcg_gen_movi_tl(t0, UINT32_MAX);
+                }
+            }
+            if (opc3 & 0x10) {
+                /* Check overflow */
+                tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+            }
+            gen_set_label(l1);
+            tcg_gen_mov_tl(cpu_gpr[rt], t0);
+        }
+    } else {
+        tcg_gen_mul_tl(cpu_gpr[rt], t0, t1);
     }
-    tcg_gen_mov_tl(cpu_gpr[rt], cpu_T[0]);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
     if (unlikely(Rc) != 0) {
         /* Update Rc0 */
-        gen_set_Rc0(ctx);
+        gen_set_Rc0(ctx, cpu_gpr[rt]);
     }
 }
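
Before the multiply, each 405 form above extracts its halfword operands with ext16s/ext16u or an arithmetic/logical shift by 16. A plain-C sketch (editor's illustration, not part of the patch) of two representative selections:

    #include <stdint.h>
    #include <stdio.h>

    /* chw = cross halfwords, hhw = high halfwords, lhw = low halfwords */
    static int32_t mul_chw(uint32_t ra, uint32_t rb)    /* mulchw-style, signed */
    {
        int32_t a = (int16_t)ra;            /* low half of rA, sign-extended */
        int32_t b = (int16_t)(rb >> 16);    /* high half of rB, sign-extended */
        return a * b;
    }

    static uint32_t mul_hhwu(uint32_t ra, uint32_t rb)  /* mulhhwu-style, unsigned */
    {
        uint32_t a = ra >> 16;              /* high half of rA, zero-extended */
        uint32_t b = rb >> 16;              /* high half of rB, zero-extended */
        return a * b;
    }

    int main(void)
    {
        printf("mulchw  = %d\n", mul_chw(0x0000FFFFu, 0x00020000u));    /* -2 */
        printf("mulhhwu = %u\n", mul_hhwu(0xFFFF0000u, 0x00020000u));   /* 131070 */
        return 0;
    }
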
 
@@ -5199,9 +5503,9 @@ GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON)
         GEN_EXCP_PRIVOPC(ctx);
         return;
     }
-    EA = tcg_temp_new(TCG_TYPE_TL);
+    EA = tcg_temp_new();
     gen_addr_reg_index(EA, ctx);
-    val = tcg_temp_new(TCG_TYPE_TL);
+    val = tcg_temp_new();
     gen_qemu_ld32u(val, EA, ctx->mem_idx);
     tcg_temp_free(val);
     tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
@@ -5500,10 +5804,12 @@ GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC)
     tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
     gen_op_440_dlmzb();
     tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
-    gen_op_store_xer_bc();
+    tcg_gen_andi_tl(cpu_xer, cpu_xer, ~0x7F);
+    tcg_gen_or_tl(cpu_xer, cpu_xer, cpu_T[0]);
     if (Rc(ctx->opcode)) {
         gen_op_440_dlmzb_update_Rc();
-        tcg_gen_andi_i32(cpu_crf[0], cpu_T[0], 0xf);
+        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_T[0]);
+        tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 0xf);
     }
 }
 
@@ -5590,7 +5896,7 @@ GEN_VR_STX(vxl, 0x07, 0x0F);
 /***                           SPE extension                               ***/
 /* Register moves */
 
-static always_inline void gen_load_gpr64(TCGv t, int reg) {
+static always_inline void gen_load_gpr64(TCGv_i64 t, int reg) {
 #if defined(TARGET_PPC64)
     tcg_gen_mov_i64(t, cpu_gpr[reg]);
 #else
@@ -5598,15 +5904,15 @@ static always_inline void gen_load_gpr64(TCGv t, int reg) {
 #endif
 }
 
-static always_inline void gen_store_gpr64(int reg, TCGv t) {
+static always_inline void gen_store_gpr64(int reg, TCGv_i64 t) {
 #if defined(TARGET_PPC64)
     tcg_gen_mov_i64(cpu_gpr[reg], t);
 #else
+    TCGv_i64 tmp = tcg_temp_new_i64();
     tcg_gen_trunc_i64_i32(cpu_gpr[reg], t);
-    TCGv tmp = tcg_temp_new(TCG_TYPE_I64);
     tcg_gen_shri_i64(tmp, t, 32);
     tcg_gen_trunc_i64_i32(cpu_gprh[reg], tmp);
-    tcg_temp_free(tmp);
+    tcg_temp_free_i64(tmp);
 #endif
 }
 
@@ -5710,203 +6016,556 @@ GEN_SPE_STX(name)
 GEN_SPEOP_LD(name, sh);                                                       \
 GEN_SPEOP_ST(name, sh)
 
-/* SPE arithmetic and logic */
-#define GEN_SPEOP_ARITH2(name)                                                \
+/* SPE logic */
+#if defined(TARGET_PPC64)
+#define GEN_SPEOP_LOGIC2(name, tcg_op)                                        \
 static always_inline void gen_##name (DisasContext *ctx)                      \
 {                                                                             \
     if (unlikely(!ctx->spe_enabled)) {                                        \
         GEN_EXCP_NO_AP(ctx);                                                  \
         return;                                                               \
     }                                                                         \
-    gen_load_gpr64(cpu_T64[0], rA(ctx->opcode));                              \
-    gen_load_gpr64(cpu_T64[1], rB(ctx->opcode));                              \
-    gen_op_##name();                                                          \
-    gen_store_gpr64(rD(ctx->opcode), cpu_T64[0]);                             \
+    tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],                \
+           cpu_gpr[rB(ctx->opcode)]);                                         \
 }
-
-#define GEN_SPEOP_TCG_ARITH2(name)                                            \
+#else
+#define GEN_SPEOP_LOGIC2(name, tcg_op)                                        \
 static always_inline void gen_##name (DisasContext *ctx)                      \
 {                                                                             \
     if (unlikely(!ctx->spe_enabled)) {                                        \
         GEN_EXCP_NO_AP(ctx);                                                  \
         return;                                                               \
     }                                                                         \
-    TCGv t0 = tcg_temp_new(TCG_TYPE_I64);                                     \
-    TCGv t1 = tcg_temp_new(TCG_TYPE_I64);                                     \
-    gen_load_gpr64(t0, rA(ctx->opcode));                                      \
-    gen_load_gpr64(t1, rB(ctx->opcode));                                      \
-    gen_op_##name(t0, t1);                                                    \
-    gen_store_gpr64(rD(ctx->opcode), t0);                                     \
-    tcg_temp_free(t0);                                                        \
-    tcg_temp_free(t1);                                                        \
+    tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],                \
+           cpu_gpr[rB(ctx->opcode)]);                                         \
+    tcg_op(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],              \
+           cpu_gprh[rB(ctx->opcode)]);                                        \
 }
+#endif
+
+GEN_SPEOP_LOGIC2(evand, tcg_gen_and_tl);
+GEN_SPEOP_LOGIC2(evandc, tcg_gen_andc_tl);
+GEN_SPEOP_LOGIC2(evxor, tcg_gen_xor_tl);
+GEN_SPEOP_LOGIC2(evor, tcg_gen_or_tl);
+GEN_SPEOP_LOGIC2(evnor, tcg_gen_nor_tl);
+GEN_SPEOP_LOGIC2(eveqv, tcg_gen_eqv_tl);
+GEN_SPEOP_LOGIC2(evorc, tcg_gen_orc_tl);
+GEN_SPEOP_LOGIC2(evnand, tcg_gen_nand_tl);
 
-#define GEN_SPEOP_ARITH1(name)                                                \
+/* SPE logic immediate */
+#if defined(TARGET_PPC64)
+#define GEN_SPEOP_TCG_LOGIC_IMM2(name, tcg_opi)                               \
 static always_inline void gen_##name (DisasContext *ctx)                      \
 {                                                                             \
     if (unlikely(!ctx->spe_enabled)) {                                        \
         GEN_EXCP_NO_AP(ctx);                                                  \
         return;                                                               \
     }                                                                         \
-    gen_load_gpr64(cpu_T64[0], rA(ctx->opcode));                              \
-    gen_op_##name();                                                          \
-    gen_store_gpr64(rD(ctx->opcode), cpu_T64[0]);                             \
+    TCGv_i32 t0 = tcg_temp_local_new_i32();                                   \
+    TCGv_i32 t1 = tcg_temp_local_new_i32();                                   \
+    TCGv_i64 t2 = tcg_temp_local_new_i64();                                   \
+    tcg_gen_trunc_i64_i32(t0, cpu_gpr[rA(ctx->opcode)]);                      \
+    tcg_opi(t0, t0, rB(ctx->opcode));                                         \
+    tcg_gen_shri_i64(t2, cpu_gpr[rA(ctx->opcode)], 32);                       \
+    tcg_gen_trunc_i64_i32(t1, t2);                                            \
+    tcg_temp_free_i64(t2);                                                    \
+    tcg_opi(t1, t1, rB(ctx->opcode));                                         \
+    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);                 \
+    tcg_temp_free_i32(t0);                                                    \
+    tcg_temp_free_i32(t1);                                                    \
 }
-
-#define GEN_SPEOP_COMP(name)                                                  \
+#else
+#define GEN_SPEOP_TCG_LOGIC_IMM2(name, tcg_opi)                               \
 static always_inline void gen_##name (DisasContext *ctx)                      \
 {                                                                             \
     if (unlikely(!ctx->spe_enabled)) {                                        \
         GEN_EXCP_NO_AP(ctx);                                                  \
         return;                                                               \
     }                                                                         \
-    gen_load_gpr64(cpu_T64[0], rA(ctx->opcode));                              \
-    gen_load_gpr64(cpu_T64[1], rB(ctx->opcode));                              \
-    gen_op_##name();                                                          \
-    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_T[0], 0xf);              \
+    tcg_opi(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],               \
+            rB(ctx->opcode));                                                 \
+    tcg_opi(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],             \
+            rB(ctx->opcode));                                                 \
 }
+#endif
+GEN_SPEOP_TCG_LOGIC_IMM2(evslwi, tcg_gen_shli_i32);
+GEN_SPEOP_TCG_LOGIC_IMM2(evsrwiu, tcg_gen_shri_i32);
+GEN_SPEOP_TCG_LOGIC_IMM2(evsrwis, tcg_gen_sari_i32);
+GEN_SPEOP_TCG_LOGIC_IMM2(evrlwi, tcg_gen_rotli_i32);
 
-/* Logical */
-static always_inline void gen_op_evand (TCGv t0, TCGv t1)
-{
-    tcg_gen_and_i64(t0, t0, t1);
+/* SPE arithmetic */
+#if defined(TARGET_PPC64)
+#define GEN_SPEOP_ARITH1(name, tcg_op)                                        \
+static always_inline void gen_##name (DisasContext *ctx)                      \
+{                                                                             \
+    if (unlikely(!ctx->spe_enabled)) {                                        \
+        GEN_EXCP_NO_AP(ctx);                                                  \
+        return;                                                               \
+    }                                                                         \
+    TCGv_i32 t0 = tcg_temp_local_new_i32();                                   \
+    TCGv_i32 t1 = tcg_temp_local_new_i32();                                   \
+    TCGv_i64 t2 = tcg_temp_local_new_i64();                                   \
+    tcg_gen_trunc_i64_i32(t0, cpu_gpr[rA(ctx->opcode)]);                      \
+    tcg_op(t0, t0);                                                           \
+    tcg_gen_shri_i64(t2, cpu_gpr[rA(ctx->opcode)], 32);                       \
+    tcg_gen_trunc_i64_i32(t1, t2);                                            \
+    tcg_temp_free_i64(t2);                                                    \
+    tcg_op(t1, t1);                                                           \
+    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);                 \
+    tcg_temp_free_i32(t0);                                                    \
+    tcg_temp_free_i32(t1);                                                    \
 }
-
-static always_inline void gen_op_evandc (TCGv t0, TCGv t1)
-{
-    tcg_gen_not_i64(t1, t1);
-    tcg_gen_and_i64(t0, t0, t1);
+#else
+#define GEN_SPEOP_ARITH1(name, tcg_op)                                        \
+static always_inline void gen_##name (DisasContext *ctx)                      \
+{                                                                             \
+    if (unlikely(!ctx->spe_enabled)) {                                        \
+        GEN_EXCP_NO_AP(ctx);                                                  \
+        return;                                                               \
+    }                                                                         \
+    tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);               \
+    tcg_op(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);             \
 }
+#endif
 
-static always_inline void gen_op_evxor (TCGv t0, TCGv t1)
+static always_inline void gen_op_evabs (TCGv_i32 ret, TCGv_i32 arg1)
 {
-    tcg_gen_xor_i64(t0, t0, t1);
-}
+    int l1 = gen_new_label();
+    int l2 = gen_new_label();
 
-static always_inline void gen_op_evor (TCGv t0, TCGv t1)
-{
-    tcg_gen_or_i64(t0, t0, t1);
+    tcg_gen_brcondi_i32(TCG_COND_GE, arg1, 0, l1);
+    tcg_gen_neg_i32(ret, arg1);
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+    tcg_gen_mov_i32(ret, arg1);
+    gen_set_label(l2);
 }
-
-static always_inline void gen_op_evnor (TCGv t0, TCGv t1)
+GEN_SPEOP_ARITH1(evabs, gen_op_evabs);
+GEN_SPEOP_ARITH1(evneg, tcg_gen_neg_i32);
+GEN_SPEOP_ARITH1(evextsb, tcg_gen_ext8s_i32);
+GEN_SPEOP_ARITH1(evextsh, tcg_gen_ext16s_i32);
+static always_inline void gen_op_evrndw (TCGv_i32 ret, TCGv_i32 arg1)
 {
-    tcg_gen_or_i64(t0, t0, t1);
-    tcg_gen_not_i64(t0, t0);
+    tcg_gen_addi_i32(ret, arg1, 0x8000);
+    tcg_gen_ext16u_i32(ret, ret);
 }
+GEN_SPEOP_ARITH1(evrndw, gen_op_evrndw);
+GEN_SPEOP_ARITH1(evcntlsw, gen_helper_cntlsw32);
+GEN_SPEOP_ARITH1(evcntlzw, gen_helper_cntlzw32);
 
-static always_inline void gen_op_eveqv (TCGv t0, TCGv t1)
-{
-    tcg_gen_xor_i64(t0, t0, t1);
-    tcg_gen_not_i64(t0, t0);
+#if defined(TARGET_PPC64)
+#define GEN_SPEOP_ARITH2(name, tcg_op)                                        \
+static always_inline void gen_##name (DisasContext *ctx)                      \
+{                                                                             \
+    if (unlikely(!ctx->spe_enabled)) {                                        \
+        GEN_EXCP_NO_AP(ctx);                                                  \
+        return;                                                               \
+    }                                                                         \
+    TCGv_i32 t0 = tcg_temp_local_new_i32();                                   \
+    TCGv_i32 t1 = tcg_temp_local_new_i32();                                   \
+    TCGv_i32 t2 = tcg_temp_local_new_i32();                                   \
+    TCGv_i64 t3 = tcg_temp_local_new_i64();                                   \
+    tcg_gen_trunc_i64_i32(t0, cpu_gpr[rA(ctx->opcode)]);                      \
+    tcg_gen_trunc_i64_i32(t2, cpu_gpr[rB(ctx->opcode)]);                      \
+    tcg_op(t0, t0, t2);                                                       \
+    tcg_gen_shri_i64(t3, cpu_gpr[rA(ctx->opcode)], 32);                       \
+    tcg_gen_trunc_i64_i32(t1, t3);                                            \
+    tcg_gen_shri_i64(t3, cpu_gpr[rB(ctx->opcode)], 32);                       \
+    tcg_gen_trunc_i64_i32(t2, t3);                                            \
+    tcg_temp_free_i64(t3);                                                    \
+    tcg_op(t1, t1, t2);                                                       \
+    tcg_temp_free_i32(t2);                                                    \
+    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);                 \
+    tcg_temp_free_i32(t0);                                                    \
+    tcg_temp_free_i32(t1);                                                    \
 }
-
-static always_inline void gen_op_evorc (TCGv t0, TCGv t1)
+#else
+#define GEN_SPEOP_ARITH2(name, tcg_op)                                        \
+static always_inline void gen_##name (DisasContext *ctx)                      \
+{                                                                             \
+    if (unlikely(!ctx->spe_enabled)) {                                        \
+        GEN_EXCP_NO_AP(ctx);                                                  \
+        return;                                                               \
+    }                                                                         \
+    tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],                \
+           cpu_gpr[rB(ctx->opcode)]);                                         \
+    tcg_op(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],              \
+           cpu_gprh[rB(ctx->opcode)]);                                        \
+}
+#endif
+
+static always_inline void gen_op_evsrwu (TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    TCGv_i32 t0;
+    int l1, l2;
+
+    l1 = gen_new_label();
+    l2 = gen_new_label();
+    t0 = tcg_temp_local_new_i32();
+    /* No error here: 6 bits are used */
+    tcg_gen_andi_i32(t0, arg2, 0x3F);
+    tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
+    tcg_gen_shr_i32(ret, arg1, t0);
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+    tcg_gen_movi_i32(ret, 0);
+    gen_set_label(l2);
+    tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evsrwu, gen_op_evsrwu);
+static always_inline void gen_op_evsrws (TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    TCGv_i32 t0;
+    int l1, l2;
+
+    l1 = gen_new_label();
+    l2 = gen_new_label();
+    t0 = tcg_temp_local_new_i32();
+    /* No error here: 6 bits are used */
+    tcg_gen_andi_i32(t0, arg2, 0x3F);
+    tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
+    tcg_gen_sar_i32(ret, arg1, t0);
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+    tcg_gen_movi_i32(ret, 0);
+    gen_set_label(l2);
+    tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evsrws, gen_op_evsrws);
+static always_inline void gen_op_evslw (TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    TCGv_i32 t0;
+    int l1, l2;
+
+    l1 = gen_new_label();
+    l2 = gen_new_label();
+    t0 = tcg_temp_local_new_i32();
+    /* No error here: 6 bits are used */
+    tcg_gen_andi_i32(t0, arg2, 0x3F);
+    tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
+    tcg_gen_shl_i32(ret, arg1, t0);
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+    tcg_gen_movi_i32(ret, 0);
+    gen_set_label(l2);
+    tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evslw, gen_op_evslw);
+static always_inline void gen_op_evrlw (TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    TCGv_i32 t0 = tcg_temp_new_i32();
+    tcg_gen_andi_i32(t0, arg2, 0x1F);
+    tcg_gen_rotl_i32(ret, arg1, t0);
+    tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evrlw, gen_op_evrlw);
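
The evsrwu/evsrws/evslw helpers above mask the per-element shift amount to 6 bits and branch around the shift when it is 32 or more, since a 32-bit TCG shift by 32 or larger is not defined. A plain-C sketch (editor's illustration, not part of the patch) of the resulting element semantics:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ev_srwu_elem(uint32_t x, uint32_t n)
    {
        n &= 0x3F;                       /* 6-bit shift amount */
        return (n >= 32) ? 0 : (x >> n); /* amounts >= 32 clear the element */
    }

    static uint32_t ev_slw_elem(uint32_t x, uint32_t n)
    {
        n &= 0x3F;
        return (n >= 32) ? 0 : (x << n);
    }

    int main(void)
    {
        printf("0x%08x\n", ev_srwu_elem(0x80000000u, 4));   /* 0x08000000 */
        printf("0x%08x\n", ev_slw_elem(1, 33));             /* 0x00000000 */
        return 0;
    }
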
+static always_inline void gen_evmergehi (DisasContext *ctx)
 {
-    tcg_gen_not_i64(t1, t1);
-    tcg_gen_or_i64(t0, t0, t1);
+    if (unlikely(!ctx->spe_enabled)) {
+        GEN_EXCP_NO_AP(ctx);
+        return;
+    }
+#if defined(TARGET_PPC64)
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 32);
+    tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], 0xFFFFFFFF00000000ULL);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], t0, t1);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+#else
+    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+    tcg_gen_mov_i32(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+#endif
 }
-
-static always_inline void gen_op_evnand (TCGv t0, TCGv t1)
+GEN_SPEOP_ARITH2(evaddw, tcg_gen_add_i32);
+static always_inline void gen_op_evsubf (TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    tcg_gen_and_i64(t0, t0, t1);
-    tcg_gen_not_i64(t0, t0);
+    tcg_gen_sub_i32(ret, arg2, arg1);
 }
+GEN_SPEOP_ARITH2(evsubfw, gen_op_evsubf);
 
-GEN_SPEOP_TCG_ARITH2(evand);
-GEN_SPEOP_TCG_ARITH2(evandc);
-GEN_SPEOP_TCG_ARITH2(evxor);
-GEN_SPEOP_TCG_ARITH2(evor);
-GEN_SPEOP_TCG_ARITH2(evnor);
-GEN_SPEOP_TCG_ARITH2(eveqv);
-GEN_SPEOP_TCG_ARITH2(evorc);
-GEN_SPEOP_TCG_ARITH2(evnand);
-GEN_SPEOP_ARITH2(evsrwu);
-GEN_SPEOP_ARITH2(evsrws);
-GEN_SPEOP_ARITH2(evslw);
-GEN_SPEOP_ARITH2(evrlw);
-GEN_SPEOP_ARITH2(evmergehi);
-GEN_SPEOP_ARITH2(evmergelo);
-GEN_SPEOP_ARITH2(evmergehilo);
-GEN_SPEOP_ARITH2(evmergelohi);
-
-/* Arithmetic */
-GEN_SPEOP_ARITH2(evaddw);
-GEN_SPEOP_ARITH2(evsubfw);
-GEN_SPEOP_ARITH1(evabs);
-GEN_SPEOP_ARITH1(evneg);
-GEN_SPEOP_ARITH1(evextsb);
-GEN_SPEOP_ARITH1(evextsh);
-GEN_SPEOP_ARITH1(evrndw);
-GEN_SPEOP_ARITH1(evcntlzw);
-GEN_SPEOP_ARITH1(evcntlsw);
-static always_inline void gen_brinc (DisasContext *ctx)
-{
-    /* Note: brinc is usable even if SPE is disabled */
-    tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
-    tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
-    gen_op_brinc();
-    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+/* SPE arithmetic immediate */
+#if defined(TARGET_PPC64)
+#define GEN_SPEOP_ARITH_IMM2(name, tcg_op)                                    \
+static always_inline void gen_##name (DisasContext *ctx)                      \
+{                                                                             \
+    if (unlikely(!ctx->spe_enabled)) {                                        \
+        GEN_EXCP_NO_AP(ctx);                                                  \
+        return;                                                               \
+    }                                                                         \
+    TCGv_i32 t0 = tcg_temp_local_new_i32();                                   \
+    TCGv_i32 t1 = tcg_temp_local_new_i32();                                   \
+    TCGv_i64 t2 = tcg_temp_local_new_i64();                                   \
+    tcg_gen_trunc_i64_i32(t0, cpu_gpr[rB(ctx->opcode)]);                      \
+    tcg_op(t0, t0, rA(ctx->opcode));                                          \
+    tcg_gen_shri_i64(t2, cpu_gpr[rB(ctx->opcode)], 32);                       \
+    tcg_gen_trunc_i64_i32(t1, t2);                                            \
+    tcg_temp_free_i64(t2);                                                    \
+    tcg_op(t1, t1, rA(ctx->opcode));                                          \
+    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);                 \
+    tcg_temp_free_i32(t0);                                                    \
+    tcg_temp_free_i32(t1);                                                    \
 }
-
-#define GEN_SPEOP_ARITH_IMM2(name)                                            \
-static always_inline void gen_##name##i (DisasContext *ctx)                   \
+#else
+#define GEN_SPEOP_ARITH_IMM2(name, tcg_op)                                    \
+static always_inline void gen_##name (DisasContext *ctx)                      \
 {                                                                             \
     if (unlikely(!ctx->spe_enabled)) {                                        \
         GEN_EXCP_NO_AP(ctx);                                                  \
         return;                                                               \
     }                                                                         \
-    gen_load_gpr64(cpu_T64[0], rB(ctx->opcode));                              \
-    gen_op_splatwi_T1_64(rA(ctx->opcode));                                    \
-    gen_op_##name();                                                          \
-    gen_store_gpr64(rD(ctx->opcode), cpu_T64[0]);                             \
+    tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],                \
+           rA(ctx->opcode));                                                  \
+    tcg_op(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)],              \
+           rA(ctx->opcode));                                                  \
 }
+#endif
+GEN_SPEOP_ARITH_IMM2(evaddiw, tcg_gen_addi_i32);
+GEN_SPEOP_ARITH_IMM2(evsubifw, tcg_gen_subi_i32);
 
-#define GEN_SPEOP_LOGIC_IMM2(name)                                            \
-static always_inline void gen_##name##i (DisasContext *ctx)                   \
+/* SPE comparison */
+#if defined(TARGET_PPC64)
+#define GEN_SPEOP_COMP(name, tcg_cond)                                        \
+static always_inline void gen_##name (DisasContext *ctx)                      \
 {                                                                             \
     if (unlikely(!ctx->spe_enabled)) {                                        \
         GEN_EXCP_NO_AP(ctx);                                                  \
         return;                                                               \
     }                                                                         \
-    gen_load_gpr64(cpu_T64[0], rA(ctx->opcode));                              \
-    gen_op_splatwi_T1_64(rB(ctx->opcode));                                    \
-    gen_op_##name();                                                          \
-    gen_store_gpr64(rD(ctx->opcode), cpu_T64[0]);                             \
+    int l1 = gen_new_label();                                                 \
+    int l2 = gen_new_label();                                                 \
+    int l3 = gen_new_label();                                                 \
+    int l4 = gen_new_label();                                                 \
+    TCGv_i32 t0 = tcg_temp_local_new_i32();                                   \
+    TCGv_i32 t1 = tcg_temp_local_new_i32();                                   \
+    TCGv_i64 t2 = tcg_temp_local_new_i64();                                   \
+    tcg_gen_trunc_i64_i32(t0, cpu_gpr[rA(ctx->opcode)]);                      \
+    tcg_gen_trunc_i64_i32(t1, cpu_gpr[rB(ctx->opcode)]);                      \
+    tcg_gen_brcond_i32(tcg_cond, t0, t1, l1);                                 \
+    tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0);                          \
+    tcg_gen_br(l2);                                                           \
+    gen_set_label(l1);                                                        \
+    tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)],                              \
+                     CRF_CL | CRF_CH_OR_CL | CRF_CH_AND_CL);                  \
+    gen_set_label(l2);                                                        \
+    tcg_gen_shri_i64(t2, cpu_gpr[rA(ctx->opcode)], 32);                       \
+    tcg_gen_trunc_i64_i32(t0, t2);                                            \
+    tcg_gen_shri_i64(t2, cpu_gpr[rB(ctx->opcode)], 32);                       \
+    tcg_gen_trunc_i64_i32(t1, t2);                                            \
+    tcg_temp_free_i64(t2);                                                    \
+    tcg_gen_brcond_i32(tcg_cond, t0, t1, l3);                                 \
+    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)],  \
+                     ~(CRF_CH | CRF_CH_AND_CL));                              \
+    tcg_gen_br(l4);                                                           \
+    gen_set_label(l3);                                                        \
+    tcg_gen_ori_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)],   \
+                    CRF_CH | CRF_CH_OR_CL);                                   \
+    gen_set_label(l4);                                                        \
+    tcg_temp_free_i32(t0);                                                    \
+    tcg_temp_free_i32(t1);                                                    \
+}
+#else
+#define GEN_SPEOP_COMP(name, tcg_cond)                                        \
+static always_inline void gen_##name (DisasContext *ctx)                      \
+{                                                                             \
+    if (unlikely(!ctx->spe_enabled)) {                                        \
+        GEN_EXCP_NO_AP(ctx);                                                  \
+        return;                                                               \
+    }                                                                         \
+    int l1 = gen_new_label();                                                 \
+    int l2 = gen_new_label();                                                 \
+    int l3 = gen_new_label();                                                 \
+    int l4 = gen_new_label();                                                 \
+                                                                              \
+    tcg_gen_brcond_i32(tcg_cond, cpu_gpr[rA(ctx->opcode)],                    \
+                       cpu_gpr[rB(ctx->opcode)], l1);                         \
+    tcg_gen_movi_tl(cpu_crf[crfD(ctx->opcode)], 0);                           \
+    tcg_gen_br(l2);                                                           \
+    gen_set_label(l1);                                                        \
+    tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)],                              \
+                     CRF_CL | CRF_CH_OR_CL | CRF_CH_AND_CL);                  \
+    gen_set_label(l2);                                                        \
+    tcg_gen_brcond_i32(tcg_cond, cpu_gprh[rA(ctx->opcode)],                   \
+                       cpu_gprh[rB(ctx->opcode)], l3);                        \
+    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)],  \
+                     ~(CRF_CH | CRF_CH_AND_CL));                              \
+    tcg_gen_br(l4);                                                           \
+    gen_set_label(l3);                                                        \
+    tcg_gen_ori_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)],   \
+                    CRF_CH | CRF_CH_OR_CL);                                   \
+    gen_set_label(l4);                                                        \
+}
+#endif
+GEN_SPEOP_COMP(evcmpgtu, TCG_COND_GTU);
+GEN_SPEOP_COMP(evcmpgts, TCG_COND_GT);
+GEN_SPEOP_COMP(evcmpltu, TCG_COND_LTU);
+GEN_SPEOP_COMP(evcmplts, TCG_COND_LT);
+GEN_SPEOP_COMP(evcmpeq, TCG_COND_EQ);
+
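
GEN_SPEOP_COMP above packs the two per-element compare results into one CR field together with their OR and AND. A plain-C sketch (editor's illustration; the CRF_* bit positions below are assumptions, the real values come from the CRF_CH/CRF_CL macros) of the field it builds:

    #include <stdint.h>
    #include <stdio.h>

    #define CRF_CH        0x8   /* high-element result (assumed position) */
    #define CRF_CL        0x4   /* low-element result (assumed position) */
    #define CRF_CH_OR_CL  0x2
    #define CRF_CH_AND_CL 0x1

    static uint32_t ev_cmp_crf(int ch, int cl)
    {
        uint32_t crf = 0;

        if (ch)
            crf |= CRF_CH | CRF_CH_OR_CL;
        if (cl)
            crf |= CRF_CL | CRF_CH_OR_CL;
        if (ch && cl)
            crf |= CRF_CH_AND_CL;
        return crf;
    }

    int main(void)
    {
        /* evcmpgtu-style: high elements compare true, low elements false */
        printf("crf = 0x%x\n", ev_cmp_crf(1, 0));
        return 0;
    }
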
+/* SPE misc */
+static always_inline void gen_brinc (DisasContext *ctx)
+{
+    /* Note: brinc is usable even if SPE is disabled */
+    gen_helper_brinc(cpu_gpr[rD(ctx->opcode)],
+                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+}
+static always_inline void gen_evmergelo (DisasContext *ctx)
+{
+    if (unlikely(!ctx->spe_enabled)) {
+        GEN_EXCP_NO_AP(ctx);
+        return;
+    }
+#if defined(TARGET_PPC64)
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x00000000FFFFFFFFLL);
+    tcg_gen_shli_tl(t1, cpu_gpr[rA(ctx->opcode)], 32);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], t0, t1);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+#else
+    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+    tcg_gen_mov_i32(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+#endif
+}
+static always_inline void gen_evmergehilo (DisasContext *ctx)
+{
+    if (unlikely(!ctx->spe_enabled)) {
+        GEN_EXCP_NO_AP(ctx);
+        return;
+    }
+#if defined(TARGET_PPC64)
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x00000000FFFFFFFFLL);
+    tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], 0xFFFFFFFF00000000ULL);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], t0, t1);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+#else
+    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+    tcg_gen_mov_i32(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+#endif
+}
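+/* evmergelohi: rD high word <- rA low word, rD low word <- rB high word */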
+static always_inline void gen_evmergelohi (DisasContext *ctx)
+{
+    if (unlikely(!ctx->spe_enabled)) {
+        GEN_EXCP_NO_AP(ctx);
+        return;
+    }
+#if defined(TARGET_PPC64)
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 32);
+    tcg_gen_shli_tl(t1, cpu_gpr[rA(ctx->opcode)], 32);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], t0, t1);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+#else
+    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+    tcg_gen_mov_i32(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+#endif
 }
-
-GEN_SPEOP_ARITH_IMM2(evaddw);
-#define gen_evaddiw gen_evaddwi
-GEN_SPEOP_ARITH_IMM2(evsubfw);
-#define gen_evsubifw gen_evsubfwi
-GEN_SPEOP_LOGIC_IMM2(evslw);
-GEN_SPEOP_LOGIC_IMM2(evsrwu);
-#define gen_evsrwis gen_evsrwsi
-GEN_SPEOP_LOGIC_IMM2(evsrws);
-#define gen_evsrwiu gen_evsrwui
-GEN_SPEOP_LOGIC_IMM2(evrlw);
-
 static always_inline void gen_evsplati (DisasContext *ctx)
 {
-    int32_t imm = (int32_t)(rA(ctx->opcode) << 27) >> 27;
+    int32_t imm = (int32_t)(rA(ctx->opcode) << 27) >> 27;
 
-    gen_op_splatwi_T0_64(imm);
-    gen_store_gpr64(rD(ctx->opcode), cpu_T64[0]);
+#if defined(TARGET_PPC64)
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    /* splat the 32-bit immediate into both halves of the 64-bit GPR */
+    tcg_gen_movi_tl(t0, (uint32_t)imm);
+    tcg_gen_shli_tl(t1, t0, 32);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], t0, t1);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+#else
+    tcg_gen_movi_i32(cpu_gpr[rD(ctx->opcode)], imm);
+    tcg_gen_movi_i32(cpu_gprh[rD(ctx->opcode)], imm);
+#endif
 }
-
 static always_inline void gen_evsplatfi (DisasContext *ctx)
 {
-    uint32_t imm = rA(ctx->opcode) << 27;
+    uint32_t imm = rA(ctx->opcode) << 27;
 
-    gen_op_splatwi_T0_64(imm);
-    gen_store_gpr64(rD(ctx->opcode), cpu_T64[0]);
+#if defined(TARGET_PPC64)
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    /* splat the 32-bit immediate into both halves of the 64-bit GPR */
+    tcg_gen_movi_tl(t0, imm);
+    tcg_gen_shli_tl(t1, t0, 32);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], t0, t1);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+#else
+    tcg_gen_movi_i32(cpu_gpr[rD(ctx->opcode)], imm);
+    tcg_gen_movi_i32(cpu_gprh[rD(ctx->opcode)], imm);
+#endif
 }
 
-/* Comparison */
-GEN_SPEOP_COMP(evcmpgtu);
-GEN_SPEOP_COMP(evcmpgts);
-GEN_SPEOP_COMP(evcmpltu);
-GEN_SPEOP_COMP(evcmplts);
-GEN_SPEOP_COMP(evcmpeq);
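+/* evsel: pick each word of rD from rA when the matching CR bit is set
+   (bit 3 for the high word, bit 2 for the low word), otherwise from rB */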
+static always_inline void gen_evsel (DisasContext *ctx)
+{
+    int l1 = gen_new_label();
+    int l2 = gen_new_label();
+    int l3 = gen_new_label();
+    int l4 = gen_new_label();
+    TCGv_i32 t0 = tcg_temp_local_new_i32();
+#if defined(TARGET_PPC64)
+    TCGv t1 = tcg_temp_local_new();
+    TCGv t2 = tcg_temp_local_new();
+#endif
+    tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 3);
+    tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
+#if defined(TARGET_PPC64)
+    tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], 0xFFFFFFFF00000000ULL);
+#else
+    tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+#endif
+    tcg_gen_br(l2);
+    gen_set_label(l1);
+#if defined(TARGET_PPC64)
+    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0xFFFFFFFF00000000ULL);
+#else
+    tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+#endif
+    gen_set_label(l2);
+    tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 2);
+    tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l3);
+#if defined(TARGET_PPC64)
+    tcg_gen_andi_tl(t2, cpu_gpr[rA(ctx->opcode)], 0x00000000FFFFFFFFULL);
+#else
+    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+#endif
+    tcg_gen_br(l4);
+    gen_set_label(l3);
+#if defined(TARGET_PPC64)
+    tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x00000000FFFFFFFFULL);
+#else
+    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+#endif
+    gen_set_label(l4);
+    tcg_temp_free_i32(t0);
+#if defined(TARGET_PPC64)
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], t1, t2);
+    tcg_temp_free(t1);
+    tcg_temp_free(t2);
+#endif
+}
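+/* The crfS field of evsel overlaps the opcode bits used for decoding, so four
+   table entries are registered; each one re-extracts crfS in gen_evsel. */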
+GEN_HANDLER2(evsel0, "evsel", 0x04, 0x1c, 0x09, 0x00000000, PPC_SPE)
+{
+    gen_evsel(ctx);
+}
+GEN_HANDLER2(evsel1, "evsel", 0x04, 0x1d, 0x09, 0x00000000, PPC_SPE)
+{
+    gen_evsel(ctx);
+}
+GEN_HANDLER2(evsel2, "evsel", 0x04, 0x1e, 0x09, 0x00000000, PPC_SPE)
+{
+    gen_evsel(ctx);
+}
+GEN_HANDLER2(evsel3, "evsel", 0x04, 0x1f, 0x09, 0x00000000, PPC_SPE)
+{
+    gen_evsel(ctx);
+}
 
 GEN_SPE(evaddw,         speundef,      0x00, 0x08, 0x00000000, PPC_SPE); ////
 GEN_SPE(evaddiw,        speundef,      0x01, 0x08, 0x00000000, PPC_SPE);
@@ -5934,36 +6593,6 @@ GEN_SPE(evcmpgtu,       evcmpgts,      0x18, 0x08, 0x00600000, PPC_SPE); ////
 GEN_SPE(evcmpltu,       evcmplts,      0x19, 0x08, 0x00600000, PPC_SPE); ////
 GEN_SPE(evcmpeq,        speundef,      0x1A, 0x08, 0x00600000, PPC_SPE); ////
 
-static always_inline void gen_evsel (DisasContext *ctx)
-{
-    if (unlikely(!ctx->spe_enabled)) {
-        GEN_EXCP_NO_AP(ctx);
-        return;
-    }
-    tcg_gen_mov_i32(cpu_T[0], cpu_crf[ctx->opcode & 0x7]);
-    gen_load_gpr64(cpu_T64[0], rA(ctx->opcode));
-    gen_load_gpr64(cpu_T64[1], rB(ctx->opcode));
-    gen_op_evsel();
-    gen_store_gpr64(rD(ctx->opcode), cpu_T64[0]);
-}
-
-GEN_HANDLER2(evsel0, "evsel", 0x04, 0x1c, 0x09, 0x00000000, PPC_SPE)
-{
-    gen_evsel(ctx);
-}
-GEN_HANDLER2(evsel1, "evsel", 0x04, 0x1d, 0x09, 0x00000000, PPC_SPE)
-{
-    gen_evsel(ctx);
-}
-GEN_HANDLER2(evsel2, "evsel", 0x04, 0x1e, 0x09, 0x00000000, PPC_SPE)
-{
-    gen_evsel(ctx);
-}
-GEN_HANDLER2(evsel3, "evsel", 0x04, 0x1f, 0x09, 0x00000000, PPC_SPE)
-{
-    gen_evsel(ctx);
-}
-
 /* Load and stores */
 GEN_SPEOP_LDST(dd, 3);
 GEN_SPEOP_LDST(dw, 3);
@@ -6232,15 +6861,55 @@ static always_inline void gen_##name (DisasContext *ctx)                      \
     gen_store_gpr64(rD(ctx->opcode), cpu_T64[0]);                             \
 }
 
+#define GEN_SPEFPUOP_ARITH1(name)                                             \
+static always_inline void gen_##name (DisasContext *ctx)                      \
+{                                                                             \
+    if (unlikely(!ctx->spe_enabled)) {                                        \
+        GEN_EXCP_NO_AP(ctx);                                                  \
+        return;                                                               \
+    }                                                                         \
+    gen_load_gpr64(cpu_T64[0], rA(ctx->opcode));                              \
+    gen_op_##name();                                                          \
+    gen_store_gpr64(rD(ctx->opcode), cpu_T64[0]);                             \
+}
+
+#define GEN_SPEFPUOP_ARITH2(name)                                             \
+static always_inline void gen_##name (DisasContext *ctx)                      \
+{                                                                             \
+    if (unlikely(!ctx->spe_enabled)) {                                        \
+        GEN_EXCP_NO_AP(ctx);                                                  \
+        return;                                                               \
+    }                                                                         \
+    gen_load_gpr64(cpu_T64[0], rA(ctx->opcode));                              \
+    gen_load_gpr64(cpu_T64[1], rB(ctx->opcode));                              \
+    gen_op_##name();                                                          \
+    gen_store_gpr64(rD(ctx->opcode), cpu_T64[0]);                             \
+}
+
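+/* The gen_op_ comparison helpers leave the 4-bit CR result in T0; truncate it
+   into crfD and keep only the low four bits. */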
+#define GEN_SPEFPUOP_COMP(name)                                               \
+static always_inline void gen_##name (DisasContext *ctx)                      \
+{                                                                             \
+    TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];                                \
+    if (unlikely(!ctx->spe_enabled)) {                                        \
+        GEN_EXCP_NO_AP(ctx);                                                  \
+        return;                                                               \
+    }                                                                         \
+    gen_load_gpr64(cpu_T64[0], rA(ctx->opcode));                              \
+    gen_load_gpr64(cpu_T64[1], rB(ctx->opcode));                              \
+    gen_op_##name();                                                          \
+    tcg_gen_trunc_tl_i32(crf, cpu_T[0]);                                      \
+    tcg_gen_andi_i32(crf, crf, 0xf);                                          \
+}
+
 /* Single precision floating-point vectors operations */
 /* Arithmetic */
-GEN_SPEOP_ARITH2(evfsadd);
-GEN_SPEOP_ARITH2(evfssub);
-GEN_SPEOP_ARITH2(evfsmul);
-GEN_SPEOP_ARITH2(evfsdiv);
-GEN_SPEOP_ARITH1(evfsabs);
-GEN_SPEOP_ARITH1(evfsnabs);
-GEN_SPEOP_ARITH1(evfsneg);
+GEN_SPEFPUOP_ARITH2(evfsadd);
+GEN_SPEFPUOP_ARITH2(evfssub);
+GEN_SPEFPUOP_ARITH2(evfsmul);
+GEN_SPEFPUOP_ARITH2(evfsdiv);
+GEN_SPEFPUOP_ARITH1(evfsabs);
+GEN_SPEFPUOP_ARITH1(evfsnabs);
+GEN_SPEFPUOP_ARITH1(evfsneg);
 /* Conversion */
 GEN_SPEFPUOP_CONV(evfscfui);
 GEN_SPEFPUOP_CONV(evfscfsi);
@@ -6253,12 +6922,12 @@ GEN_SPEFPUOP_CONV(evfsctsf);
 GEN_SPEFPUOP_CONV(evfsctuiz);
 GEN_SPEFPUOP_CONV(evfsctsiz);
 /* Comparison */
-GEN_SPEOP_COMP(evfscmpgt);
-GEN_SPEOP_COMP(evfscmplt);
-GEN_SPEOP_COMP(evfscmpeq);
-GEN_SPEOP_COMP(evfststgt);
-GEN_SPEOP_COMP(evfststlt);
-GEN_SPEOP_COMP(evfststeq);
+GEN_SPEFPUOP_COMP(evfscmpgt);
+GEN_SPEFPUOP_COMP(evfscmplt);
+GEN_SPEFPUOP_COMP(evfscmpeq);
+GEN_SPEFPUOP_COMP(evfststgt);
+GEN_SPEFPUOP_COMP(evfststlt);
+GEN_SPEFPUOP_COMP(evfststeq);
 
 /* Opcodes definitions */
 GEN_SPE(evfsadd,        evfssub,       0x00, 0x0A, 0x00000000, PPC_SPEFPU); //
@@ -6278,13 +6947,13 @@ GEN_SPE(evfststeq,      speundef,      0x0F, 0x0A, 0x00600000, PPC_SPEFPU); //
 
 /* Single precision floating-point operations */
 /* Arithmetic */
-GEN_SPEOP_ARITH2(efsadd);
-GEN_SPEOP_ARITH2(efssub);
-GEN_SPEOP_ARITH2(efsmul);
-GEN_SPEOP_ARITH2(efsdiv);
-GEN_SPEOP_ARITH1(efsabs);
-GEN_SPEOP_ARITH1(efsnabs);
-GEN_SPEOP_ARITH1(efsneg);
+GEN_SPEFPUOP_ARITH2(efsadd);
+GEN_SPEFPUOP_ARITH2(efssub);
+GEN_SPEFPUOP_ARITH2(efsmul);
+GEN_SPEFPUOP_ARITH2(efsdiv);
+GEN_SPEFPUOP_ARITH1(efsabs);
+GEN_SPEFPUOP_ARITH1(efsnabs);
+GEN_SPEFPUOP_ARITH1(efsneg);
 /* Conversion */
 GEN_SPEFPUOP_CONV(efscfui);
 GEN_SPEFPUOP_CONV(efscfsi);
@@ -6298,12 +6967,12 @@ GEN_SPEFPUOP_CONV(efsctuiz);
 GEN_SPEFPUOP_CONV(efsctsiz);
 GEN_SPEFPUOP_CONV(efscfd);
 /* Comparison */
-GEN_SPEOP_COMP(efscmpgt);
-GEN_SPEOP_COMP(efscmplt);
-GEN_SPEOP_COMP(efscmpeq);
-GEN_SPEOP_COMP(efststgt);
-GEN_SPEOP_COMP(efststlt);
-GEN_SPEOP_COMP(efststeq);
+GEN_SPEFPUOP_COMP(efscmpgt);
+GEN_SPEFPUOP_COMP(efscmplt);
+GEN_SPEFPUOP_COMP(efscmpeq);
+GEN_SPEFPUOP_COMP(efststgt);
+GEN_SPEFPUOP_COMP(efststlt);
+GEN_SPEFPUOP_COMP(efststeq);
 
 /* Opcodes definitions */
 GEN_SPE(efsadd,         efssub,        0x00, 0x0B, 0x00000000, PPC_SPEFPU); //
@@ -6323,13 +6992,13 @@ GEN_SPE(efststeq,       speundef,      0x0F, 0x0B, 0x00600000, PPC_SPEFPU); //
 
 /* Double precision floating-point operations */
 /* Arithmetic */
-GEN_SPEOP_ARITH2(efdadd);
-GEN_SPEOP_ARITH2(efdsub);
-GEN_SPEOP_ARITH2(efdmul);
-GEN_SPEOP_ARITH2(efddiv);
-GEN_SPEOP_ARITH1(efdabs);
-GEN_SPEOP_ARITH1(efdnabs);
-GEN_SPEOP_ARITH1(efdneg);
+GEN_SPEFPUOP_ARITH2(efdadd);
+GEN_SPEFPUOP_ARITH2(efdsub);
+GEN_SPEFPUOP_ARITH2(efdmul);
+GEN_SPEFPUOP_ARITH2(efddiv);
+GEN_SPEFPUOP_ARITH1(efdabs);
+GEN_SPEFPUOP_ARITH1(efdnabs);
+GEN_SPEFPUOP_ARITH1(efdneg);
 /* Conversion */
 
 GEN_SPEFPUOP_CONV(efdcfui);
@@ -6348,12 +7017,12 @@ GEN_SPEFPUOP_CONV(efdcfsid);
 GEN_SPEFPUOP_CONV(efdctuidz);
 GEN_SPEFPUOP_CONV(efdctsidz);
 /* Comparison */
-GEN_SPEOP_COMP(efdcmpgt);
-GEN_SPEOP_COMP(efdcmplt);
-GEN_SPEOP_COMP(efdcmpeq);
-GEN_SPEOP_COMP(efdtstgt);
-GEN_SPEOP_COMP(efdtstlt);
-GEN_SPEOP_COMP(efdtsteq);
+GEN_SPEFPUOP_COMP(efdcmpgt);
+GEN_SPEFPUOP_COMP(efdcmplt);
+GEN_SPEFPUOP_COMP(efdcmpeq);
+GEN_SPEFPUOP_COMP(efdtstgt);
+GEN_SPEFPUOP_COMP(efdtstlt);
+GEN_SPEFPUOP_COMP(efdtsteq);
 
 /* Opcodes definitions */
 GEN_SPE(efdadd,         efdsub,        0x10, 0x0B, 0x00000000, PPC_SPEFPU); //
@@ -6391,7 +7060,7 @@ void cpu_dump_state (CPUState *env, FILE *f,
     int i;
 
     cpu_fprintf(f, "NIP " ADDRX "   LR " ADDRX " CTR " ADDRX " XER %08x\n",
-                env->nip, env->lr, env->ctr, hreg_load_xer(env));
+                env->nip, env->lr, env->ctr, env->xer);
     cpu_fprintf(f, "MSR " ADDRX " HID0 " ADDRX "  HF " ADDRX " idx %d\n",
                 env->msr, env->spr[SPR_HID0], env->hflags, env->mmu_idx);
 #if !defined(NO_TIMER_DUMP)
@@ -6501,6 +7170,7 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
     target_ulong pc_start;
     uint16_t *gen_opc_end;
     int supervisor, little_endian;
+    CPUBreakpoint *bp;
     int j, lj = -1;
     int num_insns;
     int max_insns;
@@ -6555,9 +7225,9 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
     gen_icount_start();
     /* Set env in case of segfault during code fetch */
     while (ctx.exception == POWERPC_EXCP_NONE && gen_opc_ptr < gen_opc_end) {
-        if (unlikely(env->nb_breakpoints > 0)) {
-            for (j = 0; j < env->nb_breakpoints; j++) {
-                if (env->breakpoints[j] == ctx.nip) {
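+        /* Check for a breakpoint on the current instruction address */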
+        if (unlikely(env->breakpoints)) {
+            for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
+                if (bp->pc == ctx.nip) {
                     gen_update_nip(&ctx, ctx.nip);
                     gen_op_debug();
                     break;