*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdarg.h>
#include <stdlib.h>
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
+#include "qemu-log.h"
+#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"
#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
-#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
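+/* Wrapping the body in do { ... } while(0) lets ARCH(x); behave as a
+   single statement, so it nests safely inside unbraced if/else chains. */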
+#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
/* internal defines */
typedef struct DisasContext {
struct TranslationBlock *tb;
int singlestep_enabled;
int thumb;
- int is_mem;
#if !defined(CONFIG_USER_ONLY)
int user;
#endif
#define DISAS_WFI 4
#define DISAS_SWI 5
-/* XXX: move that elsewhere */
-extern FILE *logfile;
-extern int loglevel;
-
-static TCGv cpu_env;
+static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
-static TCGv cpu_V0, cpu_V1, cpu_M0;
+static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
/* FIXME: These should be removed. */
static TCGv cpu_T[2];
-static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
+static TCGv cpu_F0s, cpu_F1s;
+static TCGv_i64 cpu_F0d, cpu_F1d;
+
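+/* gen-icount.h presumably uses ICOUNT_TEMP as its scratch register for the
+   instruction-count check; reuse T0 rather than allocating another global. */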
+#define ICOUNT_TEMP cpu_T[0]
+#include "gen-icount.h"
/* initialize TCG globals. */
void arm_translate_init(void)
{
- cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
- cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
- cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+
+ cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
+ cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
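+/* Second inclusion of helpers.h: with GEN_HELPER defined to 2 the DEF_HELPER
+   macros expand to helper-registration code rather than prototypes or the
+   inline gen_helper_* wrappers produced by GEN_HELPER 1. */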
+#define GEN_HELPER 2
+#include "helpers.h"
}
/* The code generator doesn't like lots of temporaries, so maintain our own
static TCGv temps[MAX_TEMPS];
/* Allocate a temporary variable. */
-static TCGv new_tmp(void)
+static TCGv_i32 new_tmp(void)
{
TCGv tmp;
if (num_temps == MAX_TEMPS)
abort();
- if (GET_TCGV(temps[num_temps]))
+ if (GET_TCGV_I32(temps[num_temps]))
return temps[num_temps++];
- tmp = tcg_temp_new(TCG_TYPE_I32);
+ tmp = tcg_temp_new_i32();
temps[num_temps++] = tmp;
return tmp;
}
int i;
num_temps--;
i = num_temps;
- if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
+ if (TCGV_EQUAL(temps[i], tmp))
return;
/* Shuffle this temp to the last slot. */
- while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
+ while (!TCGV_EQUAL(temps[i], tmp))
i--;
while (i < num_temps) {
temps[i] = temps[i + 1];
/* Basic operations. */
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
-#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
-#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
-#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
-#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
-#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
/* Value extensions. */
-#define gen_uxtb(var) tcg_gen_andi_i32(var, var, 0xff)
-#define gen_uxth(var) tcg_gen_andi_i32(var, var, 0xffff)
+#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
+#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
{
TCGv tmp1 = new_tmp();
TCGv tmp2 = new_tmp();
- tcg_gen_ext8s_i32(tmp1, a);
- tcg_gen_ext8s_i32(tmp2, b);
+ tcg_gen_ext16s_i32(tmp1, a);
+ tcg_gen_ext16s_i32(tmp2, b);
tcg_gen_mul_i32(tmp1, tmp1, tmp2);
dead_tmp(tmp2);
tcg_gen_sari_i32(a, a, 16);
/* FIXME: Most targets have native widening multiplication.
It would be good to use that instead of a full wide multiply. */
/* 32x32->64 multiply. Marks inputs as dead. */
-static TCGv gen_mulu_i64_i32(TCGv a, TCGv b)
+static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
- TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
- TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv_i64 tmp1 = tcg_temp_new_i64();
+ TCGv_i64 tmp2 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(tmp1, a);
dead_tmp(a);
return tmp1;
}
-static TCGv gen_muls_i64_i32(TCGv a, TCGv b)
+static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
- TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
- TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv_i64 tmp1 = tcg_temp_new_i64();
+ TCGv_i64 tmp2 = tcg_temp_new_i64();
tcg_gen_ext_i32_i64(tmp1, a);
dead_tmp(a);
/* Unsigned 32x32->64 multiply. */
static void gen_op_mull_T0_T1(void)
{
- TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
- TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv_i64 tmp1 = tcg_temp_new_i64();
+ TCGv_i64 tmp2 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
/* Signed 32x32->64 multiply. */
static void gen_imull(TCGv a, TCGv b)
{
- TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
- TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv_i64 tmp1 = tcg_temp_new_i64();
+ TCGv_i64 tmp2 = tcg_temp_new_i64();
tcg_gen_ext_i32_i64(tmp1, a);
tcg_gen_ext_i32_i64(tmp2, b);
tcg_gen_shri_i64(tmp1, tmp1, 32);
tcg_gen_trunc_i64_i32(b, tmp1);
}
-#define gen_op_imull_T0_T1() gen_imull(cpu_T[0], cpu_T[1])
/* Swap low and high halfwords. */
static void gen_swap_half(TCGv var)
{
TCGv tmp = new_tmp();
tcg_gen_shri_i32(tmp, var, 31);
- gen_set_CF(var);
+ gen_set_CF(tmp);
dead_tmp(tmp);
}
dead_tmp(tmp);
}
+/* dest = T0 + T1 + CF. */
+static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
+{
+ TCGv tmp;
+ tcg_gen_add_i32(dest, t0, t1);
+ tmp = load_cpu_field(CF);
+ tcg_gen_add_i32(dest, dest, tmp);
+ dead_tmp(tmp);
+}
+
/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
-/* FIXME: Implement this natively. */
-static inline void tcg_gen_not_i32(TCGv t0, TCGv t1)
-{
- tcg_gen_xori_i32(t0, t1, ~0);
-}
-
-/* FIXME: Implement this natively. */
-static inline void tcg_gen_neg_i64(TCGv dest, TCGv src)
-{
- tcg_gen_sub_i64(dest, tcg_const_i64(0), src);
-}
-
/* T0 &= ~T1. Clobbers T1. */
/* FIXME: Implement bic natively. */
static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
tcg_gen_andi_i32(tmp, var, 1);
} else {
tcg_gen_shri_i32(tmp, var, shift);
- if (shift != 31);
+ if (shift != 31)
tcg_gen_andi_i32(tmp, tmp, 1);
}
gen_set_CF(tmp);
}
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
- TCGv tmp;
+ TCGv_ptr tmp;
switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
case 1:
- tmp = tcg_temp_new(TCG_TYPE_PTR);
+ tmp = tcg_temp_new_ptr();
tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
PAS_OP(s)
break;
case 5:
- tmp = tcg_temp_new(TCG_TYPE_PTR);
+ tmp = tcg_temp_new_ptr();
tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
PAS_OP(u)
break;
}
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
- TCGv tmp;
+ TCGv_ptr tmp;
switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
case 0:
- tmp = tcg_temp_new(TCG_TYPE_PTR);
+ tmp = tcg_temp_new_ptr();
tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
PAS_OP(s)
break;
case 4:
- tmp = tcg_temp_new(TCG_TYPE_PTR);
+ tmp = tcg_temp_new_ptr();
tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
PAS_OP(u)
break;
{
TCGv tmp;
TCGv tmp2;
- TCGv zero;
int inv;
- zero = tcg_const_i32(0);
switch (cc) {
case 0: /* eq: Z */
tmp = load_cpu_field(ZF);
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
break;
case 1: /* ne: !Z */
tmp = load_cpu_field(ZF);
- tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
break;
case 2: /* cs: C */
tmp = load_cpu_field(CF);
- tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
break;
case 3: /* cc: !C */
tmp = load_cpu_field(CF);
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
break;
case 4: /* mi: N */
tmp = load_cpu_field(NF);
- tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
break;
case 5: /* pl: !N */
tmp = load_cpu_field(NF);
- tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
break;
case 6: /* vs: V */
tmp = load_cpu_field(VF);
- tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
break;
case 7: /* vc: !V */
tmp = load_cpu_field(VF);
- tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
break;
case 8: /* hi: C && !Z */
inv = gen_new_label();
tmp = load_cpu_field(CF);
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, inv);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
dead_tmp(tmp);
tmp = load_cpu_field(ZF);
- tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
gen_set_label(inv);
break;
case 9: /* ls: !C || Z */
tmp = load_cpu_field(CF);
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
dead_tmp(tmp);
tmp = load_cpu_field(ZF);
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
break;
case 10: /* ge: N == V -> N ^ V == 0 */
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
- tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
break;
case 11: /* lt: N != V -> N ^ V != 0 */
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
- tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
break;
case 12: /* gt: !Z && N == V */
inv = gen_new_label();
tmp = load_cpu_field(ZF);
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, inv);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
dead_tmp(tmp);
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
- tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
gen_set_label(inv);
break;
case 13: /* le: Z || N != V */
tmp = load_cpu_field(ZF);
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
dead_tmp(tmp);
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
- tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
break;
default:
fprintf(stderr, "Bad condition code 0x%x\n", cc);
dead_tmp(tmp);
}
-const uint8_t table_logic_cc[16] = {
+static const uint8_t table_logic_cc[16] = {
1, /* and */
1, /* xor */
0, /* sub */
gen_bx(s, tmp);
}
-#if defined(CONFIG_USER_ONLY)
-#define gen_ldst(name, s) gen_op_##name##_raw()
-#else
-#define gen_ldst(name, s) do { \
- s->is_mem = 1; \
- if (IS_USER(s)) \
- gen_op_##name##_user(); \
- else \
- gen_op_##name##_kernel(); \
- } while (0)
-#endif
+/* Variant of store_reg which uses branch&exchange logic when storing
+ to r15 in ARM architecture v7 and above. The source must be a temporary
+ and will be marked as dead. */
+static inline void store_reg_bx(CPUState *env, DisasContext *s,
+ int reg, TCGv var)
+{
+ if (reg == 15 && ENABLE_ARCH_7) {
+ gen_bx(s, var);
+ } else {
+ store_reg(s, reg, var);
+ }
+}
+
static inline TCGv gen_ld8s(TCGv addr, int index)
{
TCGv tmp = new_tmp();
store_cpu_field(tmp, regs[15]);
}
-static inline void gen_set_pc_T0(void)
-{
- tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, regs[15]));
-}
-
static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
TCGv tmp;
gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}
-#define VFP_OP1i(name) \
-static inline void gen_vfp_##name(int dp, int arg) \
-{ \
- if (dp) \
- gen_op_vfp_##name##d(arg); \
- else \
- gen_op_vfp_##name##s(arg); \
-}
-
VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
static inline void gen_vfp_F1_ld0(int dp)
{
if (dp)
- tcg_gen_movi_i64(cpu_F0d, 0);
+ tcg_gen_movi_i64(cpu_F1d, 0);
else
- tcg_gen_movi_i32(cpu_F0s, 0);
+ tcg_gen_movi_i32(cpu_F1s, 0);
}
static inline void gen_vfp_uito(int dp)
static inline void gen_vfp_sito(int dp)
{
if (dp)
- gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
+ gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
else
- gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
+ gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}
static inline void gen_vfp_toui(int dp)
dead_tmp(var);
}
-static inline void neon_load_reg64(TCGv var, int reg)
+static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
-static inline void neon_store_reg64(TCGv var, int reg)
+static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
#define ARM_CP_RW_BIT (1 << 20)
-static inline void iwmmxt_load_reg(TCGv var, int reg)
+static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
-static inline void iwmmxt_store_reg(TCGv var, int reg)
+static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
iwmmxt_load_reg(cpu_V1, rn);
- tcg_gen_andi_i64(cpu_V1, cpu_V1, 0xffffffffu);
+ tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
{
- tcg_gen_extu_i32_i64(cpu_V0, cpu_T[0]);
- tcg_gen_extu_i32_i64(cpu_V1, cpu_T[0]);
- tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
- tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
+ tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
iwmmxt_store_reg(cpu_V0, rn);
}
gen_set_pc_im(s->pc);
tmp = load_reg(s, rd);
gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
+ dead_tmp(tmp);
}
return 0;
}
TCGv tmp = new_tmp();
if (shift)
tcg_gen_shri_i32(var, var, shift);
- tcg_gen_andi_i32(var, var, 0xff);
+ tcg_gen_ext8u_i32(var, var);
tcg_gen_shli_i32(tmp, var, 8);
tcg_gen_or_i32(var, var, tmp);
tcg_gen_shli_i32(tmp, var, 16);
static void gen_neon_dup_low16(TCGv var)
{
TCGv tmp = new_tmp();
- tcg_gen_andi_i32(var, var, 0xffff);
+ tcg_gen_ext16u_i32(var, var);
tcg_gen_shli_i32(tmp, var, 16);
tcg_gen_or_i32(var, var, tmp);
dead_tmp(tmp);
} else if (size == 1) {
gen_neon_dup_low16(tmp);
}
- tmp2 = new_tmp();
- tcg_gen_mov_i32(tmp2, tmp);
- neon_store_reg(rn, 0, tmp2);
- neon_store_reg(rn, 0, tmp);
+ for (n = 0; n <= pass * 2; n++) {
+ tmp2 = new_tmp();
+ tcg_gen_mov_i32(tmp2, tmp);
+ neon_store_reg(rn, n, tmp2);
+ }
+ neon_store_reg(rn, n, tmp);
} else {
/* VMOV */
switch (size) {
tmp = load_cpu_field(vfp.xregs[rn]);
break;
case ARM_VFP_FPSCR:
- if (rd == 15) {
+ if (rd == 15) {
tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
} else {
case 21:
case 22:
case 23:
+ case 28:
+ case 29:
+ case 30:
+ case 31:
/* Source and destination the same. */
gen_mov_F0_vreg(dp, rd);
break;
break;
case 3: /* nmsc: -fd - (fn * fm) */
gen_vfp_mul(dp);
- gen_mov_F1_vreg(dp, rd);
- gen_vfp_add(dp);
gen_vfp_neg(dp);
+ gen_mov_F1_vreg(dp, rd);
+ gen_vfp_sub(dp);
break;
case 4: /* mul: fn * fm */
gen_vfp_mul(dp);
else
i |= 0x800;
n |= i << 19;
- tcg_gen_movi_i32(cpu_F0d, ((uint64_t)n) << 32);
+ tcg_gen_movi_i32(cpu_F0s, n);
}
break;
case 15: /* extension space */
case 20: /* fshto */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_shto(dp, rm);
+ gen_vfp_shto(dp, 16 - rm);
break;
case 21: /* fslto */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_slto(dp, rm);
+ gen_vfp_slto(dp, 32 - rm);
break;
case 22: /* fuhto */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_uhto(dp, rm);
+ gen_vfp_uhto(dp, 16 - rm);
break;
case 23: /* fulto */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_ulto(dp, rm);
+ gen_vfp_ulto(dp, 32 - rm);
break;
case 24: /* ftoui */
gen_vfp_toui(dp);
case 28: /* ftosh */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_tosh(dp, rm);
+ gen_vfp_tosh(dp, 16 - rm);
break;
case 29: /* ftosl */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_tosl(dp, rm);
+ gen_vfp_tosl(dp, 32 - rm);
break;
case 30: /* ftouh */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_touh(dp, rm);
+ gen_vfp_touh(dp, 16 - rm);
break;
case 31: /* ftoul */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_toul(dp, rm);
+ gen_vfp_toul(dp, 32 - rm);
break;
default: /* undefined */
printf ("rn:%d\n", rn);
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
- if (__builtin_expect(s->singlestep_enabled, 0)) {
+ if (unlikely(s->singlestep_enabled)) {
/* An indirect jump so that we still trigger the debug exception. */
if (s->thumb)
dest |= 1;
return 0;
}
-/* Generate an old-style exception return. */
-static void gen_exception_return(DisasContext *s)
+/* Generate an old-style exception return. Marks pc as dead. */
+static void gen_exception_return(DisasContext *s, TCGv pc)
{
TCGv tmp;
- gen_set_pc_T0();
+ store_reg(s, 15, pc);
tmp = load_cpu_field(spsr);
gen_set_cpsr(tmp, 0xffffffff);
dead_tmp(tmp);
}
} else /* size == 0 */ {
if (load) {
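+ /* Explicitly mark tmp2 as unset (an invalid temp index) so the
+ compiler does not warn about uninitialized use on paths that
+ never assign it. */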
+ TCGV_UNUSED(tmp2);
for (n = 0; n < 4; n++) {
tmp = gen_ld8u(cpu_T[1], IS_USER(s));
gen_op_addl_T1_im(stride);
break;
case 3:
return 1;
+ default: /* Avoid compiler warnings. */
+ abort();
}
gen_op_addl_T1_im(1 << size);
tmp2 = new_tmp();
tcg_gen_mov_i32(tmp2, tmp);
neon_store_reg(rd, 0, tmp2);
- neon_store_reg(rd, 0, tmp);
+ neon_store_reg(rd, 1, tmp);
rd += stride;
}
stride = (1 << size) * nregs;
case 2:
tmp = gen_ld32(cpu_T[1], IS_USER(s));
break;
+ default: /* Avoid compiler warnings. */
+ abort();
}
if (size != 2) {
tmp2 = neon_load_reg(rd, pass);
tcg_gen_or_i32(dest, t, f);
}
-static inline void gen_neon_narrow(int size, TCGv dest, TCGv src)
+static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
switch (size) {
case 0: gen_helper_neon_narrow_u8(dest, src); break;
}
}
-static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv src)
+static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
switch (size) {
case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
}
}
-static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv src)
+static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
switch (size) {
case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
}
}
-static inline void gen_neon_widen(TCGv dest, TCGv src, int size, int u)
+static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
if (u) {
switch (size) {
}
}
-static inline void gen_neon_negl(TCGv var, int size)
+static inline void gen_neon_negl(TCGv_i64 var, int size)
{
switch (size) {
case 0: gen_helper_neon_negl_u16(var, var); break;
}
}
-static inline void gen_neon_addl_saturate(TCGv op0, TCGv op1, int size)
+static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
switch (size) {
case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
}
}
-static inline void gen_neon_mull(TCGv dest, TCGv a, TCGv b, int size, int u)
+static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
- TCGv tmp;
+ TCGv_i64 tmp;
switch ((size << 1) | u) {
case 0: gen_helper_neon_mull_s8(dest, a, b); break;
TCGv tmp;
TCGv tmp2;
TCGv tmp3;
+ TCGv_i64 tmp64;
if (!vfp_enabled(env))
return 1;
imm = (uint16_t)shift;
imm |= imm << 16;
tmp2 = tcg_const_i32(imm);
+ TCGV_UNUSED_I64(tmp64);
break;
case 2:
imm = (uint32_t)shift;
tmp2 = tcg_const_i32(imm);
+ TCGV_UNUSED_I64(tmp64);
+ break;
case 3:
- tmp2 = tcg_const_i64(shift);
+ tmp64 = tcg_const_i64(shift);
+ TCGV_UNUSED(tmp2);
break;
default:
abort();
neon_load_reg64(cpu_V0, rm + pass);
if (q) {
if (u)
- gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp2);
+ gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
else
- gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp2);
+ gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
} else {
if (u)
- gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp2);
+ gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
else
- gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp2);
+ gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
}
} else {
tmp = neon_load_reg(rm + pass, 0);
gen_neon_shift_narrow(size, tmp, tmp2, q, u);
- tcg_gen_extu_i32_i64(cpu_V0, tmp);
- dead_tmp(tmp);
- tmp = neon_load_reg(rm + pass, 1);
- gen_neon_shift_narrow(size, tmp, tmp2, q, u);
- tcg_gen_extu_i32_i64(cpu_V1, tmp);
+ tmp3 = neon_load_reg(rm + pass, 1);
+ gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
+ tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
dead_tmp(tmp);
- tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
- tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
+ dead_tmp(tmp3);
}
tmp = new_tmp();
if (op == 8 && !u) {
neon_store_reg(rd, pass, tmp);
}
}
- } else { /* (insn & 0x00800010 == 0x00800010) */
+ } else { /* (insn & 0x00800010 == 0x00800000) */
if (size != 3) {
op = (insn >> 8) & 0xf;
if ((insn & (1 << 6)) == 0) {
NEON_GET_REG(T0, rn, 1);
gen_neon_movl_scratch_T0(2);
}
+ TCGV_UNUSED(tmp3);
for (pass = 0; pass < 2; pass++) {
if (src1_wide) {
neon_load_reg64(cpu_V0, rn + pass);
+ TCGV_UNUSED(tmp);
} else {
if (pass == 1 && rd == rn) {
gen_neon_movl_T0_scratch(2);
}
if (src2_wide) {
neon_load_reg64(cpu_V1, rm + pass);
+ TCGV_UNUSED(tmp2);
} else {
if (pass == 1 && rd == rm) {
gen_neon_movl_T0_scratch(2);
neon_load_reg64(cpu_V1, rm);
}
} else if (q) {
- tmp = tcg_temp_new(TCG_TYPE_I64);
+ tmp64 = tcg_temp_new_i64();
if (imm < 8) {
neon_load_reg64(cpu_V0, rn);
- neon_load_reg64(tmp, rn + 1);
+ neon_load_reg64(tmp64, rn + 1);
} else {
neon_load_reg64(cpu_V0, rn + 1);
- neon_load_reg64(tmp, rm);
+ neon_load_reg64(tmp64, rm);
}
tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
- tcg_gen_shli_i64(cpu_V1, tmp, 64 - ((imm & 7) * 8));
+ tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
if (imm < 8) {
neon_load_reg64(cpu_V1, rm);
imm -= 8;
}
tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
- tcg_gen_shri_i64(tmp, tmp, imm * 8);
- tcg_gen_or_i64(cpu_V1, cpu_V1, tmp);
+ tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
+ tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
} else {
+ /* BUGFIX */
neon_load_reg64(cpu_V0, rn);
- tcg_gen_shri_i32(cpu_V0, cpu_V0, imm * 8);
+ tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
neon_load_reg64(cpu_V1, rm);
- tcg_gen_shli_i32(cpu_V1, cpu_V1, 64 - (imm * 8));
+ tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
}
neon_store_reg64(cpu_V0, rd);
NEON_GET_REG(T0, rm, pass * 2);
NEON_GET_REG(T1, rm, pass * 2 + 1);
switch (size) {
- case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
+ case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
case 1: gen_swap_half(cpu_T[0]); break;
case 2: /* no-op */ break;
default: abort();
} else {
gen_op_movl_T0_T1();
switch (size) {
- case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
+ case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
case 1: gen_swap_half(cpu_T[0]); break;
default: abort();
}
case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
if (size == 3)
return 1;
+ TCGV_UNUSED(tmp2);
for (pass = 0; pass < 2; pass++) {
neon_load_reg64(cpu_V0, rm + pass);
tmp = new_tmp();
switch (op) {
case 1: /* VREV32 */
switch (size) {
- case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
+ case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
case 1: gen_swap_half(cpu_T[0]); break;
default: return 1;
}
}
} else if ((insn & (1 << 10)) == 0) {
/* VTBL, VTBX. */
- n = (insn >> 5) & 0x18;
+ n = ((insn >> 5) & 0x18) + 8;
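+ /* n is the table length in bytes: bits [9:8] hold len, the
+ register count minus one, so the span is (len + 1) * 8 bytes. */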
if (insn & (1 << 6)) {
tmp = neon_load_reg(rd, 0);
} else {
tmp2 = neon_load_reg(rm, 0);
gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
tcg_const_i32(n));
+ dead_tmp(tmp);
if (insn & (1 << 6)) {
tmp = neon_load_reg(rd, 1);
} else {
gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
tcg_const_i32(n));
neon_store_reg(rd, 0, tmp2);
- neon_store_reg(rd, 1, tmp2);
+ neon_store_reg(rd, 1, tmp3);
+ dead_tmp(tmp);
} else if ((insn & 0x380) == 0) {
/* VDUP */
if (insn & (1 << 19)) {
return 0;
}
+static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
+{
+ int crn = (insn >> 16) & 0xf;
+ int crm = insn & 0xf;
+ int op1 = (insn >> 21) & 7;
+ int op2 = (insn >> 5) & 7;
+ int rt = (insn >> 12) & 0xf;
+ TCGv tmp;
+
+ if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
+ if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
+ /* TEECR */
+ if (IS_USER(s))
+ return 1;
+ tmp = load_cpu_field(teecr);
+ store_reg(s, rt, tmp);
+ return 0;
+ }
+ if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
+ /* TEEHBR */
+ if (IS_USER(s) && (env->teecr & 1))
+ return 1;
+ tmp = load_cpu_field(teehbr);
+ store_reg(s, rt, tmp);
+ return 0;
+ }
+ }
+ fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
+ op1, crn, crm, op2);
+ return 1;
+}
+
+static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
+{
+ int crn = (insn >> 16) & 0xf;
+ int crm = insn & 0xf;
+ int op1 = (insn >> 21) & 7;
+ int op2 = (insn >> 5) & 7;
+ int rt = (insn >> 12) & 0xf;
+ TCGv tmp;
+
+ if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
+ if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
+ /* TEECR */
+ if (IS_USER(s))
+ return 1;
+ tmp = load_reg(s, rt);
+ gen_helper_set_teecr(cpu_env, tmp);
+ dead_tmp(tmp);
+ return 0;
+ }
+ if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
+ /* TEEHBR */
+ if (IS_USER(s) && (env->teecr & 1))
+ return 1;
+ tmp = load_reg(s, rt);
+ store_cpu_field(tmp, teehbr);
+ return 0;
+ }
+ }
+ fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
+ op1, crn, crm, op2);
+ return 1;
+}
+
static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
int cpnum;
case 10:
case 11:
return disas_vfp_insn (env, s, insn);
+ case 14:
+ /* Coprocessors 7-15 are architecturally reserved by ARM.
+ Unfortunately Intel decided to ignore this. */
+ if (arm_feature(env, ARM_FEATURE_XSCALE))
+ goto board;
+ if (insn & (1 << 20))
+ return disas_cp14_read(env, s, insn);
+ else
+ return disas_cp14_write(env, s, insn);
case 15:
return disas_cp15_insn (env, s, insn);
default:
+ board:
/* Unknown coprocessor. See if the board has hooked it. */
return disas_cp_insn (env, s, insn);
}
/* Store a 64-bit value to a register pair. Clobbers val. */
-static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv val)
+static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
TCGv tmp;
tmp = new_tmp();
}
/* load a 32-bit value from a register and perform a 64-bit accumulate. */
-static void gen_addq_lo(DisasContext *s, TCGv val, int rlow)
+static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
- TCGv tmp;
+ TCGv_i64 tmp;
TCGv tmp2;
- /* Load 64-bit value rd:rn. */
- tmp = tcg_temp_new(TCG_TYPE_I64);
+ /* Load value and extend to 64 bits. */
+ tmp = tcg_temp_new_i64();
tmp2 = load_reg(s, rlow);
tcg_gen_extu_i32_i64(tmp, tmp2);
dead_tmp(tmp2);
}
/* load and add a 64-bit value from a register pair. */
-static void gen_addq(DisasContext *s, TCGv val, int rlow, int rhigh)
+static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
- TCGv tmp;
- TCGv tmp2;
+ TCGv_i64 tmp;
+ TCGv tmpl;
+ TCGv tmph;
/* Load 64-bit value rd:rn. */
- tmp = tcg_temp_new(TCG_TYPE_I64);
- tmp2 = load_reg(s, rhigh);
- tcg_gen_extu_i32_i64(tmp, tmp2);
- dead_tmp(tmp2);
- tcg_gen_shli_i64(tmp, tmp, 32);
- tcg_gen_add_i64(val, val, tmp);
-
- tmp2 = load_reg(s, rlow);
- tcg_gen_extu_i32_i64(tmp, tmp2);
- dead_tmp(tmp2);
+ tmpl = load_reg(s, rlow);
+ tmph = load_reg(s, rhigh);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
+ dead_tmp(tmpl);
+ dead_tmp(tmph);
tcg_gen_add_i64(val, val, tmp);
}
/* Set N and Z flags from a 64-bit value. */
-static void gen_logicq_cc(TCGv val)
+static void gen_logicq_cc(TCGv_i64 val)
{
TCGv tmp = new_tmp();
gen_helper_logicq_cc(tmp, val);
TCGv tmp2;
TCGv tmp3;
TCGv addr;
+ TCGv_i64 tmp64;
insn = ldl_code(s->pc);
s->pc += 4;
/* Coprocessor double register transfer. */
} else if ((insn & 0x0f000010) == 0x0e000010) {
/* Additional coprocessor register transfer. */
- } else if ((insn & 0x0ff10010) == 0x01000000) {
+ } else if ((insn & 0x0ff10020) == 0x01000000) {
uint32_t mask;
uint32_t val;
/* cps (privileged) */
if (insn & (1 << 18))
val |= mask;
}
- if (insn & (1 << 14)) {
+ if (insn & (1 << 17)) {
mask |= CPSR_M;
val |= (insn & 0x1f);
}
} else {
/* MOVT */
tmp = load_reg(s, rd);
- tcg_gen_andi_i32(tmp, tmp, 0xffff);
+ tcg_gen_ext16u_i32(tmp, tmp);
tcg_gen_ori_i32(tmp, tmp, val << 16);
}
store_reg(s, rd, tmp);
case 0x5: /* saturating add/subtract */
rd = (insn >> 12) & 0xf;
rn = (insn >> 16) & 0xf;
- tmp = load_reg(s, rn);
+ tmp = load_reg(s, rm);
tmp2 = load_reg(s, rn);
if (op1 & 2)
gen_helper_double_saturate(tmp2, tmp2);
tcg_gen_sari_i32(tmp2, tmp2, 16);
else
gen_sxth(tmp2);
- tmp2 = gen_muls_i64_i32(tmp, tmp2);
- tcg_gen_shri_i64(tmp2, tmp2, 16);
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
+ tcg_gen_shri_i64(tmp64, tmp64, 16);
tmp = new_tmp();
- tcg_gen_trunc_i64_i32(tmp, tmp2);
+ tcg_gen_trunc_i64_i32(tmp, tmp64);
if ((sh & 2) == 0) {
tmp2 = load_reg(s, rn);
gen_helper_add_setq(tmp, tmp, tmp2);
gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
dead_tmp(tmp2);
if (op1 == 2) {
- tmp = tcg_temp_new(TCG_TYPE_I64);
- tcg_gen_ext_i32_i64(tmp, cpu_T[0]);
- gen_addq(s, tmp, rn, rd);
- gen_storeq_reg(s, rn, rd, tmp);
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp64, tmp);
+ dead_tmp(tmp);
+ gen_addq(s, tmp64, rn, rd);
+ gen_storeq_reg(s, rn, rd, tmp64);
} else {
if (op1 == 0) {
tmp2 = load_reg(s, rn);
/* immediate operand */
val = insn & 0xff;
shift = ((insn >> 8) & 0xf) * 2;
- if (shift)
+ if (shift) {
val = (val >> shift) | (val << (32 - shift));
- gen_op_movl_T1_im(val);
- if (logic_cc && shift)
- gen_set_CF_bit31(cpu_T[1]);
+ }
+ tmp2 = new_tmp();
+ tcg_gen_movi_i32(tmp2, val);
+ if (logic_cc && shift) {
+ gen_set_CF_bit31(tmp2);
+ }
} else {
/* register */
rm = (insn) & 0xf;
- gen_movl_T1_reg(s, rm);
+ tmp2 = load_reg(s, rm);
shiftop = (insn >> 5) & 3;
if (!(insn & (1 << 4))) {
shift = (insn >> 7) & 0x1f;
- gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
+ gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
} else {
rs = (insn >> 8) & 0xf;
tmp = load_reg(s, rs);
- gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
+ gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
}
}
if (op1 != 0x0f && op1 != 0x0d) {
rn = (insn >> 16) & 0xf;
- gen_movl_T0_reg(s, rn);
+ tmp = load_reg(s, rn);
+ } else {
+ TCGV_UNUSED(tmp);
}
rd = (insn >> 12) & 0xf;
switch(op1) {
case 0x00:
- gen_op_andl_T0_T1();
- gen_movl_reg_T0(s, rd);
- if (logic_cc)
- gen_op_logic_T0_cc();
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x01:
- gen_op_xorl_T0_T1();
- gen_movl_reg_T0(s, rd);
- if (logic_cc)
- gen_op_logic_T0_cc();
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x02:
if (set_cc && rd == 15) {
/* SUBS r15, ... is used for exception return. */
- if (IS_USER(s))
+ if (IS_USER(s)) {
goto illegal_op;
- gen_op_subl_T0_T1_cc();
- gen_exception_return(s);
+ }
+ gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_exception_return(s, tmp);
} else {
- if (set_cc)
- gen_op_subl_T0_T1_cc();
- else
- gen_op_subl_T0_T1();
- gen_movl_reg_T0(s, rd);
+ if (set_cc) {
+ gen_helper_sub_cc(tmp, tmp, tmp2);
+ } else {
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ }
+ store_reg_bx(env, s, rd, tmp);
}
break;
case 0x03:
- if (set_cc)
- gen_op_rsbl_T0_T1_cc();
- else
- gen_op_rsbl_T0_T1();
- gen_movl_reg_T0(s, rd);
+ if (set_cc) {
+ gen_helper_sub_cc(tmp, tmp2, tmp);
+ } else {
+ tcg_gen_sub_i32(tmp, tmp2, tmp);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x04:
- if (set_cc)
- gen_op_addl_T0_T1_cc();
- else
- gen_op_addl_T0_T1();
- gen_movl_reg_T0(s, rd);
+ if (set_cc) {
+ gen_helper_add_cc(tmp, tmp, tmp2);
+ } else {
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x05:
- if (set_cc)
- gen_op_adcl_T0_T1_cc();
- else
- gen_adc_T0_T1();
- gen_movl_reg_T0(s, rd);
+ if (set_cc) {
+ gen_helper_adc_cc(tmp, tmp, tmp2);
+ } else {
+ gen_add_carry(tmp, tmp, tmp2);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x06:
- if (set_cc)
- gen_op_sbcl_T0_T1_cc();
- else
- gen_sbc_T0_T1();
- gen_movl_reg_T0(s, rd);
+ if (set_cc) {
+ gen_helper_sbc_cc(tmp, tmp, tmp2);
+ } else {
+ gen_sub_carry(tmp, tmp, tmp2);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x07:
- if (set_cc)
- gen_op_rscl_T0_T1_cc();
- else
- gen_rsc_T0_T1();
- gen_movl_reg_T0(s, rd);
+ if (set_cc) {
+ gen_helper_sbc_cc(tmp, tmp2, tmp);
+ } else {
+ gen_sub_carry(tmp, tmp2, tmp);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x08:
if (set_cc) {
- gen_op_andl_T0_T1();
- gen_op_logic_T0_cc();
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ gen_logic_CC(tmp);
}
+ dead_tmp(tmp);
break;
case 0x09:
if (set_cc) {
- gen_op_xorl_T0_T1();
- gen_op_logic_T0_cc();
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ gen_logic_CC(tmp);
}
+ dead_tmp(tmp);
break;
case 0x0a:
if (set_cc) {
- gen_op_subl_T0_T1_cc();
+ gen_helper_sub_cc(tmp, tmp, tmp2);
}
+ dead_tmp(tmp);
break;
case 0x0b:
if (set_cc) {
- gen_op_addl_T0_T1_cc();
+ gen_helper_add_cc(tmp, tmp, tmp2);
}
+ dead_tmp(tmp);
break;
case 0x0c:
- gen_op_orl_T0_T1();
- gen_movl_reg_T0(s, rd);
- if (logic_cc)
- gen_op_logic_T0_cc();
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x0d:
if (logic_cc && rd == 15) {
/* MOVS r15, ... is used for exception return. */
- if (IS_USER(s))
+ if (IS_USER(s)) {
goto illegal_op;
- gen_op_movl_T0_T1();
- gen_exception_return(s);
+ }
+ gen_exception_return(s, tmp2);
} else {
- gen_movl_reg_T1(s, rd);
- if (logic_cc)
- gen_op_logic_T1_cc();
+ if (logic_cc) {
+ gen_logic_CC(tmp2);
+ }
+ store_reg_bx(env, s, rd, tmp2);
}
break;
case 0x0e:
- gen_op_bicl_T0_T1();
- gen_movl_reg_T0(s, rd);
- if (logic_cc)
- gen_op_logic_T0_cc();
+ tcg_gen_bic_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
default:
case 0x0f:
- gen_op_notl_T1();
- gen_movl_reg_T1(s, rd);
- if (logic_cc)
- gen_op_logic_T1_cc();
+ tcg_gen_not_i32(tmp2, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp2);
+ }
+ store_reg_bx(env, s, rd, tmp2);
break;
}
+ if (op1 != 0x0f && op1 != 0x0d) {
+ dead_tmp(tmp2);
+ }
} else {
/* other instructions */
op1 = (insn >> 24) & 0xf;
tmp = load_reg(s, rs);
tmp2 = load_reg(s, rm);
if (insn & (1 << 22))
- tmp = gen_muls_i64_i32(tmp, tmp2);
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
else
- tmp = gen_mulu_i64_i32(tmp, tmp2);
+ tmp64 = gen_mulu_i64_i32(tmp, tmp2);
if (insn & (1 << 21)) /* mult accumulate */
- gen_addq(s, tmp, rn, rd);
+ gen_addq(s, tmp64, rn, rd);
if (!(insn & (1 << 23))) { /* double accumulate */
ARCH(6);
- gen_addq_lo(s, tmp, rn);
- gen_addq_lo(s, tmp, rd);
+ gen_addq_lo(s, tmp64, rn);
+ gen_addq_lo(s, tmp64, rd);
}
if (insn & (1 << 20))
- gen_logicq_cc(tmp);
- gen_storeq_reg(s, rn, rd, tmp);
+ gen_logicq_cc(tmp64);
+ gen_storeq_reg(s, rn, rd, tmp64);
break;
}
} else {
rd = (insn >> 12) & 0xf;
if (insn & (1 << 23)) {
/* load/store exclusive */
+ op1 = (insn >> 21) & 0x3;
+ if (op1)
+ ARCH(6K);
+ else
+ ARCH(6);
gen_movl_T1_reg(s, rn);
addr = cpu_T[1];
if (insn & (1 << 20)) {
gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
- tmp = gen_ld32(addr, IS_USER(s));
+ switch (op1) {
+ case 0: /* ldrex */
+ tmp = gen_ld32(addr, IS_USER(s));
+ break;
+ case 1: /* ldrexd */
+ tmp = gen_ld32(addr, IS_USER(s));
+ store_reg(s, rd, tmp);
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = gen_ld32(addr, IS_USER(s));
+ rd++;
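+ /* The second loaded word is written by the common store_reg()
+ after the switch, which now targets rd + 1. */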
+ break;
+ case 2: /* ldrexb */
+ tmp = gen_ld8u(addr, IS_USER(s));
+ break;
+ case 3: /* ldrexh */
+ tmp = gen_ld16u(addr, IS_USER(s));
+ break;
+ default:
+ abort();
+ }
store_reg(s, rd, tmp);
} else {
int label = gen_new_label();
rm = insn & 0xf;
gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
- tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0],
- tcg_const_i32(0), label);
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
+ 0, label);
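+ /* test_exclusive leaves 0 in T0 when the monitor still covers addr;
+ a nonzero status branches past the store and is copied to rd below. */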
tmp = load_reg(s,rm);
- gen_st32(tmp, cpu_T[1], IS_USER(s));
+ switch (op1) {
+ case 0: /* strex */
+ gen_st32(tmp, addr, IS_USER(s));
+ break;
+ case 1: /* strexd */
+ gen_st32(tmp, addr, IS_USER(s));
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = load_reg(s, rm + 1);
+ gen_st32(tmp, addr, IS_USER(s));
+ break;
+ case 2: /* strexb */
+ gen_st8(tmp, addr, IS_USER(s));
+ break;
+ case 3: /* strexh */
+ gen_st16(tmp, addr, IS_USER(s));
+ break;
+ default:
+ abort();
+ }
gen_set_label(label);
gen_movl_reg_T0(s, rd);
}
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
shift = (insn >> 7) & 0x1f;
- if (shift)
- tcg_gen_shli_i32(tmp2, tmp2, shift);
if (insn & (1 << 6)) {
/* pkhtb */
+ if (shift == 0)
+ shift = 31;
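+ /* shift == 0 encodes ASR #32 for pkhtb; ASR #31 yields the same
+ low halfword (all sign bits) and stays within TCG's shift range. */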
+ tcg_gen_sari_i32(tmp2, tmp2, shift);
tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
- tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
+ tcg_gen_ext16u_i32(tmp2, tmp2);
} else {
/* pkhbt */
- tcg_gen_andi_i32(tmp, tmp, 0xffff);
+ if (shift)
+ tcg_gen_shli_i32(tmp2, tmp2, shift);
+ tcg_gen_ext16u_i32(tmp, tmp);
tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
}
tcg_gen_or_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x00200020) == 0x00200000) {
/* [us]sat */
if (insn & (1 << 7))
gen_rev16(tmp);
else
- tcg_gen_bswap_i32(tmp, tmp);
+ tcg_gen_bswap32_i32(tmp, tmp);
}
store_reg(s, rd, tmp);
} else {
tmp2 = load_reg(s, rs);
if (insn & (1 << 20)) {
/* Signed multiply most significant [accumulate]. */
- tmp2 = gen_muls_i64_i32(tmp, tmp2);
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
if (insn & (1 << 5))
- tcg_gen_addi_i64(tmp2, tmp2, 0x80000000u);
- tcg_gen_shri_i64(tmp2, tmp2, 32);
+ tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
+ tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = new_tmp();
- tcg_gen_trunc_i64_i32(tmp, tmp2);
- if (rn != 15) {
- tmp2 = load_reg(s, rn);
+ tcg_gen_trunc_i64_i32(tmp, tmp64);
+ if (rd != 15) {
+ tmp2 = load_reg(s, rd);
if (insn & (1 << 6)) {
tcg_gen_sub_i32(tmp, tmp, tmp2);
} else {
}
dead_tmp(tmp2);
}
- store_reg(s, rd, tmp);
+ store_reg(s, rn, tmp);
} else {
if (insn & (1 << 5))
gen_swap_half(tmp2);
dead_tmp(tmp2);
if (insn & (1 << 22)) {
/* smlald, smlsld */
- tmp2 = tcg_temp_new(TCG_TYPE_I64);
- tcg_gen_ext_i32_i64(tmp2, tmp);
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp64, tmp);
dead_tmp(tmp);
- gen_addq(s, tmp2, rn, rd);
- gen_storeq_reg(s, rn, rd, tmp2);
+ gen_addq(s, tmp64, rd, rn);
+ gen_storeq_reg(s, rd, rn, tmp64);
} else {
/* smuad, smusd, smlad, smlsd */
- if (rn != 15)
+ if (rd != 15)
{
- tmp2 = load_reg(s, rn);
+ tmp2 = load_reg(s, rd);
gen_helper_add_setq(tmp, tmp, tmp2);
dead_tmp(tmp2);
}
- store_reg(s, rd, tmp);
+ store_reg(s, rn, tmp);
}
}
break;
tmp2 = load_reg(s, rs);
gen_helper_usad8(tmp, tmp, tmp2);
dead_tmp(tmp2);
- if (rn != 15) {
- tmp2 = load_reg(s, rn);
+ if (rd != 15) {
+ tmp2 = load_reg(s, rd);
tcg_gen_add_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
}
- store_reg(s, rd, tmp);
+ store_reg(s, rn, tmp);
break;
case 0x20: case 0x24: case 0x28: case 0x2c:
/* Bitfield insert/clear. */
break;
case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
+ ARCH(6T2);
tmp = load_reg(s, rm);
shift = (insn >> 7) & 0x1f;
i = ((insn >> 16) & 0x1f) + 1;
gen_add_data_offset(s, insn, tmp2);
if (insn & (1 << 20)) {
/* load */
- s->is_mem = 1;
if (insn & (1 << 22)) {
tmp = gen_ld8u(tmp2, i);
} else {
/* compute total size */
loaded_base = 0;
+ TCGV_UNUSED(loaded_var);
n = 0;
for(i=0;i<16;i++) {
if (insn & (1 << i))
TCGv tmp2;
TCGv tmp3;
TCGv addr;
+ TCGv_i64 tmp64;
int op;
int shiftop;
int conds;
if (!(arm_feature(env, ARM_FEATURE_THUMB2)
|| arm_feature (env, ARM_FEATURE_M))) {
- /* Thumb-1 cores may need to tread bl and blx as a pair of
+ /* Thumb-1 cores may need to treat bl and blx as a pair of
16-bit instructions to get correct prefetch abort behavior. */
insn = insn_hw1;
if ((insn & (1 << 12)) == 0) {
/* Second half of bl. */
offset = ((insn & 0x7ff) << 1) | 1;
tmp = load_reg(s, 14);
- tcg_gen_addi_i32(tmp, tmp, 14);
+ tcg_gen_addi_i32(tmp, tmp, offset);
tmp2 = new_tmp();
tcg_gen_movi_i32(tmp2, s->pc | 1);
} else {
int label = gen_new_label();
gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
- tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0],
- tcg_const_i32(0), label);
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
+ 0, label);
tmp = load_reg(s, rs);
gen_st32(tmp, cpu_T[1], IS_USER(s));
gen_set_label(label);
int label = gen_new_label();
/* Must use a global that is not killed by the branch. */
gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
- tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0], tcg_const_i32(0),
- label);
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
tmp = load_reg(s, rs);
switch (op) {
case 0:
gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
if (logic_cc)
gen_logic_CC(tmp);
- store_reg(s, rd, tmp);
+ store_reg_bx(env, s, rd, tmp);
break;
case 1: /* Sign/zero extend. */
tmp = load_reg(s, rm);
gen_helper_rbit(tmp, tmp);
break;
case 0x08: /* rev */
- tcg_gen_bswap_i32(tmp, tmp);
+ tcg_gen_bswap32_i32(tmp, tmp);
break;
case 0x09: /* rev16 */
gen_rev16(tmp);
tcg_gen_sari_i32(tmp2, tmp2, 16);
else
gen_sxth(tmp2);
- tmp2 = gen_muls_i64_i32(tmp, tmp2);
- tcg_gen_shri_i64(tmp2, tmp2, 16);
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
+ tcg_gen_shri_i64(tmp64, tmp64, 16);
tmp = new_tmp();
- tcg_gen_trunc_i64_i32(tmp, tmp2);
+ tcg_gen_trunc_i64_i32(tmp, tmp64);
if (rs != 15)
{
tmp2 = load_reg(s, rs);
tcg_gen_add_i32(tmp, tmp, tmp2);
}
dead_tmp(tmp2);
- tmp2 = tcg_temp_new(TCG_TYPE_I64);
- gen_addq(s, tmp, rs, rd);
- gen_storeq_reg(s, rs, rd, tmp);
+ /* BUGFIX */
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp64, tmp);
+ dead_tmp(tmp);
+ gen_addq(s, tmp64, rs, rd);
+ gen_storeq_reg(s, rs, rd, tmp64);
} else {
if (op & 0x20) {
/* Unsigned 64-bit multiply */
- tmp = gen_mulu_i64_i32(tmp, tmp2);
+ tmp64 = gen_mulu_i64_i32(tmp, tmp2);
} else {
if (op & 8) {
/* smlalxy */
gen_mulxy(tmp, tmp2, op & 2, op & 1);
dead_tmp(tmp2);
- tmp2 = tcg_temp_new(TCG_TYPE_I64);
- tcg_gen_ext_i32_i64(tmp2, tmp);
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp64, tmp);
dead_tmp(tmp);
- tmp = tmp2;
} else {
/* Signed 64-bit multiply */
- tmp = gen_muls_i64_i32(tmp, tmp2);
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
}
}
if (op & 4) {
/* umaal */
- gen_addq_lo(s, tmp, rs);
- gen_addq_lo(s, tmp, rd);
+ gen_addq_lo(s, tmp64, rs);
+ gen_addq_lo(s, tmp64, rd);
} else if (op & 0x40) {
/* 64-bit accumulate. */
- gen_addq(s, tmp, rs, rd);
+ gen_addq(s, tmp64, rs, rd);
}
- gen_storeq_reg(s, rs, rd, tmp);
+ gen_storeq_reg(s, rs, rd, tmp64);
}
break;
}
if (insn & (1 << 23)) {
/* movt */
tmp = load_reg(s, rd);
- tcg_gen_andi_i32(tmp, tmp, 0xffff);
+ tcg_gen_ext16u_i32(tmp, tmp);
tcg_gen_ori_i32(tmp, tmp, imm << 16);
} else {
/* movw */
break;
case 0x9: /* neg */
if (s->condexec_mask)
- gen_op_subl_T0_T1();
+ tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
else
gen_op_subl_T0_T1_cc();
break;
tmp = load_reg(s, 13);
val = (insn & 0x7f) * 4;
if (insn & (1 << 7))
- val = -(int32_t)val;
+ val = -(int32_t)val;
tcg_gen_addi_i32(tmp, tmp, val);
store_reg(s, 13, tmp);
break;
tcg_gen_addi_i32(addr, addr, 4);
}
}
+ TCGV_UNUSED(tmp);
if (insn & (1 << 8)) {
if (insn & (1 << 11)) {
/* pop pc */
case 1: case 3: case 9: case 11: /* czb */
rm = insn & 7;
tmp = load_reg(s, rm);
- tmp2 = tcg_const_i32(0);
s->condlabel = gen_new_label();
s->condjmp = 1;
if (insn & (1 << 11))
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, tmp2, s->condlabel);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
else
- tcg_gen_brcond_i32(TCG_COND_NE, tmp, tmp2, s->condlabel);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
dead_tmp(tmp);
offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
val = (uint32_t)s->pc + 2;
rd = insn & 0x7;
tmp = load_reg(s, rn);
switch ((insn >> 6) & 3) {
- case 0: tcg_gen_bswap_i32(tmp, tmp); break;
+ case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
case 1: gen_rev16(tmp); break;
case 3: gen_revsh(tmp); break;
default: goto illegal_op;
if (cond == 0xf) {
/* swi */
gen_set_condexec(s);
- gen_set_pc_im(s->pc | 1);
+ gen_set_pc_im(s->pc);
s->is_jmp = DISAS_SWI;
break;
}
case 15:
if (disas_thumb2_insn(env, s, insn))
- goto undef32;
+ goto undef32;
break;
}
return;
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
-static inline int gen_intermediate_code_internal(CPUState *env,
- TranslationBlock *tb,
- int search_pc)
+static inline void gen_intermediate_code_internal(CPUState *env,
+ TranslationBlock *tb,
+ int search_pc)
{
DisasContext dc1, *dc = &dc1;
+ CPUBreakpoint *bp;
uint16_t *gen_opc_end;
int j, lj;
target_ulong pc_start;
uint32_t next_page_start;
+ int num_insns;
+ int max_insns;
/* generate intermediate code */
num_temps = 0;
dc->thumb = env->thumb;
dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
dc->condexec_cond = env->condexec_bits >> 4;
- dc->is_mem = 0;
#if !defined(CONFIG_USER_ONLY)
if (IS_M(env)) {
dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
}
#endif
- cpu_F0s = tcg_temp_new(TCG_TYPE_I32);
- cpu_F1s = tcg_temp_new(TCG_TYPE_I32);
- cpu_F0d = tcg_temp_new(TCG_TYPE_I64);
- cpu_F1d = tcg_temp_new(TCG_TYPE_I64);
+ cpu_F0s = tcg_temp_new_i32();
+ cpu_F1s = tcg_temp_new_i32();
+ cpu_F0d = tcg_temp_new_i64();
+ cpu_F1d = tcg_temp_new_i64();
cpu_V0 = cpu_F0d;
cpu_V1 = cpu_F1d;
/* FIXME: cpu_M0 can probably be the same as cpu_V0. */
- cpu_M0 = tcg_temp_new(TCG_TYPE_I64);
+ cpu_M0 = tcg_temp_new_i64();
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
lj = -1;
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
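+ /* A count of zero in cflags means no limit was requested; use the
+ largest encodable value so the loop bound below still applies. */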
+ if (max_insns == 0)
+ max_insns = CF_COUNT_MASK;
+
+ gen_icount_start();
/* Reset the conditional execution bits immediately. This avoids
complications trying to do it at the end of the block. */
if (env->condexec_bits)
store_cpu_field(tmp, condexec_bits);
}
do {
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ /* Intercept jump to the magic kernel page. */
+ if (dc->pc >= 0xffff0000) {
+ /* We always get here via a jump, so know we are not in a
+ conditional execution block. */
+ gen_exception(EXCP_KERNEL_TRAP);
+ dc->is_jmp = DISAS_UPDATE;
+ break;
+ }
+#else
if (dc->pc >= 0xfffffff0 && IS_M(env)) {
/* We always get here via a jump, so know we are not in a
conditional execution block. */
gen_exception(EXCP_EXCEPTION_EXIT);
+ dc->is_jmp = DISAS_UPDATE;
+ break;
}
#endif
- if (env->nb_breakpoints > 0) {
- for(j = 0; j < env->nb_breakpoints; j++) {
- if (env->breakpoints[j] == dc->pc) {
+ if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
+ TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (bp->pc == dc->pc) {
gen_set_condexec(dc);
gen_set_pc_im(dc->pc);
gen_exception(EXCP_DEBUG);
}
gen_opc_pc[lj] = dc->pc;
gen_opc_instr_start[lj] = 1;
+ gen_opc_icount[lj] = num_insns;
}
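+ /* CF_LAST_IO marks a TB whose final instruction may access I/O;
+ bracket that instruction with gen_io_start()/gen_io_end() so the
+ instruction counter stays consistent. */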
+ if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
+ gen_io_start();
+
if (env->thumb) {
disas_thumb_insn(env, dc);
if (dc->condexec_mask) {
gen_set_label(dc->condlabel);
dc->condjmp = 0;
}
- /* Terminate the TB on memory ops if watchpoints are present. */
- /* FIXME: This should be replacd by the deterministic execution
- * IRQ raising bits. */
- if (dc->is_mem && env->nb_watchpoints)
- break;
-
- /* Translation stops when a conditional branch is enoutered.
+ /* Translation stops when a conditional branch is encountered.
* Otherwise the subsequent code could get translated several times.
* Also stop translation when a page boundary is reached. This
- * ensures prefech aborts occur at the right place. */
+ * ensures prefetch aborts occur at the right place. */
+ num_insns ++;
} while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
!env->singlestep_enabled &&
- dc->pc < next_page_start);
+ !singlestep &&
+ dc->pc < next_page_start &&
+ num_insns < max_insns);
+
+ if (tb->cflags & CF_LAST_IO) {
+ if (dc->condjmp) {
+ /* FIXME: This can theoretically happen with self-modifying
+ code. */
+ cpu_abort(env, "IO on conditional branch instruction");
+ }
+ gen_io_end();
+ }
/* At this stage dc->condjmp will only be set when the skipped
instruction was a conditional branch or trap, and the PC has
already been written. */
- if (__builtin_expect(env->singlestep_enabled, 0)) {
+ if (unlikely(env->singlestep_enabled)) {
/* Make sure the pc is updated, and raise a debug exception. */
if (dc->condjmp) {
gen_set_condexec(dc);
dc->condjmp = 0;
}
}
+
done_generating:
+ gen_icount_end(tb, num_insns);
*gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
- if (loglevel & CPU_LOG_TB_IN_ASM) {
- fprintf(logfile, "----------------\n");
- fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
- target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
- fprintf(logfile, "\n");
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ qemu_log("----------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
+ qemu_log("\n");
}
#endif
if (search_pc) {
gen_opc_instr_start[lj++] = 0;
} else {
tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
}
- return 0;
}
-int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
- return gen_intermediate_code_internal(env, tb, 0);
+ gen_intermediate_code_internal(env, tb, 0);
}
-int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
- return gen_intermediate_code_internal(env, tb, 1);
+ gen_intermediate_code_internal(env, tb, 1);
}
static const char *cpu_mode_names[16] = {
int flags)
{
int i;
+#if 0
union {
uint32_t i;
float s;
float64 f64;
double d;
} d0;
+#endif
uint32_t psr;
for(i=0;i<16;i++) {
#endif
}
+void gen_pc_load(CPUState *env, TranslationBlock *tb,
+ unsigned long searched_pc, int pc_pos, void *puc)
+{
+ env->regs[15] = gen_opc_pc[pc_pos];
+}