4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
38 #define ENABLE_ARCH_5J 0
39 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
40 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
41 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
42 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
44 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
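/* E.g. a decoder case for a Thumb-2 only encoding starts with ARCH(6T2);
   on cores without Thumb-2 this branches to the enclosing function's
   illegal_op label and the insn is treated as undefined.  */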
46 /* internal defines */
47 typedef struct DisasContext {
50 /* Nonzero if this instruction has been conditionally skipped. */
52 /* The label that will be jumped to when the instruction is skipped. */
54 * Thumb-2 conditional execution bits.
57 struct TranslationBlock *tb;
58 int singlestep_enabled;
60 #if !defined(CONFIG_USER_ONLY)
65 #if defined(CONFIG_USER_ONLY)
68 #define IS_USER(s) (s->user)
71 /* These instructions trap after executing, so defer them until after the
72 conditional execution state has been updated. */
76 static TCGv_ptr cpu_env;
77 /* We reuse the same 64-bit temporaries for efficiency. */
78 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
80 /* FIXME: These should be removed. */
82 static TCGv cpu_F0s, cpu_F1s;
83 static TCGv_i64 cpu_F0d, cpu_F1d;
85 #define ICOUNT_TEMP cpu_T[0]
86 #include "gen-icount.h"
88 /* Initialize TCG globals. */
89 void arm_translate_init(void)
91 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
93 cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
94 cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
100 /* The code generator doesn't like lots of temporaries, so maintain our own
101 cache for reuse within a function. */
103 static int num_temps;
104 static TCGv temps[MAX_TEMPS];
106 /* Allocate a temporary variable. */
107 static TCGv_i32 new_tmp(void)
110 if (num_temps == MAX_TEMPS)
113 if (GET_TCGV_I32(temps[num_temps]))
114 return temps[num_temps++];
116 tmp = tcg_temp_new_i32();
117 temps[num_temps++] = tmp;
121 /* Release a temporary variable. */
122 static void dead_tmp(TCGv tmp)
127 if (TCGV_EQUAL(temps[i], tmp))
130 /* Shuffle this temp to the last slot. */
131 while (!TCGV_EQUAL(temps[i], tmp))
133 while (i < num_temps) {
134 temps[i] = temps[i + 1];
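/* Typical usage: tmp = new_tmp(); ...emit ops on tmp...; dead_tmp(tmp).
   Every new_tmp() must be balanced by exactly one dead_tmp() before the
   end of the instruction being translated, or temporaries leak.  */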
140 static inline TCGv load_cpu_offset(int offset)
142 TCGv tmp = new_tmp();
143 tcg_gen_ld_i32(tmp, cpu_env, offset);
147 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
149 static inline void store_cpu_offset(TCGv var, int offset)
151 tcg_gen_st_i32(var, cpu_env, offset);
155 #define store_cpu_field(var, name) \
156 store_cpu_offset(var, offsetof(CPUState, name))
158 /* Set a variable to the value of a CPU register. */
159 static void load_reg_var(DisasContext *s, TCGv var, int reg)
163 /* normally, since we updated PC, we need only add one insn */
165 addr = (long)s->pc + 2;
167 addr = (long)s->pc + 4;
168 tcg_gen_movi_i32(var, addr);
170 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
174 /* Create a new temporary and set it to the value of a CPU register. */
175 static inline TCGv load_reg(DisasContext *s, int reg)
177 TCGv tmp = new_tmp();
178 load_reg_var(s, tmp, reg);
182 /* Set a CPU register. The source must be a temporary and will be
183 marked as dead. */
184 static void store_reg(DisasContext *s, int reg, TCGv var)
187 tcg_gen_andi_i32(var, var, ~1);
188 s->is_jmp = DISAS_JUMP;
190 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
195 /* Basic operations. */
196 #define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
197 #define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
198 #define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
200 #define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
201 #define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
202 #define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
203 #define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
205 #define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
206 #define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
207 #define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
208 #define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
209 #define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
210 #define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
212 #define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
213 #define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
214 #define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
215 #define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
216 #define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
217 #define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
218 #define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
220 #define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
221 #define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
223 /* Value extensions. */
224 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
225 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
226 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
227 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
229 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
230 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
232 #define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
234 #define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
235 /* Set NZCV flags from the high 4 bits of var. */
236 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
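/* N, Z, C and V occupy CPSR bits [31:28], so a value carrying the flags
   in its top four bits can be written directly using the CPSR_NZCV
   mask.  */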
238 static void gen_exception(int excp)
240 TCGv tmp = new_tmp();
241 tcg_gen_movi_i32(tmp, excp);
242 gen_helper_exception(tmp);
246 static void gen_smul_dual(TCGv a, TCGv b)
248 TCGv tmp1 = new_tmp();
249 TCGv tmp2 = new_tmp();
250 tcg_gen_ext16s_i32(tmp1, a);
251 tcg_gen_ext16s_i32(tmp2, b);
252 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
254 tcg_gen_sari_i32(a, a, 16);
255 tcg_gen_sari_i32(b, b, 16);
256 tcg_gen_mul_i32(b, b, a);
257 tcg_gen_mov_i32(a, tmp1);
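/* On return a holds the signed product of the low halfwords and b the
   signed product of the high halfwords.  */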
261 /* Byteswap each halfword. */
262 static void gen_rev16(TCGv var)
264 TCGv tmp = new_tmp();
265 tcg_gen_shri_i32(tmp, var, 8);
266 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
267 tcg_gen_shli_i32(var, var, 8);
268 tcg_gen_andi_i32(var, var, 0xff00ff00);
269 tcg_gen_or_i32(var, var, tmp);
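/* E.g. 0xaabbccdd becomes 0xbbaaddcc: each halfword is swapped
   independently.  */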
273 /* Byteswap low halfword and sign extend. */
274 static void gen_revsh(TCGv var)
276 TCGv tmp = new_tmp();
277 tcg_gen_shri_i32(tmp, var, 8);
278 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
279 tcg_gen_ext8s_i32(var, var);
280 tcg_gen_shli_i32(var, var, 8);
281 tcg_gen_or_i32(var, var, tmp);
285 /* Unsigned bitfield extract. */
286 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
289 tcg_gen_shri_i32(var, var, shift);
290 tcg_gen_andi_i32(var, var, mask);
293 /* Signed bitfield extract. */
294 static void gen_sbfx(TCGv var, int shift, int width)
299 tcg_gen_sari_i32(var, var, shift);
300 if (shift + width < 32) {
301 signbit = 1u << (width - 1);
302 tcg_gen_andi_i32(var, var, (1u << width) - 1);
303 tcg_gen_xori_i32(var, var, signbit);
304 tcg_gen_subi_i32(var, var, signbit);
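/* The xor/sub pair above sign extends a width-bit value without a
   variable-width extension op: (x ^ signbit) - signbit.  E.g. with
   width == 8, 0xff gives (0xff ^ 0x80) - 0x80 = -1 and 0x7f stays
   0x7f.  */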
308 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
309 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
311 tcg_gen_andi_i32(val, val, mask);
312 tcg_gen_shli_i32(val, val, shift);
313 tcg_gen_andi_i32(base, base, ~(mask << shift));
314 tcg_gen_or_i32(dest, base, val);
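/* I.e. dest = (base & ~(mask << shift)) | ((val & mask) << shift),
   where mask covers the unshifted field, e.g. 0xff for an 8-bit
   insert.  */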
317 /* Round the top 32 bits of a 64-bit value. */
318 static void gen_roundqd(TCGv a, TCGv b)
320 tcg_gen_shri_i32(a, a, 31);
321 tcg_gen_add_i32(a, a, b);
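/* a holds the low word and b the high word; adding bit 31 of the low
   word to the high word leaves the rounded top half in a.  */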
324 /* FIXME: Most targets have native widening multiplication.
325 It would be good to use that instead of a full wide multiply. */
326 /* 32x32->64 multiply. Marks inputs as dead. */
327 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
329 TCGv_i64 tmp1 = tcg_temp_new_i64();
330 TCGv_i64 tmp2 = tcg_temp_new_i64();
332 tcg_gen_extu_i32_i64(tmp1, a);
334 tcg_gen_extu_i32_i64(tmp2, b);
336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
340 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
342 TCGv_i64 tmp1 = tcg_temp_new_i64();
343 TCGv_i64 tmp2 = tcg_temp_new_i64();
345 tcg_gen_ext_i32_i64(tmp1, a);
347 tcg_gen_ext_i32_i64(tmp2, b);
349 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
353 /* Unsigned 32x32->64 multiply. */
354 static void gen_op_mull_T0_T1(void)
356 TCGv_i64 tmp1 = tcg_temp_new_i64();
357 TCGv_i64 tmp2 = tcg_temp_new_i64();
359 tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
360 tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
361 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
362 tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
363 tcg_gen_shri_i64(tmp1, tmp1, 32);
364 tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
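/* The 64-bit product is returned with its low half in T0 and its high
   half in T1.  */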
367 /* Signed 32x32->64 multiply. */
368 static void gen_imull(TCGv a, TCGv b)
370 TCGv_i64 tmp1 = tcg_temp_new_i64();
371 TCGv_i64 tmp2 = tcg_temp_new_i64();
373 tcg_gen_ext_i32_i64(tmp1, a);
374 tcg_gen_ext_i32_i64(tmp2, b);
375 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
376 tcg_gen_trunc_i64_i32(a, tmp1);
377 tcg_gen_shri_i64(tmp1, tmp1, 32);
378 tcg_gen_trunc_i64_i32(b, tmp1);
381 /* Swap low and high halfwords. */
382 static void gen_swap_half(TCGv var)
384 TCGv tmp = new_tmp();
385 tcg_gen_shri_i32(tmp, var, 16);
386 tcg_gen_shli_i32(var, var, 16);
387 tcg_gen_or_i32(var, var, tmp);
391 /* Dual 16-bit add. Result is placed in t0; t1 is marked as dead.
392 tmp = (t0 ^ t1) & 0x8000;
393 t0 &= ~0x8000;
394 t1 &= ~0x8000;
395 t0 = (t0 + t1) ^ tmp;
396 */
398 static void gen_add16(TCGv t0, TCGv t1)
400 TCGv tmp = new_tmp();
401 tcg_gen_xor_i32(tmp, t0, t1);
402 tcg_gen_andi_i32(tmp, tmp, 0x8000);
403 tcg_gen_andi_i32(t0, t0, ~0x8000);
404 tcg_gen_andi_i32(t1, t1, ~0x8000);
405 tcg_gen_add_i32(t0, t0, t1);
406 tcg_gen_xor_i32(t0, t0, tmp);
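/* Only bit 15 needs special handling: clearing it in both operands
   stops the low halfword's carry from rippling into the high one, and
   xoring the saved bit back in restores the true bit 15 of the low
   sum.  A carry out of bit 31 falls off the end by itself.  */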
411 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
413 /* Set CF to the top bit of var. */
414 static void gen_set_CF_bit31(TCGv var)
416 TCGv tmp = new_tmp();
417 tcg_gen_shri_i32(tmp, var, 31);
422 /* Set N and Z flags from var. */
423 static inline void gen_logic_CC(TCGv var)
425 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
426 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
430 static void gen_adc_T0_T1(void)
434 tmp = load_cpu_field(CF);
435 tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
439 /* dest = T0 - T1 + CF - 1. */
440 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
443 tcg_gen_sub_i32(dest, t0, t1);
444 tmp = load_cpu_field(CF);
445 tcg_gen_add_i32(dest, dest, tmp);
446 tcg_gen_subi_i32(dest, dest, 1);
450 #define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
451 #define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
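/* ARM treats C as "not borrow" for subtraction, hence T0 - T1 + CF - 1:
   with C set this is a plain subtract, with C clear one extra is
   subtracted.  */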
453 /* T0 &= ~T1. Clobbers T1. */
454 /* FIXME: Implement bic natively. */
455 static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
457 TCGv tmp = new_tmp();
458 tcg_gen_not_i32(tmp, t1);
459 tcg_gen_and_i32(dest, t0, tmp);
462 static inline void gen_op_bicl_T0_T1(void)
468 /* FIXME: Implement this natively. */
469 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
471 /* FIXME: Implement this natively. */
472 static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
480 tcg_gen_shri_i32(tmp, t1, i);
481 tcg_gen_shli_i32(t1, t1, 32 - i);
482 tcg_gen_or_i32(t0, t1, tmp);
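/* The rotate is composed from two shifts: (t1 >> i) | (t1 << (32 - i)).
   E.g. rotating 0x80000001 right by one gives 0xc0000000.  */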
486 static void shifter_out_im(TCGv var, int shift)
488 TCGv tmp = new_tmp();
490 tcg_gen_andi_i32(tmp, var, 1);
492 tcg_gen_shri_i32(tmp, var, shift);
494 tcg_gen_andi_i32(tmp, tmp, 1);
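/* The bit extracted here is the last one shifted out of var; the ARM
   barrel shifter copies it into the carry flag.  */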
500 /* Shift by immediate. Includes special handling for shift == 0. */
501 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
507 shifter_out_im(var, 32 - shift);
508 tcg_gen_shli_i32(var, var, shift);
514 tcg_gen_shri_i32(var, var, 31);
517 tcg_gen_movi_i32(var, 0);
520 shifter_out_im(var, shift - 1);
521 tcg_gen_shri_i32(var, var, shift);
528 shifter_out_im(var, shift - 1);
531 tcg_gen_sari_i32(var, var, shift);
533 case 3: /* ROR/RRX */
536 shifter_out_im(var, shift - 1);
537 tcg_gen_rori_i32(var, var, shift); break;
539 TCGv tmp = load_cpu_field(CF);
541 shifter_out_im(var, 0);
542 tcg_gen_shri_i32(var, var, 1);
543 tcg_gen_shli_i32(tmp, tmp, 31);
544 tcg_gen_or_i32(var, var, tmp);
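/* The shift == 0 special cases above mirror the ARM immediate shift
   encoding: LSR #0 means LSR #32, ASR #0 means ASR #32 and ROR #0
   means RRX (rotate right by one through carry).  */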
550 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
551 TCGv shift, int flags)
555 case 0: gen_helper_shl_cc(var, var, shift); break;
556 case 1: gen_helper_shr_cc(var, var, shift); break;
557 case 2: gen_helper_sar_cc(var, var, shift); break;
558 case 3: gen_helper_ror_cc(var, var, shift); break;
562 case 0: gen_helper_shl(var, var, shift); break;
563 case 1: gen_helper_shr(var, var, shift); break;
564 case 2: gen_helper_sar(var, var, shift); break;
565 case 3: gen_helper_ror(var, var, shift); break;
571 #define PAS_OP(pfx) \
573 case 0: gen_pas_helper(glue(pfx,add16)); break; \
574 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
575 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
576 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
577 case 4: gen_pas_helper(glue(pfx,add8)); break; \
578 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
580 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
585 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
587 tmp = tcg_temp_new_ptr();
588 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
592 tmp = tcg_temp_new_ptr();
593 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
596 #undef gen_pas_helper
597 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
610 #undef gen_pas_helper
615 /* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings. */
616 #define PAS_OP(pfx) \
618 case 0: gen_pas_helper(glue(pfx,add8)); break; \
619 case 1: gen_pas_helper(glue(pfx,add16)); break; \
620 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
621 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
622 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
623 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
625 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
630 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
632 tmp = tcg_temp_new_ptr();
633 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
637 tmp = tcg_temp_new_ptr();
638 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
641 #undef gen_pas_helper
642 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
655 #undef gen_pas_helper
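/* Generate a branch to "label" taken when condition "cc" holds.  The
   flags are kept in a partly computed form: ZF is zero iff Z is set,
   CF holds the carry as 0 or 1, and N and V are the sign bits of NF
   and VF, which is why the tests below reduce to compares against
   zero.  */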
660 static void gen_test_cc(int cc, int label)
668 tmp = load_cpu_field(ZF);
669 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
672 tmp = load_cpu_field(ZF);
673 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
676 tmp = load_cpu_field(CF);
677 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
680 tmp = load_cpu_field(CF);
681 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
684 tmp = load_cpu_field(NF);
685 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
688 tmp = load_cpu_field(NF);
689 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
692 tmp = load_cpu_field(VF);
693 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
696 tmp = load_cpu_field(VF);
697 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
699 case 8: /* hi: C && !Z */
700 inv = gen_new_label();
701 tmp = load_cpu_field(CF);
702 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
704 tmp = load_cpu_field(ZF);
705 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
708 case 9: /* ls: !C || Z */
709 tmp = load_cpu_field(CF);
710 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
712 tmp = load_cpu_field(ZF);
713 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
715 case 10: /* ge: N == V -> N ^ V == 0 */
716 tmp = load_cpu_field(VF);
717 tmp2 = load_cpu_field(NF);
718 tcg_gen_xor_i32(tmp, tmp, tmp2);
720 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
722 case 11: /* lt: N != V -> N ^ V != 0 */
723 tmp = load_cpu_field(VF);
724 tmp2 = load_cpu_field(NF);
725 tcg_gen_xor_i32(tmp, tmp, tmp2);
727 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
729 case 12: /* gt: !Z && N == V */
730 inv = gen_new_label();
731 tmp = load_cpu_field(ZF);
732 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
734 tmp = load_cpu_field(VF);
735 tmp2 = load_cpu_field(NF);
736 tcg_gen_xor_i32(tmp, tmp, tmp2);
738 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
741 case 13: /* le: Z || N != V */
742 tmp = load_cpu_field(ZF);
743 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
745 tmp = load_cpu_field(VF);
746 tmp2 = load_cpu_field(NF);
747 tcg_gen_xor_i32(tmp, tmp, tmp2);
749 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
752 fprintf(stderr, "Bad condition code 0x%x\n", cc);
758 static const uint8_t table_logic_cc[16] = {
777 /* Set PC and Thumb state from an immediate address. */
778 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
782 s->is_jmp = DISAS_UPDATE;
784 if (s->thumb != (addr & 1)) {
785 tcg_gen_movi_i32(tmp, addr & 1);
786 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
788 tcg_gen_movi_i32(tmp, addr & ~1);
789 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
793 /* Set PC and Thumb state from var. var is marked as dead. */
794 static inline void gen_bx(DisasContext *s, TCGv var)
798 s->is_jmp = DISAS_UPDATE;
800 tcg_gen_andi_i32(tmp, var, 1);
801 store_cpu_field(tmp, thumb);
802 tcg_gen_andi_i32(var, var, ~1);
803 store_cpu_field(var, regs[15]);
806 /* TODO: This should be removed. Use gen_bx instead. */
807 static inline void gen_bx_T0(DisasContext *s)
809 TCGv tmp = new_tmp();
810 tcg_gen_mov_i32(tmp, cpu_T[0]);
814 static inline TCGv gen_ld8s(TCGv addr, int index)
816 TCGv tmp = new_tmp();
817 tcg_gen_qemu_ld8s(tmp, addr, index);
820 static inline TCGv gen_ld8u(TCGv addr, int index)
822 TCGv tmp = new_tmp();
823 tcg_gen_qemu_ld8u(tmp, addr, index);
826 static inline TCGv gen_ld16s(TCGv addr, int index)
828 TCGv tmp = new_tmp();
829 tcg_gen_qemu_ld16s(tmp, addr, index);
832 static inline TCGv gen_ld16u(TCGv addr, int index)
834 TCGv tmp = new_tmp();
835 tcg_gen_qemu_ld16u(tmp, addr, index);
838 static inline TCGv gen_ld32(TCGv addr, int index)
840 TCGv tmp = new_tmp();
841 tcg_gen_qemu_ld32u(tmp, addr, index);
844 static inline void gen_st8(TCGv val, TCGv addr, int index)
846 tcg_gen_qemu_st8(val, addr, index);
849 static inline void gen_st16(TCGv val, TCGv addr, int index)
851 tcg_gen_qemu_st16(val, addr, index);
854 static inline void gen_st32(TCGv val, TCGv addr, int index)
856 tcg_gen_qemu_st32(val, addr, index);
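/* The index argument of the gen_ld* and gen_st* helpers above is the
   MMU mode used for the access, normally IS_USER(s), so memory is
   touched with the privilege of the translated code.  */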
860 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
862 load_reg_var(s, cpu_T[0], reg);
865 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
867 load_reg_var(s, cpu_T[1], reg);
870 static inline void gen_movl_T2_reg(DisasContext *s, int reg)
872 load_reg_var(s, cpu_T[2], reg);
875 static inline void gen_set_pc_im(uint32_t val)
877 TCGv tmp = new_tmp();
878 tcg_gen_movi_i32(tmp, val);
879 store_cpu_field(tmp, regs[15]);
882 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
887 tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
891 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
894 s->is_jmp = DISAS_JUMP;
898 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
900 gen_movl_reg_TN(s, reg, 0);
903 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
905 gen_movl_reg_TN(s, reg, 1);
908 /* Force a TB lookup after an instruction that changes the CPU state. */
909 static inline void gen_lookup_tb(DisasContext *s)
911 gen_op_movl_T0_im(s->pc);
912 gen_movl_reg_T0(s, 15);
913 s->is_jmp = DISAS_UPDATE;
916 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
919 int val, rm, shift, shiftop;
922 if (!(insn & (1 << 25))) {
925 if (!(insn & (1 << 23)))
928 tcg_gen_addi_i32(var, var, val);
932 shift = (insn >> 7) & 0x1f;
933 shiftop = (insn >> 5) & 3;
934 offset = load_reg(s, rm);
935 gen_arm_shift_im(offset, shiftop, shift, 0);
936 if (!(insn & (1 << 23)))
937 tcg_gen_sub_i32(var, var, offset);
939 tcg_gen_add_i32(var, var, offset);
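/* Bit 25 of the instruction selects an immediate versus a shifted
   register offset, and bit 23 (the U bit) selects whether the offset
   is added to or subtracted from the base.  */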
944 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
950 if (insn & (1 << 22)) {
952 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
953 if (!(insn & (1 << 23)))
957 tcg_gen_addi_i32(var, var, val);
961 tcg_gen_addi_i32(var, var, extra);
963 offset = load_reg(s, rm);
964 if (!(insn & (1 << 23)))
965 tcg_gen_sub_i32(var, var, offset);
967 tcg_gen_add_i32(var, var, offset);
972 #define VFP_OP2(name) \
973 static inline void gen_vfp_##name(int dp) \
976 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
978 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
988 static inline void gen_vfp_abs(int dp)
991 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
993 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
996 static inline void gen_vfp_neg(int dp)
999 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1001 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1004 static inline void gen_vfp_sqrt(int dp)
1007 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1009 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1012 static inline void gen_vfp_cmp(int dp)
1015 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1017 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1020 static inline void gen_vfp_cmpe(int dp)
1023 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1025 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1028 static inline void gen_vfp_F1_ld0(int dp)
1031 tcg_gen_movi_i64(cpu_F1d, 0);
1033 tcg_gen_movi_i32(cpu_F1s, 0);
1036 static inline void gen_vfp_uito(int dp)
1039 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1041 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1044 static inline void gen_vfp_sito(int dp)
1047 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
1049 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
1052 static inline void gen_vfp_toui(int dp)
1055 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
1057 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
1060 static inline void gen_vfp_touiz(int dp)
1063 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
1065 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
1068 static inline void gen_vfp_tosi(int dp)
1071 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
1073 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1076 static inline void gen_vfp_tosiz(int dp)
1079 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
1081 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1084 #define VFP_GEN_FIX(name) \
1085 static inline void gen_vfp_##name(int dp, int shift) \
1088 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
1090 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
1102 static inline void gen_vfp_ld(DisasContext *s, int dp)
1105 tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
1107 tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
1110 static inline void gen_vfp_st(DisasContext *s, int dp)
1113 tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
1115 tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
1119 vfp_reg_offset (int dp, int reg)
1122 return offsetof(CPUARMState, vfp.regs[reg]);
1124 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1125 + offsetof(CPU_DoubleU, l.upper);
1127 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1128 + offsetof(CPU_DoubleU, l.lower);
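/* Each double register d<n> overlays two singles: s<2n> in the low
   word and s<2n+1> in the high word, hence the reg >> 1 and reg & 1
   arithmetic above.  */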
1132 /* Return the offset of a 32-bit piece of a NEON register.
1133 Zero is the least significant end of the register. */
1135 neon_reg_offset (int reg, int n)
1139 return vfp_reg_offset(0, sreg);
1142 /* FIXME: Remove these. */
1143 #define neon_T0 cpu_T[0]
1144 #define neon_T1 cpu_T[1]
1145 #define NEON_GET_REG(T, reg, n) \
1146 tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
1147 #define NEON_SET_REG(T, reg, n) \
1148 tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
1150 static TCGv neon_load_reg(int reg, int pass)
1152 TCGv tmp = new_tmp();
1153 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1157 static void neon_store_reg(int reg, int pass, TCGv var)
1159 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1163 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1165 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1168 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1170 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1173 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1174 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1175 #define tcg_gen_st_f32 tcg_gen_st_i32
1176 #define tcg_gen_st_f64 tcg_gen_st_i64
1178 static inline void gen_mov_F0_vreg(int dp, int reg)
1181 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1183 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1186 static inline void gen_mov_F1_vreg(int dp, int reg)
1189 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1191 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1194 static inline void gen_mov_vreg_F0(int dp, int reg)
1197 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1199 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1202 #define ARM_CP_RW_BIT (1 << 20)
1204 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1206 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1209 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1211 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1214 static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
1216 tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1219 static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
1221 tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1224 static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
1226 tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1229 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1231 iwmmxt_store_reg(cpu_M0, rn);
1234 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1236 iwmmxt_load_reg(cpu_M0, rn);
1239 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1241 iwmmxt_load_reg(cpu_V1, rn);
1242 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1245 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1247 iwmmxt_load_reg(cpu_V1, rn);
1248 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1251 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1253 iwmmxt_load_reg(cpu_V1, rn);
1254 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1257 #define IWMMXT_OP(name) \
1258 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1260 iwmmxt_load_reg(cpu_V1, rn); \
1261 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1264 #define IWMMXT_OP_ENV(name) \
1265 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1267 iwmmxt_load_reg(cpu_V1, rn); \
1268 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1271 #define IWMMXT_OP_ENV_SIZE(name) \
1272 IWMMXT_OP_ENV(name##b) \
1273 IWMMXT_OP_ENV(name##w) \
1274 IWMMXT_OP_ENV(name##l)
1276 #define IWMMXT_OP_ENV1(name) \
1277 static inline void gen_op_iwmmxt_##name##_M0(void) \
1279 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1293 IWMMXT_OP_ENV_SIZE(unpackl)
1294 IWMMXT_OP_ENV_SIZE(unpackh)
1296 IWMMXT_OP_ENV1(unpacklub)
1297 IWMMXT_OP_ENV1(unpackluw)
1298 IWMMXT_OP_ENV1(unpacklul)
1299 IWMMXT_OP_ENV1(unpackhub)
1300 IWMMXT_OP_ENV1(unpackhuw)
1301 IWMMXT_OP_ENV1(unpackhul)
1302 IWMMXT_OP_ENV1(unpacklsb)
1303 IWMMXT_OP_ENV1(unpacklsw)
1304 IWMMXT_OP_ENV1(unpacklsl)
1305 IWMMXT_OP_ENV1(unpackhsb)
1306 IWMMXT_OP_ENV1(unpackhsw)
1307 IWMMXT_OP_ENV1(unpackhsl)
1309 IWMMXT_OP_ENV_SIZE(cmpeq)
1310 IWMMXT_OP_ENV_SIZE(cmpgtu)
1311 IWMMXT_OP_ENV_SIZE(cmpgts)
1313 IWMMXT_OP_ENV_SIZE(mins)
1314 IWMMXT_OP_ENV_SIZE(minu)
1315 IWMMXT_OP_ENV_SIZE(maxs)
1316 IWMMXT_OP_ENV_SIZE(maxu)
1318 IWMMXT_OP_ENV_SIZE(subn)
1319 IWMMXT_OP_ENV_SIZE(addn)
1320 IWMMXT_OP_ENV_SIZE(subu)
1321 IWMMXT_OP_ENV_SIZE(addu)
1322 IWMMXT_OP_ENV_SIZE(subs)
1323 IWMMXT_OP_ENV_SIZE(adds)
1325 IWMMXT_OP_ENV(avgb0)
1326 IWMMXT_OP_ENV(avgb1)
1327 IWMMXT_OP_ENV(avgw0)
1328 IWMMXT_OP_ENV(avgw1)
1332 IWMMXT_OP_ENV(packuw)
1333 IWMMXT_OP_ENV(packul)
1334 IWMMXT_OP_ENV(packuq)
1335 IWMMXT_OP_ENV(packsw)
1336 IWMMXT_OP_ENV(packsl)
1337 IWMMXT_OP_ENV(packsq)
1339 static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
1341 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1344 static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
1346 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1349 static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
1351 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1354 static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
1356 iwmmxt_load_reg(cpu_V1, rn);
1357 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
1360 static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
1362 TCGv tmp = tcg_const_i32(shift);
1363 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
1366 static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
1368 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1369 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1370 tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
1373 static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
1375 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1376 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1377 tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
1380 static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
1382 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1383 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1385 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
1388 static void gen_op_iwmmxt_set_mup(void)
1391 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1392 tcg_gen_ori_i32(tmp, tmp, 2);
1393 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1396 static void gen_op_iwmmxt_set_cup(void)
1399 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1400 tcg_gen_ori_i32(tmp, tmp, 1);
1401 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1404 static void gen_op_iwmmxt_setpsr_nz(void)
1406 TCGv tmp = new_tmp();
1407 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1408 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1411 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1413 iwmmxt_load_reg(cpu_V1, rn);
1414 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1415 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1419 static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
1421 iwmmxt_load_reg(cpu_V0, rn);
1422 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
1423 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1424 tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
1427 static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
1429 tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
1430 iwmmxt_store_reg(cpu_V0, rn);
1433 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
1438 rd = (insn >> 16) & 0xf;
1439 gen_movl_T1_reg(s, rd);
1441 offset = (insn & 0xff) << ((insn >> 7) & 2);
1442 if (insn & (1 << 24)) {
1444 if (insn & (1 << 23))
1445 gen_op_addl_T1_im(offset);
1447 gen_op_addl_T1_im(-offset);
1449 if (insn & (1 << 21))
1450 gen_movl_reg_T1(s, rd);
1451 } else if (insn & (1 << 21)) {
1453 if (insn & (1 << 23))
1454 gen_op_movl_T0_im(offset);
1456 gen_op_movl_T0_im(- offset);
1457 gen_op_addl_T0_T1();
1458 gen_movl_reg_T0(s, rd);
1459 } else if (!(insn & (1 << 23)))
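/* Bits 24, 23 and 21 are the usual P, U and W addressing bits:
   pre/post indexing, offset add/subtract and base writeback.  */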
1464 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1466 int rd = (insn >> 0) & 0xf;
1468 if (insn & (1 << 8))
1469 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1472 gen_op_iwmmxt_movl_T0_wCx(rd);
1474 gen_iwmmxt_movl_T0_T1_wRn(rd);
1476 gen_op_movl_T1_im(mask);
1477 gen_op_andl_T0_T1();
1481 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1482 (i.e. an undefined instruction). */
1483 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1486 int rdhi, rdlo, rd0, rd1, i;
1489 if ((insn & 0x0e000e00) == 0x0c000000) {
1490 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1492 rdlo = (insn >> 12) & 0xf;
1493 rdhi = (insn >> 16) & 0xf;
1494 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1495 gen_iwmmxt_movl_T0_T1_wRn(wrd);
1496 gen_movl_reg_T0(s, rdlo);
1497 gen_movl_reg_T1(s, rdhi);
1498 } else { /* TMCRR */
1499 gen_movl_T0_reg(s, rdlo);
1500 gen_movl_T1_reg(s, rdhi);
1501 gen_iwmmxt_movl_wRn_T0_T1(wrd);
1502 gen_op_iwmmxt_set_mup();
1507 wrd = (insn >> 12) & 0xf;
1508 if (gen_iwmmxt_address(s, insn))
1510 if (insn & ARM_CP_RW_BIT) {
1511 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1512 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1513 tcg_gen_mov_i32(cpu_T[0], tmp);
1515 gen_op_iwmmxt_movl_wCx_T0(wrd);
1518 if (insn & (1 << 8)) {
1519 if (insn & (1 << 22)) { /* WLDRD */
1520 tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
1522 } else { /* WLDRW wRd */
1523 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1526 if (insn & (1 << 22)) { /* WLDRH */
1527 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
1528 } else { /* WLDRB */
1529 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
1533 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1536 gen_op_iwmmxt_movq_wRn_M0(wrd);
1539 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1540 gen_op_iwmmxt_movl_T0_wCx(wrd);
1542 tcg_gen_mov_i32(tmp, cpu_T[0]);
1543 gen_st32(tmp, cpu_T[1], IS_USER(s));
1545 gen_op_iwmmxt_movq_M0_wRn(wrd);
1547 if (insn & (1 << 8)) {
1548 if (insn & (1 << 22)) { /* WSTRD */
1550 tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
1551 } else { /* WSTRW wRd */
1552 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1553 gen_st32(tmp, cpu_T[1], IS_USER(s));
1556 if (insn & (1 << 22)) { /* WSTRH */
1557 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1558 gen_st16(tmp, cpu_T[1], IS_USER(s));
1559 } else { /* WSTRB */
1560 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1561 gen_st8(tmp, cpu_T[1], IS_USER(s));
1569 if ((insn & 0x0f000000) != 0x0e000000)
1572 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1573 case 0x000: /* WOR */
1574 wrd = (insn >> 12) & 0xf;
1575 rd0 = (insn >> 0) & 0xf;
1576 rd1 = (insn >> 16) & 0xf;
1577 gen_op_iwmmxt_movq_M0_wRn(rd0);
1578 gen_op_iwmmxt_orq_M0_wRn(rd1);
1579 gen_op_iwmmxt_setpsr_nz();
1580 gen_op_iwmmxt_movq_wRn_M0(wrd);
1581 gen_op_iwmmxt_set_mup();
1582 gen_op_iwmmxt_set_cup();
1584 case 0x011: /* TMCR */
1587 rd = (insn >> 12) & 0xf;
1588 wrd = (insn >> 16) & 0xf;
1590 case ARM_IWMMXT_wCID:
1591 case ARM_IWMMXT_wCASF:
1593 case ARM_IWMMXT_wCon:
1594 gen_op_iwmmxt_set_cup();
1596 case ARM_IWMMXT_wCSSF:
1597 gen_op_iwmmxt_movl_T0_wCx(wrd);
1598 gen_movl_T1_reg(s, rd);
1599 gen_op_bicl_T0_T1();
1600 gen_op_iwmmxt_movl_wCx_T0(wrd);
1602 case ARM_IWMMXT_wCGR0:
1603 case ARM_IWMMXT_wCGR1:
1604 case ARM_IWMMXT_wCGR2:
1605 case ARM_IWMMXT_wCGR3:
1606 gen_op_iwmmxt_set_cup();
1607 gen_movl_reg_T0(s, rd);
1608 gen_op_iwmmxt_movl_wCx_T0(wrd);
1614 case 0x100: /* WXOR */
1615 wrd = (insn >> 12) & 0xf;
1616 rd0 = (insn >> 0) & 0xf;
1617 rd1 = (insn >> 16) & 0xf;
1618 gen_op_iwmmxt_movq_M0_wRn(rd0);
1619 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1620 gen_op_iwmmxt_setpsr_nz();
1621 gen_op_iwmmxt_movq_wRn_M0(wrd);
1622 gen_op_iwmmxt_set_mup();
1623 gen_op_iwmmxt_set_cup();
1625 case 0x111: /* TMRC */
1628 rd = (insn >> 12) & 0xf;
1629 wrd = (insn >> 16) & 0xf;
1630 gen_op_iwmmxt_movl_T0_wCx(wrd);
1631 gen_movl_reg_T0(s, rd);
1633 case 0x300: /* WANDN */
1634 wrd = (insn >> 12) & 0xf;
1635 rd0 = (insn >> 0) & 0xf;
1636 rd1 = (insn >> 16) & 0xf;
1637 gen_op_iwmmxt_movq_M0_wRn(rd0);
1638 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1639 gen_op_iwmmxt_andq_M0_wRn(rd1);
1640 gen_op_iwmmxt_setpsr_nz();
1641 gen_op_iwmmxt_movq_wRn_M0(wrd);
1642 gen_op_iwmmxt_set_mup();
1643 gen_op_iwmmxt_set_cup();
1645 case 0x200: /* WAND */
1646 wrd = (insn >> 12) & 0xf;
1647 rd0 = (insn >> 0) & 0xf;
1648 rd1 = (insn >> 16) & 0xf;
1649 gen_op_iwmmxt_movq_M0_wRn(rd0);
1650 gen_op_iwmmxt_andq_M0_wRn(rd1);
1651 gen_op_iwmmxt_setpsr_nz();
1652 gen_op_iwmmxt_movq_wRn_M0(wrd);
1653 gen_op_iwmmxt_set_mup();
1654 gen_op_iwmmxt_set_cup();
1656 case 0x810: case 0xa10: /* WMADD */
1657 wrd = (insn >> 12) & 0xf;
1658 rd0 = (insn >> 0) & 0xf;
1659 rd1 = (insn >> 16) & 0xf;
1660 gen_op_iwmmxt_movq_M0_wRn(rd0);
1661 if (insn & (1 << 21))
1662 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1664 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1665 gen_op_iwmmxt_movq_wRn_M0(wrd);
1666 gen_op_iwmmxt_set_mup();
1668 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1669 wrd = (insn >> 12) & 0xf;
1670 rd0 = (insn >> 16) & 0xf;
1671 rd1 = (insn >> 0) & 0xf;
1672 gen_op_iwmmxt_movq_M0_wRn(rd0);
1673 switch ((insn >> 22) & 3) {
1675 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1678 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1681 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1686 gen_op_iwmmxt_movq_wRn_M0(wrd);
1687 gen_op_iwmmxt_set_mup();
1688 gen_op_iwmmxt_set_cup();
1690 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1691 wrd = (insn >> 12) & 0xf;
1692 rd0 = (insn >> 16) & 0xf;
1693 rd1 = (insn >> 0) & 0xf;
1694 gen_op_iwmmxt_movq_M0_wRn(rd0);
1695 switch ((insn >> 22) & 3) {
1697 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1700 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1703 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1708 gen_op_iwmmxt_movq_wRn_M0(wrd);
1709 gen_op_iwmmxt_set_mup();
1710 gen_op_iwmmxt_set_cup();
1712 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1713 wrd = (insn >> 12) & 0xf;
1714 rd0 = (insn >> 16) & 0xf;
1715 rd1 = (insn >> 0) & 0xf;
1716 gen_op_iwmmxt_movq_M0_wRn(rd0);
1717 if (insn & (1 << 22))
1718 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1720 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1721 if (!(insn & (1 << 20)))
1722 gen_op_iwmmxt_addl_M0_wRn(wrd);
1723 gen_op_iwmmxt_movq_wRn_M0(wrd);
1724 gen_op_iwmmxt_set_mup();
1726 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1727 wrd = (insn >> 12) & 0xf;
1728 rd0 = (insn >> 16) & 0xf;
1729 rd1 = (insn >> 0) & 0xf;
1730 gen_op_iwmmxt_movq_M0_wRn(rd0);
1731 if (insn & (1 << 21)) {
1732 if (insn & (1 << 20))
1733 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1735 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1737 if (insn & (1 << 20))
1738 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1740 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1742 gen_op_iwmmxt_movq_wRn_M0(wrd);
1743 gen_op_iwmmxt_set_mup();
1745 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1746 wrd = (insn >> 12) & 0xf;
1747 rd0 = (insn >> 16) & 0xf;
1748 rd1 = (insn >> 0) & 0xf;
1749 gen_op_iwmmxt_movq_M0_wRn(rd0);
1750 if (insn & (1 << 21))
1751 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1753 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1754 if (!(insn & (1 << 20))) {
1755 iwmmxt_load_reg(cpu_V1, wrd);
1756 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1758 gen_op_iwmmxt_movq_wRn_M0(wrd);
1759 gen_op_iwmmxt_set_mup();
1761 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1762 wrd = (insn >> 12) & 0xf;
1763 rd0 = (insn >> 16) & 0xf;
1764 rd1 = (insn >> 0) & 0xf;
1765 gen_op_iwmmxt_movq_M0_wRn(rd0);
1766 switch ((insn >> 22) & 3) {
1768 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1771 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1774 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1779 gen_op_iwmmxt_movq_wRn_M0(wrd);
1780 gen_op_iwmmxt_set_mup();
1781 gen_op_iwmmxt_set_cup();
1783 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1784 wrd = (insn >> 12) & 0xf;
1785 rd0 = (insn >> 16) & 0xf;
1786 rd1 = (insn >> 0) & 0xf;
1787 gen_op_iwmmxt_movq_M0_wRn(rd0);
1788 if (insn & (1 << 22)) {
1789 if (insn & (1 << 20))
1790 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1792 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1794 if (insn & (1 << 20))
1795 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1797 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1799 gen_op_iwmmxt_movq_wRn_M0(wrd);
1800 gen_op_iwmmxt_set_mup();
1801 gen_op_iwmmxt_set_cup();
1803 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1804 wrd = (insn >> 12) & 0xf;
1805 rd0 = (insn >> 16) & 0xf;
1806 rd1 = (insn >> 0) & 0xf;
1807 gen_op_iwmmxt_movq_M0_wRn(rd0);
1808 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1809 gen_op_movl_T1_im(7);
1810 gen_op_andl_T0_T1();
1811 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1812 gen_op_iwmmxt_movq_wRn_M0(wrd);
1813 gen_op_iwmmxt_set_mup();
1815 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1816 rd = (insn >> 12) & 0xf;
1817 wrd = (insn >> 16) & 0xf;
1818 gen_movl_T0_reg(s, rd);
1819 gen_op_iwmmxt_movq_M0_wRn(wrd);
1820 switch ((insn >> 6) & 3) {
1822 gen_op_movl_T1_im(0xff);
1823 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1826 gen_op_movl_T1_im(0xffff);
1827 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1830 gen_op_movl_T1_im(0xffffffff);
1831 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1836 gen_op_iwmmxt_movq_wRn_M0(wrd);
1837 gen_op_iwmmxt_set_mup();
1839 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1840 rd = (insn >> 12) & 0xf;
1841 wrd = (insn >> 16) & 0xf;
1844 gen_op_iwmmxt_movq_M0_wRn(wrd);
1845 switch ((insn >> 22) & 3) {
1848 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1850 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
1855 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1857 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
1861 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
1866 gen_movl_reg_T0(s, rd);
1868 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1869 if ((insn & 0x000ff008) != 0x0003f000)
1871 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1872 switch ((insn >> 22) & 3) {
1874 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1877 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1880 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1885 gen_op_shll_T1_im(28);
1886 gen_set_nzcv(cpu_T[1]);
1888 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1889 rd = (insn >> 12) & 0xf;
1890 wrd = (insn >> 16) & 0xf;
1891 gen_movl_T0_reg(s, rd);
1892 switch ((insn >> 6) & 3) {
1894 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
1897 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
1900 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
1905 gen_op_iwmmxt_movq_wRn_M0(wrd);
1906 gen_op_iwmmxt_set_mup();
1908 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1909 if ((insn & 0x000ff00f) != 0x0003f000)
1911 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1912 switch ((insn >> 22) & 3) {
1914 for (i = 0; i < 7; i ++) {
1915 gen_op_shll_T1_im(4);
1916 gen_op_andl_T0_T1();
1920 for (i = 0; i < 3; i ++) {
1921 gen_op_shll_T1_im(8);
1922 gen_op_andl_T0_T1();
1926 gen_op_shll_T1_im(16);
1927 gen_op_andl_T0_T1();
1932 gen_set_nzcv(cpu_T[0]);
1934 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1935 wrd = (insn >> 12) & 0xf;
1936 rd0 = (insn >> 16) & 0xf;
1937 gen_op_iwmmxt_movq_M0_wRn(rd0);
1938 switch ((insn >> 22) & 3) {
1940 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1943 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1946 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1951 gen_op_iwmmxt_movq_wRn_M0(wrd);
1952 gen_op_iwmmxt_set_mup();
1954 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1955 if ((insn & 0x000ff00f) != 0x0003f000)
1957 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1958 switch ((insn >> 22) & 3) {
1960 for (i = 0; i < 7; i ++) {
1961 gen_op_shll_T1_im(4);
1966 for (i = 0; i < 3; i ++) {
1967 gen_op_shll_T1_im(8);
1972 gen_op_shll_T1_im(16);
1978 gen_set_nzcv(cpu_T[0]);
1980 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1981 rd = (insn >> 12) & 0xf;
1982 rd0 = (insn >> 16) & 0xf;
1983 if ((insn & 0xf) != 0)
1985 gen_op_iwmmxt_movq_M0_wRn(rd0);
1986 switch ((insn >> 22) & 3) {
1988 gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
1991 gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
1994 gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
1999 gen_movl_reg_T0(s, rd);
2001 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2002 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 rd1 = (insn >> 0) & 0xf;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0);
2007 switch ((insn >> 22) & 3) {
2009 if (insn & (1 << 21))
2010 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2012 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2015 if (insn & (1 << 21))
2016 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2018 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2021 if (insn & (1 << 21))
2022 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2024 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2029 gen_op_iwmmxt_movq_wRn_M0(wrd);
2030 gen_op_iwmmxt_set_mup();
2031 gen_op_iwmmxt_set_cup();
2033 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2034 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2035 wrd = (insn >> 12) & 0xf;
2036 rd0 = (insn >> 16) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0);
2038 switch ((insn >> 22) & 3) {
2040 if (insn & (1 << 21))
2041 gen_op_iwmmxt_unpacklsb_M0();
2043 gen_op_iwmmxt_unpacklub_M0();
2046 if (insn & (1 << 21))
2047 gen_op_iwmmxt_unpacklsw_M0();
2049 gen_op_iwmmxt_unpackluw_M0();
2052 if (insn & (1 << 21))
2053 gen_op_iwmmxt_unpacklsl_M0();
2055 gen_op_iwmmxt_unpacklul_M0();
2060 gen_op_iwmmxt_movq_wRn_M0(wrd);
2061 gen_op_iwmmxt_set_mup();
2062 gen_op_iwmmxt_set_cup();
2064 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2065 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2066 wrd = (insn >> 12) & 0xf;
2067 rd0 = (insn >> 16) & 0xf;
2068 gen_op_iwmmxt_movq_M0_wRn(rd0);
2069 switch ((insn >> 22) & 3) {
2071 if (insn & (1 << 21))
2072 gen_op_iwmmxt_unpackhsb_M0();
2074 gen_op_iwmmxt_unpackhub_M0();
2077 if (insn & (1 << 21))
2078 gen_op_iwmmxt_unpackhsw_M0();
2080 gen_op_iwmmxt_unpackhuw_M0();
2083 if (insn & (1 << 21))
2084 gen_op_iwmmxt_unpackhsl_M0();
2086 gen_op_iwmmxt_unpackhul_M0();
2091 gen_op_iwmmxt_movq_wRn_M0(wrd);
2092 gen_op_iwmmxt_set_mup();
2093 gen_op_iwmmxt_set_cup();
2095 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2096 case 0x214: case 0x614: case 0xa14: case 0xe14:
2097 wrd = (insn >> 12) & 0xf;
2098 rd0 = (insn >> 16) & 0xf;
2099 gen_op_iwmmxt_movq_M0_wRn(rd0);
2100 if (gen_iwmmxt_shift(insn, 0xff))
2102 switch ((insn >> 22) & 3) {
2106 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2109 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2112 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2115 gen_op_iwmmxt_movq_wRn_M0(wrd);
2116 gen_op_iwmmxt_set_mup();
2117 gen_op_iwmmxt_set_cup();
2119 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2120 case 0x014: case 0x414: case 0x814: case 0xc14:
2121 wrd = (insn >> 12) & 0xf;
2122 rd0 = (insn >> 16) & 0xf;
2123 gen_op_iwmmxt_movq_M0_wRn(rd0);
2124 if (gen_iwmmxt_shift(insn, 0xff))
2126 switch ((insn >> 22) & 3) {
2130 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2133 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2136 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2139 gen_op_iwmmxt_movq_wRn_M0(wrd);
2140 gen_op_iwmmxt_set_mup();
2141 gen_op_iwmmxt_set_cup();
2143 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2144 case 0x114: case 0x514: case 0x914: case 0xd14:
2145 wrd = (insn >> 12) & 0xf;
2146 rd0 = (insn >> 16) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0);
2148 if (gen_iwmmxt_shift(insn, 0xff))
2150 switch ((insn >> 22) & 3) {
2154 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2157 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2160 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2163 gen_op_iwmmxt_movq_wRn_M0(wrd);
2164 gen_op_iwmmxt_set_mup();
2165 gen_op_iwmmxt_set_cup();
2167 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2168 case 0x314: case 0x714: case 0xb14: case 0xf14:
2169 wrd = (insn >> 12) & 0xf;
2170 rd0 = (insn >> 16) & 0xf;
2171 gen_op_iwmmxt_movq_M0_wRn(rd0);
2172 switch ((insn >> 22) & 3) {
2176 if (gen_iwmmxt_shift(insn, 0xf))
2178 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2181 if (gen_iwmmxt_shift(insn, 0x1f))
2183 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2186 if (gen_iwmmxt_shift(insn, 0x3f))
2188 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2191 gen_op_iwmmxt_movq_wRn_M0(wrd);
2192 gen_op_iwmmxt_set_mup();
2193 gen_op_iwmmxt_set_cup();
2195 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2196 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2197 wrd = (insn >> 12) & 0xf;
2198 rd0 = (insn >> 16) & 0xf;
2199 rd1 = (insn >> 0) & 0xf;
2200 gen_op_iwmmxt_movq_M0_wRn(rd0);
2201 switch ((insn >> 22) & 3) {
2203 if (insn & (1 << 21))
2204 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2206 gen_op_iwmmxt_minub_M0_wRn(rd1);
2209 if (insn & (1 << 21))
2210 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2212 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2215 if (insn & (1 << 21))
2216 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2218 gen_op_iwmmxt_minul_M0_wRn(rd1);
2223 gen_op_iwmmxt_movq_wRn_M0(wrd);
2224 gen_op_iwmmxt_set_mup();
2226 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2227 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2228 wrd = (insn >> 12) & 0xf;
2229 rd0 = (insn >> 16) & 0xf;
2230 rd1 = (insn >> 0) & 0xf;
2231 gen_op_iwmmxt_movq_M0_wRn(rd0);
2232 switch ((insn >> 22) & 3) {
2234 if (insn & (1 << 21))
2235 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2237 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2240 if (insn & (1 << 21))
2241 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2243 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2246 if (insn & (1 << 21))
2247 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2249 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2257 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2258 case 0x402: case 0x502: case 0x602: case 0x702:
2259 wrd = (insn >> 12) & 0xf;
2260 rd0 = (insn >> 16) & 0xf;
2261 rd1 = (insn >> 0) & 0xf;
2262 gen_op_iwmmxt_movq_M0_wRn(rd0);
2263 gen_op_movl_T0_im((insn >> 20) & 3);
2264 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2265 gen_op_iwmmxt_movq_wRn_M0(wrd);
2266 gen_op_iwmmxt_set_mup();
2268 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2269 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2270 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2271 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 rd1 = (insn >> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0);
2276 switch ((insn >> 20) & 0xf) {
2278 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2281 gen_op_iwmmxt_subub_M0_wRn(rd1);
2284 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2287 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2290 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2293 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2296 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2299 gen_op_iwmmxt_subul_M0_wRn(rd1);
2302 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2307 gen_op_iwmmxt_movq_wRn_M0(wrd);
2308 gen_op_iwmmxt_set_mup();
2309 gen_op_iwmmxt_set_cup();
2311 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2312 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2313 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2314 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2315 wrd = (insn >> 12) & 0xf;
2316 rd0 = (insn >> 16) & 0xf;
2317 gen_op_iwmmxt_movq_M0_wRn(rd0);
2318 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
2319 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2320 gen_op_iwmmxt_movq_wRn_M0(wrd);
2321 gen_op_iwmmxt_set_mup();
2322 gen_op_iwmmxt_set_cup();
2324 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2325 case 0x418: case 0x518: case 0x618: case 0x718:
2326 case 0x818: case 0x918: case 0xa18: case 0xb18:
2327 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2328 wrd = (insn >> 12) & 0xf;
2329 rd0 = (insn >> 16) & 0xf;
2330 rd1 = (insn >> 0) & 0xf;
2331 gen_op_iwmmxt_movq_M0_wRn(rd0);
2332 switch ((insn >> 20) & 0xf) {
2334 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2337 gen_op_iwmmxt_addub_M0_wRn(rd1);
2340 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2343 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2346 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2349 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2352 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2355 gen_op_iwmmxt_addul_M0_wRn(rd1);
2358 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2363 gen_op_iwmmxt_movq_wRn_M0(wrd);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2367 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2368 case 0x408: case 0x508: case 0x608: case 0x708:
2369 case 0x808: case 0x908: case 0xa08: case 0xb08:
2370 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2371 wrd = (insn >> 12) & 0xf;
2372 rd0 = (insn >> 16) & 0xf;
2373 rd1 = (insn >> 0) & 0xf;
2374 gen_op_iwmmxt_movq_M0_wRn(rd0);
2375 if (!(insn & (1 << 20)))
2377 switch ((insn >> 22) & 3) {
2381 if (insn & (1 << 21))
2382 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2384 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2387 if (insn & (1 << 21))
2388 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2390 gen_op_iwmmxt_packul_M0_wRn(rd1);
2393 if (insn & (1 << 21))
2394 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2396 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2399 gen_op_iwmmxt_movq_wRn_M0(wrd);
2400 gen_op_iwmmxt_set_mup();
2401 gen_op_iwmmxt_set_cup();
2403 case 0x201: case 0x203: case 0x205: case 0x207:
2404 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2405 case 0x211: case 0x213: case 0x215: case 0x217:
2406 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2407 wrd = (insn >> 5) & 0xf;
2408 rd0 = (insn >> 12) & 0xf;
2409 rd1 = (insn >> 0) & 0xf;
2410 if (rd0 == 0xf || rd1 == 0xf)
2412 gen_op_iwmmxt_movq_M0_wRn(wrd);
2413 switch ((insn >> 16) & 0xf) {
2414 case 0x0: /* TMIA */
2415 gen_movl_T0_reg(s, rd0);
2416 gen_movl_T1_reg(s, rd1);
2417 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2419 case 0x8: /* TMIAPH */
2420 gen_movl_T0_reg(s, rd0);
2421 gen_movl_T1_reg(s, rd1);
2422 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2424 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2425 gen_movl_T1_reg(s, rd0);
2426 if (insn & (1 << 16))
2427 gen_op_shrl_T1_im(16);
2428 gen_op_movl_T0_T1();
2429 gen_movl_T1_reg(s, rd1);
2430 if (insn & (1 << 17))
2431 gen_op_shrl_T1_im(16);
2432 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2437 gen_op_iwmmxt_movq_wRn_M0(wrd);
2438 gen_op_iwmmxt_set_mup();
2447 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2448 (i.e. an undefined instruction). */
2449 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2451 int acc, rd0, rd1, rdhi, rdlo;
2453 if ((insn & 0x0ff00f10) == 0x0e200010) {
2454 /* Multiply with Internal Accumulate Format */
2455 rd0 = (insn >> 12) & 0xf;
2457 acc = (insn >> 5) & 7;
2462 switch ((insn >> 16) & 0xf) {
2464 gen_movl_T0_reg(s, rd0);
2465 gen_movl_T1_reg(s, rd1);
2466 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2468 case 0x8: /* MIAPH */
2469 gen_movl_T0_reg(s, rd0);
2470 gen_movl_T1_reg(s, rd1);
2471 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2473 case 0xc: /* MIABB */
2474 case 0xd: /* MIABT */
2475 case 0xe: /* MIATB */
2476 case 0xf: /* MIATT */
2477 gen_movl_T1_reg(s, rd0);
2478 if (insn & (1 << 16))
2479 gen_op_shrl_T1_im(16);
2480 gen_op_movl_T0_T1();
2481 gen_movl_T1_reg(s, rd1);
2482 if (insn & (1 << 17))
2483 gen_op_shrl_T1_im(16);
2484 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2490 gen_op_iwmmxt_movq_wRn_M0(acc);
2494 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2495 /* Internal Accumulator Access Format */
2496 rdhi = (insn >> 16) & 0xf;
2497 rdlo = (insn >> 12) & 0xf;
2503 if (insn & ARM_CP_RW_BIT) { /* MRA */
2504 gen_iwmmxt_movl_T0_T1_wRn(acc);
2505 gen_movl_reg_T0(s, rdlo);
2506 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2507 gen_op_andl_T0_T1();
2508 gen_movl_reg_T0(s, rdhi);
2510 gen_movl_T0_reg(s, rdlo);
2511 gen_movl_T1_reg(s, rdhi);
2512 gen_iwmmxt_movl_wRn_T0_T1(acc);
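/* The XScale accumulators are 40 bits wide: MRA returns the low 32
   bits in RdLo and bits 39..32 (hence the (1 << 8) - 1 mask above) in
   RdHi.  */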
2520 /* Disassemble system coprocessor instruction. Return nonzero if
2521 instruction is not defined. */
2522 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2525 uint32_t rd = (insn >> 12) & 0xf;
2526 uint32_t cp = (insn >> 8) & 0xf;
2531 if (insn & ARM_CP_RW_BIT) {
2532 if (!env->cp[cp].cp_read)
2534 gen_set_pc_im(s->pc);
2536 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2537 store_reg(s, rd, tmp);
2539 if (!env->cp[cp].cp_write)
2541 gen_set_pc_im(s->pc);
2542 tmp = load_reg(s, rd);
2543 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
2549 static int cp15_user_ok(uint32_t insn)
2551 int cpn = (insn >> 16) & 0xf;
2552 int cpm = insn & 0xf;
2553 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2555 if (cpn == 13 && cpm == 0) {
2557 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2561 /* ISB, DSB, DMB. */
2562 if ((cpm == 5 && op == 4)
2563 || (cpm == 10 && (op == 4 || op == 5)))
2569 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2570 instruction is not defined. */
2571 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2576 /* M profile cores use memory mapped registers instead of cp15. */
2577 if (arm_feature(env, ARM_FEATURE_M))
2580 if ((insn & (1 << 25)) == 0) {
2581 if (insn & (1 << 20)) {
2585 /* mcrr. Used for block cache operations, so implement as no-op. */
2588 if ((insn & (1 << 4)) == 0) {
2592 if (IS_USER(s) && !cp15_user_ok(insn)) {
2595 if ((insn & 0x0fff0fff) == 0x0e070f90
2596 || (insn & 0x0fff0fff) == 0x0e070f58) {
2597 /* Wait for interrupt. */
2598 gen_set_pc_im(s->pc);
2599 s->is_jmp = DISAS_WFI;
2602 rd = (insn >> 12) & 0xf;
2603 if (insn & ARM_CP_RW_BIT) {
2605 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
2606 /* If the destination register is r15 then the loaded value sets the condition codes. */
2608 store_reg(s, rd, tmp);
2612 tmp = load_reg(s, rd);
2613 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2615 /* Normally we would always end the TB here, but Linux
2616 * arch/arm/mach-pxa/sleep.S expects two instructions following
2617 * an MMU enable to execute from cache. Imitate this behaviour. */
2618 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2619 (insn & 0x0fff0fff) != 0x0e010f10)
2625 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2626 #define VFP_SREG(insn, bigbit, smallbit) \
2627 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2628 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2629 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2630 reg = (((insn) >> (bigbit)) & 0x0f) \
2631 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2633 if (insn & (1 << (smallbit))) \
2635 reg = ((insn) >> (bigbit)) & 0x0f; \
2638 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2639 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2640 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2641 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2642 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2643 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
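/* Worked example: VFP_SREG_D yields Sd = (insn[15:12] << 1) | insn[22],
   while VFP_DREG_D on a VFP3 core yields Dd = insn[15:12] | (insn[22] << 4).
   The extra bit is the LSB of a single-precision register number but the
   MSB of a double-precision one (and must be zero before VFP3). */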
2645 /* Move between integer and VFP cores. */
2646 static TCGv gen_vfp_mrs(void)
2648 TCGv tmp = new_tmp();
2649 tcg_gen_mov_i32(tmp, cpu_F0s);
2653 static void gen_vfp_msr(TCGv tmp)
2655 tcg_gen_mov_i32(cpu_F0s, tmp);
2660 vfp_enabled(CPUState * env)
2662 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2665 static void gen_neon_dup_u8(TCGv var, int shift)
2667 TCGv tmp = new_tmp();
2669 tcg_gen_shri_i32(var, var, shift);
2670 tcg_gen_ext8u_i32(var, var);
2671 tcg_gen_shli_i32(tmp, var, 8);
2672 tcg_gen_or_i32(var, var, tmp);
2673 tcg_gen_shli_i32(tmp, var, 16);
2674 tcg_gen_or_i32(var, var, tmp);
2678 static void gen_neon_dup_low16(TCGv var)
2680 TCGv tmp = new_tmp();
2681 tcg_gen_ext16u_i32(var, var);
2682 tcg_gen_shli_i32(tmp, var, 16);
2683 tcg_gen_or_i32(var, var, tmp);
2687 static void gen_neon_dup_high16(TCGv var)
2689 TCGv tmp = new_tmp();
2690 tcg_gen_andi_i32(var, var, 0xffff0000);
2691 tcg_gen_shri_i32(tmp, var, 16);
2692 tcg_gen_or_i32(var, var, tmp);
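/* The three dup helpers above replicate an 8- or 16-bit lane across a
   32-bit word; they are used by VDUP and by the NEON single-element
   load-to-all-lanes forms below. */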
2696 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2697 (i.e. an undefined instruction). */
2698 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2700 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2705 if (!arm_feature(env, ARM_FEATURE_VFP))
2708 if (!vfp_enabled(env)) {
2709 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2710 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2712 rn = (insn >> 16) & 0xf;
2713 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2714 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2717 dp = ((insn & 0xf00) == 0xb00);
2718 switch ((insn >> 24) & 0xf) {
2720 if (insn & (1 << 4)) {
2721 /* single register transfer */
2722 rd = (insn >> 12) & 0xf;
2727 VFP_DREG_N(rn, insn);
2730 if (insn & 0x00c00060
2731 && !arm_feature(env, ARM_FEATURE_NEON))
2734 pass = (insn >> 21) & 1;
2735 if (insn & (1 << 22)) {
2737 offset = ((insn >> 5) & 3) * 8;
2738 } else if (insn & (1 << 5)) {
2740 offset = (insn & (1 << 6)) ? 16 : 0;
2745 if (insn & ARM_CP_RW_BIT) {
2747 tmp = neon_load_reg(rn, pass);
2751 tcg_gen_shri_i32(tmp, tmp, offset);
2752 if (insn & (1 << 23))
2758 if (insn & (1 << 23)) {
2760 tcg_gen_shri_i32(tmp, tmp, 16);
2766 tcg_gen_sari_i32(tmp, tmp, 16);
2775 store_reg(s, rd, tmp);
2778 tmp = load_reg(s, rd);
2779 if (insn & (1 << 23)) {
2782 gen_neon_dup_u8(tmp, 0);
2783 } else if (size == 1) {
2784 gen_neon_dup_low16(tmp);
2787 tcg_gen_mov_i32(tmp2, tmp);
2788 neon_store_reg(rn, 0, tmp2);
2789 neon_store_reg(rn, 1, tmp);
2794 tmp2 = neon_load_reg(rn, pass);
2795 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2799 tmp2 = neon_load_reg(rn, pass);
2800 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2806 neon_store_reg(rn, pass, tmp);
2810 if ((insn & 0x6f) != 0x00)
2812 rn = VFP_SREG_N(insn);
2813 if (insn & ARM_CP_RW_BIT) {
2815 if (insn & (1 << 21)) {
2816 /* system register */
2821 /* VFP2 allows access to FSID from userspace.
2822 VFP3 restricts all id registers to privileged accesses. */
2825 && arm_feature(env, ARM_FEATURE_VFP3))
2827 tmp = load_cpu_field(vfp.xregs[rn]);
2832 tmp = load_cpu_field(vfp.xregs[rn]);
2834 case ARM_VFP_FPINST:
2835 case ARM_VFP_FPINST2:
2836 /* Not present in VFP3. */
2838 || arm_feature(env, ARM_FEATURE_VFP3))
2840 tmp = load_cpu_field(vfp.xregs[rn]);
2844 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2845 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2848 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2854 || !arm_feature(env, ARM_FEATURE_VFP3))
2856 tmp = load_cpu_field(vfp.xregs[rn]);
2862 gen_mov_F0_vreg(0, rn);
2863 tmp = gen_vfp_mrs();
2866 /* Set the 4 flag bits in the CPSR. */
2870 store_reg(s, rd, tmp);
2874 tmp = load_reg(s, rd);
2875 if (insn & (1 << 21)) {
2877 /* system register */
2882 /* Writes are ignored. */
2885 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2892 store_cpu_field(tmp, vfp.xregs[rn]);
2895 case ARM_VFP_FPINST:
2896 case ARM_VFP_FPINST2:
2897 store_cpu_field(tmp, vfp.xregs[rn]);
2904 gen_mov_vreg_F0(0, rn);
2909 /* data processing */
2910 /* The opcode is in bits 23, 21, 20 and 6. */
2911 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2915 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2917 /* rn is register number */
2918 VFP_DREG_N(rn, insn);
2921 if (op == 15 && (rn == 15 || rn > 17)) {
2922 /* Integer or single precision destination. */
2923 rd = VFP_SREG_D(insn);
2925 VFP_DREG_D(rd, insn);
2928 if (op == 15 && (rn == 16 || rn == 17)) {
2929 /* Integer source. */
2930 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2932 VFP_DREG_M(rm, insn);
2935 rn = VFP_SREG_N(insn);
2936 if (op == 15 && rn == 15) {
2937 /* Double precision destination. */
2938 VFP_DREG_D(rd, insn);
2940 rd = VFP_SREG_D(insn);
2942 rm = VFP_SREG_M(insn);
2945 veclen = env->vfp.vec_len;
2946 if (op == 15 && rn > 3)
2949 /* Shut up compiler warnings. */
2960 /* Figure out what type of vector operation this is. */
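/* When FPSCR.LEN (veclen) is nonzero, a nominally scalar VFP opcode
   iterates over a bank of registers: delta_d/delta_m are the per-pass
   register strides, and delta_m == 0 marks the mixed scalar/vector form
   where the rm operand stays fixed. */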
2961 if ((rd & bank_mask) == 0) {
2966 delta_d = (env->vfp.vec_stride >> 1) + 1;
2968 delta_d = env->vfp.vec_stride + 1;
2970 if ((rm & bank_mask) == 0) {
2971 /* mixed scalar/vector */
2980 /* Load the initial operands. */
2985 /* Integer source */
2986 gen_mov_F0_vreg(0, rm);
2991 gen_mov_F0_vreg(dp, rd);
2992 gen_mov_F1_vreg(dp, rm);
2996 /* Compare with zero */
2997 gen_mov_F0_vreg(dp, rd);
3008 /* Source and destination the same. */
3009 gen_mov_F0_vreg(dp, rd);
3012 /* One source operand. */
3013 gen_mov_F0_vreg(dp, rm);
3017 /* Two source operands. */
3018 gen_mov_F0_vreg(dp, rn);
3019 gen_mov_F1_vreg(dp, rm);
3023 /* Perform the calculation. */
3025 case 0: /* mac: fd + (fn * fm) */
3027 gen_mov_F1_vreg(dp, rd);
3030 case 1: /* nmac: fd - (fn * fm) */
3033 gen_mov_F1_vreg(dp, rd);
3036 case 2: /* msc: -fd + (fn * fm) */
3038 gen_mov_F1_vreg(dp, rd);
3041 case 3: /* nmsc: -fd - (fn * fm) */
3044 gen_mov_F1_vreg(dp, rd);
3047 case 4: /* mul: fn * fm */
3050 case 5: /* nmul: -(fn * fm) */
3054 case 6: /* add: fn + fm */
3057 case 7: /* sub: fn - fm */
3060 case 8: /* div: fn / fm */
3063 case 14: /* fconst */
3064 if (!arm_feature(env, ARM_FEATURE_VFP3))
3067 n = (insn << 12) & 0x80000000;
3068 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3075 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3082 tcg_gen_movi_i32(cpu_F0s, n);
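/* The fconst case above decodes the VFP3 VMOV-immediate format: an
   8-bit abcdefgh pattern expands to sign = a, an exponent formed from
   NOT(b) plus replicated copies of b and the next immediate bits, and
   the remaining bits at the top of the fraction. */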
3085 case 15: /* extension space */
3108 case 11: /* cmpez */
3112 case 15: /* single<->double conversion */
3114 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3116 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3118 case 16: /* fuito */
3121 case 17: /* fsito */
3124 case 20: /* fshto */
3125 if (!arm_feature(env, ARM_FEATURE_VFP3))
3127 gen_vfp_shto(dp, 16 - rm);
3129 case 21: /* fslto */
3130 if (!arm_feature(env, ARM_FEATURE_VFP3))
3132 gen_vfp_slto(dp, 32 - rm);
3134 case 22: /* fuhto */
3135 if (!arm_feature(env, ARM_FEATURE_VFP3))
3137 gen_vfp_uhto(dp, 16 - rm);
3139 case 23: /* fulto */
3140 if (!arm_feature(env, ARM_FEATURE_VFP3))
3142 gen_vfp_ulto(dp, 32 - rm);
3144 case 24: /* ftoui */
3147 case 25: /* ftouiz */
3150 case 26: /* ftosi */
3153 case 27: /* ftosiz */
3156 case 28: /* ftosh */
3157 if (!arm_feature(env, ARM_FEATURE_VFP3))
3159 gen_vfp_tosh(dp, 16 - rm);
3161 case 29: /* ftosl */
3162 if (!arm_feature(env, ARM_FEATURE_VFP3))
3164 gen_vfp_tosl(dp, 32 - rm);
3166 case 30: /* ftouh */
3167 if (!arm_feature(env, ARM_FEATURE_VFP3))
3169 gen_vfp_touh(dp, 16 - rm);
3171 case 31: /* ftoul */
3172 if (!arm_feature(env, ARM_FEATURE_VFP3))
3174 gen_vfp_toul(dp, 32 - rm);
3176 default: /* undefined */
3177 printf("rn:%d\n", rn);
3181 default: /* undefined */
3182 printf("op:%d\n", op);
3186 /* Write back the result. */
3187 if (op == 15 && (rn >= 8 && rn <= 11))
3188 ; /* Comparison, do nothing. */
3189 else if (op == 15 && rn > 17)
3190 /* Integer result. */
3191 gen_mov_vreg_F0(0, rd);
3192 else if (op == 15 && rn == 15)
3194 gen_mov_vreg_F0(!dp, rd);
3196 gen_mov_vreg_F0(dp, rd);
3198 /* Break out of the loop if we have finished. */
3202 if (op == 15 && delta_m == 0) {
3203 /* single source one-many */
3205 rd = ((rd + delta_d) & (bank_mask - 1))
3207 gen_mov_vreg_F0(dp, rd);
3211 /* Set up the next operands. */
3213 rd = ((rd + delta_d) & (bank_mask - 1))
3217 /* One source operand. */
3218 rm = ((rm + delta_m) & (bank_mask - 1))
3220 gen_mov_F0_vreg(dp, rm);
3222 /* Two source operands. */
3223 rn = ((rn + delta_d) & (bank_mask - 1))
3225 gen_mov_F0_vreg(dp, rn);
3227 rm = ((rm + delta_m) & (bank_mask - 1))
3229 gen_mov_F1_vreg(dp, rm);
3237 if (dp && (insn & 0x03e00000) == 0x00400000) {
3238 /* two-register transfer */
3239 rn = (insn >> 16) & 0xf;
3240 rd = (insn >> 12) & 0xf;
3242 VFP_DREG_M(rm, insn);
3244 rm = VFP_SREG_M(insn);
3247 if (insn & ARM_CP_RW_BIT) {
3250 gen_mov_F0_vreg(0, rm * 2);
3251 tmp = gen_vfp_mrs();
3252 store_reg(s, rd, tmp);
3253 gen_mov_F0_vreg(0, rm * 2 + 1);
3254 tmp = gen_vfp_mrs();
3255 store_reg(s, rn, tmp);
3257 gen_mov_F0_vreg(0, rm);
3258 tmp = gen_vfp_mrs();
3259 store_reg(s, rn, tmp);
3260 gen_mov_F0_vreg(0, rm + 1);
3261 tmp = gen_vfp_mrs();
3262 store_reg(s, rd, tmp);
3267 tmp = load_reg(s, rd);
3269 gen_mov_vreg_F0(0, rm * 2);
3270 tmp = load_reg(s, rn);
3272 gen_mov_vreg_F0(0, rm * 2 + 1);
3274 tmp = load_reg(s, rn);
3276 gen_mov_vreg_F0(0, rm);
3277 tmp = load_reg(s, rd);
3279 gen_mov_vreg_F0(0, rm + 1);
3284 rn = (insn >> 16) & 0xf;
3286 VFP_DREG_D(rd, insn);
3288 rd = VFP_SREG_D(insn);
3289 if (s->thumb && rn == 15) {
3290 gen_op_movl_T1_im(s->pc & ~2);
3292 gen_movl_T1_reg(s, rn);
3294 if ((insn & 0x01200000) == 0x01000000) {
3295 /* Single load/store */
3296 offset = (insn & 0xff) << 2;
3297 if ((insn & (1 << 23)) == 0)
3299 gen_op_addl_T1_im(offset);
3300 if (insn & (1 << 20)) {
3302 gen_mov_vreg_F0(dp, rd);
3304 gen_mov_F0_vreg(dp, rd);
3308 /* load/store multiple */
3310 n = (insn >> 1) & 0x7f;
3314 if (insn & (1 << 24)) /* pre-decrement */
3315 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3321 for (i = 0; i < n; i++) {
3322 if (insn & ARM_CP_RW_BIT) {
3325 gen_mov_vreg_F0(dp, rd + i);
3328 gen_mov_F0_vreg(dp, rd + i);
3331 gen_op_addl_T1_im(offset);
3333 if (insn & (1 << 21)) {
3335 if (insn & (1 << 24))
3336 offset = -offset * n;
3337 else if (dp && (insn & 1))
3343 gen_op_addl_T1_im(offset);
3344 gen_movl_reg_T1(s, rn);
3350 /* Should never happen. */
3356 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3358 TranslationBlock *tb;
3361 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3363 gen_set_pc_im(dest);
3364 tcg_gen_exit_tb((long)tb + n);
3366 gen_set_pc_im(dest);
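/* A jump within the same page can be chained directly to the next TB;
   a cross-page jump must exit unlinked because the target mapping may
   change before it is executed. */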
3371 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3373 if (unlikely(s->singlestep_enabled)) {
3374 /* An indirect jump so that we still trigger the debug exception. */
3379 gen_goto_tb(s, 0, dest);
3380 s->is_jmp = DISAS_TB_JUMP;
3384 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3387 tcg_gen_sari_i32(t0, t0, 16);
3391 tcg_gen_sari_i32(t1, t1, 16);
3394 tcg_gen_mul_i32(t0, t0, t1);
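/* This implements the SMULxy-style operand selection: x and y pick the
   top halfword (arithmetic shift right by 16) or the bottom halfword
   (sign extension) of each operand before the 16x16->32 multiply. */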
3397 /* Return the mask of PSR bits set by a MSR instruction. */
3398 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3402 if (flags & (1 << 0))
3404 if (flags & (1 << 1))
3406 if (flags & (1 << 2))
3408 if (flags & (1 << 3))
3411 /* Mask out undefined bits. */
3412 mask &= ~CPSR_RESERVED;
3413 if (!arm_feature(env, ARM_FEATURE_V6))
3414 mask &= ~(CPSR_E | CPSR_GE);
3415 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3417 /* Mask out execution state bits. */
3420 /* Mask out privileged bits. */
3426 /* Returns nonzero if access to the PSR is not permitted. */
3427 static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
3431 /* ??? This is also undefined in system mode. */
3435 tmp = load_cpu_field(spsr);
3436 tcg_gen_andi_i32(tmp, tmp, ~mask);
3437 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
3438 tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
3439 store_cpu_field(tmp, spsr);
3441 gen_set_cpsr(cpu_T[0], mask);
3447 /* Generate an old-style exception return. */
3448 static void gen_exception_return(DisasContext *s)
3451 gen_movl_reg_T0(s, 15);
3452 tmp = load_cpu_field(spsr);
3453 gen_set_cpsr(tmp, 0xffffffff);
3455 s->is_jmp = DISAS_UPDATE;
3458 /* Generate a v6 exception return. Marks both values as dead. */
3459 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3461 gen_set_cpsr(cpsr, 0xffffffff);
3463 store_reg(s, 15, pc);
3464 s->is_jmp = DISAS_UPDATE;
3468 gen_set_condexec (DisasContext *s)
3470 if (s->condexec_mask) {
3471 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3472 TCGv tmp = new_tmp();
3473 tcg_gen_movi_i32(tmp, val);
3474 store_cpu_field(tmp, condexec_bits);
3478 static void gen_nop_hint(DisasContext *s, int val)
3482 gen_set_pc_im(s->pc);
3483 s->is_jmp = DISAS_WFI;
3487 /* TODO: Implement SEV and WFE. May help SMP performance. */
3493 /* These macros help make the code more readable when migrating from the
3494 old dyngen helpers. They should probably be removed when
3495 T0/T1 are removed. */
3496 #define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
3497 #define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
3499 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3501 static inline int gen_neon_add(int size)
3504 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3505 case 1: gen_helper_neon_add_u16(CPU_T001); break;
3506 case 2: gen_op_addl_T0_T1(); break;
3512 static inline void gen_neon_rsb(int size)
3515 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3516 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3517 case 2: gen_op_rsbl_T0_T1(); break;
3522 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3523 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3524 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3525 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3526 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3528 /* FIXME: This is wrong. They set the wrong overflow bit. */
3529 #define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3530 #define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3531 #define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3532 #define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
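/* The two dispatch macros below switch on ((size << 1) | u): size picks
   the element width (0 = 8, 1 = 16, 2 = 32 bits) and u picks the
   unsigned variant, so e.g. size = 1, u = 0 expands to the _s16 helper. */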
3534 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3535 switch ((size << 1) | u) { \
3537 gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3540 gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3543 gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3546 gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3549 gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3552 gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3554 default: return 1; \
3557 #define GEN_NEON_INTEGER_OP(name) do { \
3558 switch ((size << 1) | u) { \
3560 gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3563 gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3566 gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3569 gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3572 gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3575 gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3577 default: return 1; \
3581 gen_neon_movl_scratch_T0(int scratch)
3585 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3586 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
3590 gen_neon_movl_scratch_T1(int scratch)
3594 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3595 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
3599 gen_neon_movl_T0_scratch(int scratch)
3603 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3604 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
3608 gen_neon_movl_T1_scratch(int scratch)
3612 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3613 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
3616 static inline void gen_neon_get_scalar(int size, int reg)
3619 NEON_GET_REG(T0, reg >> 1, reg & 1);
3621 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
3623 gen_neon_dup_low16(cpu_T[0]);
3625 gen_neon_dup_high16(cpu_T[0]);
3629 static void gen_neon_unzip(int reg, int q, int tmp, int size)
3633 for (n = 0; n < q + 1; n += 2) {
3634 NEON_GET_REG(T0, reg, n);
3635 NEON_GET_REG(T1, reg, n + 1);
3637 case 0: gen_helper_neon_unzip_u8(); break;
3638 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
3639 case 2: /* no-op */; break;
3642 gen_neon_movl_scratch_T0(tmp + n);
3643 gen_neon_movl_scratch_T1(tmp + n + 1);
3651 } neon_ls_element_type[11] = {
3665 /* Translate a NEON load/store element instruction. Return nonzero if the
3666 instruction is invalid. */
3667 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3683 if (!vfp_enabled(env))
3685 VFP_DREG_D(rd, insn);
3686 rn = (insn >> 16) & 0xf;
3688 load = (insn & (1 << 21)) != 0;
3689 if ((insn & (1 << 23)) == 0) {
3690 /* Load store all elements. */
3691 op = (insn >> 8) & 0xf;
3692 size = (insn >> 6) & 3;
3693 if (op > 10 || size == 3)
3695 nregs = neon_ls_element_type[op].nregs;
3696 interleave = neon_ls_element_type[op].interleave;
3697 gen_movl_T1_reg(s, rn);
3698 stride = (1 << size) * interleave;
3699 for (reg = 0; reg < nregs; reg++) {
3700 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3701 gen_movl_T1_reg(s, rn);
3702 gen_op_addl_T1_im((1 << size) * reg);
3703 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3704 gen_movl_T1_reg(s, rn);
3705 gen_op_addl_T1_im(1 << size);
3707 for (pass = 0; pass < 2; pass++) {
3710 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3711 neon_store_reg(rd, pass, tmp);
3713 tmp = neon_load_reg(rd, pass);
3714 gen_st32(tmp, cpu_T[1], IS_USER(s));
3716 gen_op_addl_T1_im(stride);
3717 } else if (size == 1) {
3719 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3720 gen_op_addl_T1_im(stride);
3721 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
3722 gen_op_addl_T1_im(stride);
3723 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3725 neon_store_reg(rd, pass, tmp);
3727 tmp = neon_load_reg(rd, pass);
3729 tcg_gen_shri_i32(tmp2, tmp, 16);
3730 gen_st16(tmp, cpu_T[1], IS_USER(s));
3731 gen_op_addl_T1_im(stride);
3732 gen_st16(tmp2, cpu_T[1], IS_USER(s));
3733 gen_op_addl_T1_im(stride);
3735 } else /* size == 0 */ {
3738 for (n = 0; n < 4; n++) {
3739 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3740 gen_op_addl_T1_im(stride);
3744 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3748 neon_store_reg(rd, pass, tmp2);
3750 tmp2 = neon_load_reg(rd, pass);
3751 for (n = 0; n < 4; n++) {
3754 tcg_gen_mov_i32(tmp, tmp2);
3756 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3758 gen_st8(tmp, cpu_T[1], IS_USER(s));
3759 gen_op_addl_T1_im(stride);
3765 rd += neon_ls_element_type[op].spacing;
3769 size = (insn >> 10) & 3;
3771 /* Load single element to all lanes. */
3774 size = (insn >> 6) & 3;
3775 nregs = ((insn >> 8) & 3) + 1;
3776 stride = (insn & (1 << 5)) ? 2 : 1;
3777 gen_movl_T1_reg(s, rn);
3778 for (reg = 0; reg < nregs; reg++) {
3781 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3782 gen_neon_dup_u8(tmp, 0);
3785 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3786 gen_neon_dup_low16(tmp);
3789 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3793 default: /* Avoid compiler warnings. */
3796 gen_op_addl_T1_im(1 << size);
3798 tcg_gen_mov_i32(tmp2, tmp);
3799 neon_store_reg(rd, 0, tmp2);
3800 neon_store_reg(rd, 1, tmp);
3803 stride = (1 << size) * nregs;
3805 /* Single element. */
3806 pass = (insn >> 7) & 1;
3809 shift = ((insn >> 5) & 3) * 8;
3813 shift = ((insn >> 6) & 1) * 16;
3814 stride = (insn & (1 << 5)) ? 2 : 1;
3818 stride = (insn & (1 << 6)) ? 2 : 1;
3823 nregs = ((insn >> 8) & 3) + 1;
3824 gen_movl_T1_reg(s, rn);
3825 for (reg = 0; reg < nregs; reg++) {
3829 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3832 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3835 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3837 default: /* Avoid compiler warnings. */
3841 tmp2 = neon_load_reg(rd, pass);
3842 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3845 neon_store_reg(rd, pass, tmp);
3846 } else { /* Store */
3847 tmp = neon_load_reg(rd, pass);
3849 tcg_gen_shri_i32(tmp, tmp, shift);
3852 gen_st8(tmp, cpu_T[1], IS_USER(s));
3855 gen_st16(tmp, cpu_T[1], IS_USER(s));
3858 gen_st32(tmp, cpu_T[1], IS_USER(s));
3863 gen_op_addl_T1_im(1 << size);
3865 stride = nregs * (1 << size);
3871 base = load_reg(s, rn);
3873 tcg_gen_addi_i32(base, base, stride);
3876 index = load_reg(s, rm);
3877 tcg_gen_add_i32(base, base, index);
3880 store_reg(s, rn, base);
3885 /* Bitwise select. dest = c ? t : f. Clobbers t and f. */
3886 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3888 tcg_gen_and_i32(t, t, c);
3889 tcg_gen_bic_i32(f, f, c);
3890 tcg_gen_or_i32(dest, t, f);
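/* VBSL, VBIT and VBIF all map onto this helper with the operands
   permuted; see the logic-op cases in disas_neon_data_insn. */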
3893 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
3896 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3897 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3898 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3903 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
3906 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3907 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3908 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3913 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
3916 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3917 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3918 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3923 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3929 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3930 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3935 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3936 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3943 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3944 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3949 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3950 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3957 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
3961 case 0: gen_helper_neon_widen_u8(dest, src); break;
3962 case 1: gen_helper_neon_widen_u16(dest, src); break;
3963 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3968 case 0: gen_helper_neon_widen_s8(dest, src); break;
3969 case 1: gen_helper_neon_widen_s16(dest, src); break;
3970 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3977 static inline void gen_neon_addl(int size)
3980 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3981 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3982 case 2: tcg_gen_add_i64(CPU_V001); break;
3987 static inline void gen_neon_subl(int size)
3990 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
3991 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
3992 case 2: tcg_gen_sub_i64(CPU_V001); break;
3997 static inline void gen_neon_negl(TCGv_i64 var, int size)
4000 case 0: gen_helper_neon_negl_u16(var, var); break;
4001 case 1: gen_helper_neon_negl_u32(var, var); break;
4002 case 2: gen_helper_neon_negl_u64(var, var); break;
4007 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4010 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4011 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4016 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4020 switch ((size << 1) | u) {
4021 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4022 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4023 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4024 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4026 tmp = gen_muls_i64_i32(a, b);
4027 tcg_gen_mov_i64(dest, tmp);
4030 tmp = gen_mulu_i64_i32(a, b);
4031 tcg_gen_mov_i64(dest, tmp);
4041 /* Translate a NEON data processing instruction. Return nonzero if the
4042 instruction is invalid.
4043 We process data in a mixture of 32-bit and 64-bit chunks.
4044 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4046 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4064 if (!vfp_enabled(env))
4066 q = (insn & (1 << 6)) != 0;
4067 u = (insn >> 24) & 1;
4068 VFP_DREG_D(rd, insn);
4069 VFP_DREG_N(rn, insn);
4070 VFP_DREG_M(rm, insn);
4071 size = (insn >> 20) & 3;
4072 if ((insn & (1 << 23)) == 0) {
4073 /* Three register same length. */
4074 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4075 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4076 || op == 10 || op == 11 || op == 16)) {
4077 /* 64-bit element instructions. */
4078 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4079 neon_load_reg64(cpu_V0, rn + pass);
4080 neon_load_reg64(cpu_V1, rm + pass);
4084 gen_helper_neon_add_saturate_u64(CPU_V001);
4086 gen_helper_neon_add_saturate_s64(CPU_V001);
4091 gen_helper_neon_sub_saturate_u64(CPU_V001);
4093 gen_helper_neon_sub_saturate_s64(CPU_V001);
4098 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4100 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4105 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4108 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4112 case 10: /* VRSHL */
4114 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4116 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4119 case 11: /* VQRSHL */
4121 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4124 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4130 tcg_gen_sub_i64(CPU_V001);
4132 tcg_gen_add_i64(CPU_V001);
4138 neon_store_reg64(cpu_V0, rd + pass);
4145 case 10: /* VRSHL */
4146 case 11: /* VQRSHL */
4149 /* Shift instruction operands are reversed. */
4156 case 20: /* VPMAX */
4157 case 21: /* VPMIN */
4158 case 23: /* VPADD */
4161 case 26: /* VPADD (float) */
4162 pairwise = (u && size < 2);
4164 case 30: /* VPMIN/VPMAX (float) */
4171 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4180 NEON_GET_REG(T0, rn, n);
4181 NEON_GET_REG(T1, rn, n + 1);
4183 NEON_GET_REG(T0, rm, n);
4184 NEON_GET_REG(T1, rm, n + 1);
4188 NEON_GET_REG(T0, rn, pass);
4189 NEON_GET_REG(T1, rm, pass);
4193 GEN_NEON_INTEGER_OP(hadd);
4196 GEN_NEON_INTEGER_OP_ENV(qadd);
4198 case 2: /* VRHADD */
4199 GEN_NEON_INTEGER_OP(rhadd);
4201 case 3: /* Logic ops. */
4202 switch ((u << 2) | size) {
4204 gen_op_andl_T0_T1();
4207 gen_op_bicl_T0_T1();
4217 gen_op_xorl_T0_T1();
4220 tmp = neon_load_reg(rd, pass);
4221 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4225 tmp = neon_load_reg(rd, pass);
4226 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4230 tmp = neon_load_reg(rd, pass);
4231 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4237 GEN_NEON_INTEGER_OP(hsub);
4240 GEN_NEON_INTEGER_OP_ENV(qsub);
4243 GEN_NEON_INTEGER_OP(cgt);
4246 GEN_NEON_INTEGER_OP(cge);
4249 GEN_NEON_INTEGER_OP(shl);
4252 GEN_NEON_INTEGER_OP_ENV(qshl);
4254 case 10: /* VRSHL */
4255 GEN_NEON_INTEGER_OP(rshl);
4257 case 11: /* VQRSHL */
4258 GEN_NEON_INTEGER_OP_ENV(qrshl);
4261 GEN_NEON_INTEGER_OP(max);
4264 GEN_NEON_INTEGER_OP(min);
4267 GEN_NEON_INTEGER_OP(abd);
4270 GEN_NEON_INTEGER_OP(abd);
4271 NEON_GET_REG(T1, rd, pass);
4275 if (!u) { /* VADD */
4276 if (gen_neon_add(size))
4280 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4281 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
4282 case 2: gen_op_subl_T0_T1(); break;
4288 if (!u) { /* VTST */
4290 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4291 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4292 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
4297 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4298 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4299 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
4304 case 18: /* Multiply. */
4306 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4307 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4308 case 2: gen_op_mul_T0_T1(); break;
4311 NEON_GET_REG(T1, rd, pass);
4319 if (u) { /* polynomial */
4320 gen_helper_neon_mul_p8(CPU_T001);
4321 } else { /* Integer */
4323 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4324 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4325 case 2: gen_op_mul_T0_T1(); break;
4330 case 20: /* VPMAX */
4331 GEN_NEON_INTEGER_OP(pmax);
4333 case 21: /* VPMIN */
4334 GEN_NEON_INTEGER_OP(pmin);
4336 case 22: /* Multiply high. */
4337 if (!u) { /* VQDMULH */
4339 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4340 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
4343 } else { /* VQRDMULH */
4345 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4346 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
4351 case 23: /* VPADD */
4355 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4356 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
4357 case 2: gen_op_addl_T0_T1(); break;
4361 case 26: /* Floating point arithmetic. */
4362 switch ((u << 2) | size) {
4364 gen_helper_neon_add_f32(CPU_T001);
4367 gen_helper_neon_sub_f32(CPU_T001);
4370 gen_helper_neon_add_f32(CPU_T001);
4373 gen_helper_neon_abd_f32(CPU_T001);
4379 case 27: /* Float multiply. */
4380 gen_helper_neon_mul_f32(CPU_T001);
4382 NEON_GET_REG(T1, rd, pass);
4384 gen_helper_neon_add_f32(CPU_T001);
4386 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
4390 case 28: /* Float compare. */
4392 gen_helper_neon_ceq_f32(CPU_T001);
4395 gen_helper_neon_cge_f32(CPU_T001);
4397 gen_helper_neon_cgt_f32(CPU_T001);
4400 case 29: /* Float compare absolute. */
4404 gen_helper_neon_acge_f32(CPU_T001);
4406 gen_helper_neon_acgt_f32(CPU_T001);
4408 case 30: /* Float min/max. */
4410 gen_helper_neon_max_f32(CPU_T001);
4412 gen_helper_neon_min_f32(CPU_T001);
4416 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4418 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4423 /* Save the result. For elementwise operations we can put it
4424 straight into the destination register. For pairwise operations
4425 we have to be careful to avoid clobbering the source operands. */
4426 if (pairwise && rd == rm) {
4427 gen_neon_movl_scratch_T0(pass);
4429 NEON_SET_REG(T0, rd, pass);
4433 if (pairwise && rd == rm) {
4434 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4435 gen_neon_movl_T0_scratch(pass);
4436 NEON_SET_REG(T0, rd, pass);
4439 /* End of 3 register same size operations. */
4440 } else if (insn & (1 << 4)) {
4441 if ((insn & 0x00380080) != 0) {
4442 /* Two registers and shift. */
4443 op = (insn >> 8) & 0xf;
4444 if (insn & (1 << 7)) {
4449 while ((insn & (1 << (size + 19))) == 0)
4452 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4453 /* To avoid excessive duplication of ops we implement shift
4454 by immediate using the variable shift operations. */
4456 /* Shift by immediate:
4457 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4458 /* Right shifts are encoded as N - shift, where N is the
4459 element size in bits. */
4461 shift = shift - (1 << (size + 3));
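/* E.g. for byte elements (size 0, N = 8) an encoded field of 7 gives
   shift = -1 here, i.e. a right shift by one; the variable-shift
   helpers treat negative shift counts as right shifts. */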
4469 imm = (uint8_t) shift;
4474 imm = (uint16_t) shift;
4485 for (pass = 0; pass < count; pass++) {
4487 neon_load_reg64(cpu_V0, rm + pass);
4488 tcg_gen_movi_i64(cpu_V1, imm);
4493 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4495 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4500 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4502 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4507 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4509 case 5: /* VSHL, VSLI */
4510 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4514 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4516 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4518 case 7: /* VQSHLU */
4519 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4522 if (op == 1 || op == 3) {
4524 neon_load_reg64(cpu_V0, rd + pass);
4525 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4526 } else if (op == 4 || (op == 5 && u)) {
4528 cpu_abort(env, "VS[LR]I.64 not implemented");
4530 neon_store_reg64(cpu_V0, rd + pass);
4531 } else { /* size < 3 */
4532 /* Operands in T0 and T1. */
4533 gen_op_movl_T1_im(imm);
4534 NEON_GET_REG(T0, rm, pass);
4538 GEN_NEON_INTEGER_OP(shl);
4542 GEN_NEON_INTEGER_OP(rshl);
4547 GEN_NEON_INTEGER_OP(shl);
4549 case 5: /* VSHL, VSLI */
4551 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4552 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4553 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4558 GEN_NEON_INTEGER_OP_ENV(qshl);
4560 case 7: /* VQSHLU */
4562 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4563 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4564 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4570 if (op == 1 || op == 3) {
4572 NEON_GET_REG(T1, rd, pass);
4574 } else if (op == 4 || (op == 5 && u)) {
4579 imm = 0xff >> -shift;
4581 imm = (uint8_t)(0xff << shift);
4587 imm = 0xffff >> -shift;
4589 imm = (uint16_t)(0xffff << shift);
4594 imm = 0xffffffffu >> -shift;
4596 imm = 0xffffffffu << shift;
4601 tmp = neon_load_reg(rd, pass);
4602 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4603 tcg_gen_andi_i32(tmp, tmp, ~imm);
4604 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4606 NEON_SET_REG(T0, rd, pass);
4609 } else if (op < 10) {
4610 /* Shift by immediate and narrow:
4611 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4612 shift = shift - (1 << (size + 3));
4616 imm = (uint16_t)shift;
4618 tmp2 = tcg_const_i32(imm);
4619 TCGV_UNUSED_I64(tmp64);
4622 imm = (uint32_t)shift;
4623 tmp2 = tcg_const_i32(imm);
4624 TCGV_UNUSED_I64(tmp64);
4627 tmp64 = tcg_const_i64(shift);
4634 for (pass = 0; pass < 2; pass++) {
4636 neon_load_reg64(cpu_V0, rm + pass);
4639 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
4641 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
4644 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
4646 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
4649 tmp = neon_load_reg(rm + pass, 0);
4650 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4651 tmp3 = neon_load_reg(rm + pass, 1);
4652 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4653 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4658 if (op == 8 && !u) {
4659 gen_neon_narrow(size - 1, tmp, cpu_V0);
4662 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
4664 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4669 neon_store_reg(rd, 0, tmp2);
4670 neon_store_reg(rd, 1, tmp);
4673 } else if (op == 10) {
4677 tmp = neon_load_reg(rm, 0);
4678 tmp2 = neon_load_reg(rm, 1);
4679 for (pass = 0; pass < 2; pass++) {
4683 gen_neon_widen(cpu_V0, tmp, size, u);
4686 /* The shift is less than the width of the source
4687 type, so we can just shift the whole register. */
4688 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4689 if (size < 2 || !u) {
4692 imm = (0xffu >> (8 - shift));
4695 imm = 0xffff >> (16 - shift);
4697 imm64 = imm | (((uint64_t)imm) << 32);
4698 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
4701 neon_store_reg64(cpu_V0, rd + pass);
4703 } else if (op == 15 || op == 16) {
4704 /* VCVT fixed-point. */
4705 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4706 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4709 gen_vfp_ulto(0, shift);
4711 gen_vfp_slto(0, shift);
4714 gen_vfp_toul(0, shift);
4716 gen_vfp_tosl(0, shift);
4718 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4723 } else { /* (insn & 0x00380080) == 0 */
4726 op = (insn >> 8) & 0xf;
4727 /* One register and immediate. */
4728 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4729 invert = (insn & (1 << 5)) != 0;
4747 imm = (imm << 8) | (imm << 24);
4750 imm = (imm << 8) | 0xff;
4753 imm = (imm << 16) | 0xffff;
4756 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4761 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4762 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
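/* This is the same abcdefgh floating-point immediate expansion as VFP3
   VMOV: bit 7 is the sign, bit 6 selects the replicated exponent
   pattern (NOT(b) then five copies of b), and bits [5:0] fill the low
   exponent bits and the top of the fraction. */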
4768 if (op != 14 || !invert)
4769 gen_op_movl_T1_im(imm);
4771 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4772 if (op & 1 && op < 12) {
4773 tmp = neon_load_reg(rd, pass);
4775 /* The immediate value has already been inverted, so BIC becomes AND. */
4777 tcg_gen_andi_i32(tmp, tmp, imm);
4779 tcg_gen_ori_i32(tmp, tmp, imm);
4784 if (op == 14 && invert) {
4787 for (n = 0; n < 4; n++) {
4788 if (imm & (1 << (n + (pass & 1) * 4)))
4789 val |= 0xff << (n * 8);
4791 tcg_gen_movi_i32(tmp, val);
4793 tcg_gen_movi_i32(tmp, imm);
4796 neon_store_reg(rd, pass, tmp);
4799 } else { /* (insn & 0x00800010) == 0x00800000 */
4801 op = (insn >> 8) & 0xf;
4802 if ((insn & (1 << 6)) == 0) {
4803 /* Three registers of different lengths. */
4807 /* prewiden, src1_wide, src2_wide */
4808 static const int neon_3reg_wide[16][3] = {
4809 {1, 0, 0}, /* VADDL */
4810 {1, 1, 0}, /* VADDW */
4811 {1, 0, 0}, /* VSUBL */
4812 {1, 1, 0}, /* VSUBW */
4813 {0, 1, 1}, /* VADDHN */
4814 {0, 0, 0}, /* VABAL */
4815 {0, 1, 1}, /* VSUBHN */
4816 {0, 0, 0}, /* VABDL */
4817 {0, 0, 0}, /* VMLAL */
4818 {0, 0, 0}, /* VQDMLAL */
4819 {0, 0, 0}, /* VMLSL */
4820 {0, 0, 0}, /* VQDMLSL */
4821 {0, 0, 0}, /* Integer VMULL */
4822 {0, 0, 0}, /* VQDMULL */
4823 {0, 0, 0} /* Polynomial VMULL */
4826 prewiden = neon_3reg_wide[op][0];
4827 src1_wide = neon_3reg_wide[op][1];
4828 src2_wide = neon_3reg_wide[op][2];
4830 if (size == 0 && (op == 9 || op == 11 || op == 13))
4833 /* Avoid overlapping operands. Wide source operands are
4834 always aligned so will never overlap with wide
4835 destinations in problematic ways. */
4836 if (rd == rm && !src2_wide) {
4837 NEON_GET_REG(T0, rm, 1);
4838 gen_neon_movl_scratch_T0(2);
4839 } else if (rd == rn && !src1_wide) {
4840 NEON_GET_REG(T0, rn, 1);
4841 gen_neon_movl_scratch_T0(2);
4844 for (pass = 0; pass < 2; pass++) {
4846 neon_load_reg64(cpu_V0, rn + pass);
4849 if (pass == 1 && rd == rn) {
4850 gen_neon_movl_T0_scratch(2);
4852 tcg_gen_mov_i32(tmp, cpu_T[0]);
4854 tmp = neon_load_reg(rn, pass);
4857 gen_neon_widen(cpu_V0, tmp, size, u);
4861 neon_load_reg64(cpu_V1, rm + pass);
4864 if (pass == 1 && rd == rm) {
4865 gen_neon_movl_T0_scratch(2);
4867 tcg_gen_mov_i32(tmp2, cpu_T[0]);
4869 tmp2 = neon_load_reg(rm, pass);
4872 gen_neon_widen(cpu_V1, tmp2, size, u);
4876 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
4877 gen_neon_addl(size);
4879 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
4880 gen_neon_subl(size);
4882 case 5: case 7: /* VABAL, VABDL */
4883 switch ((size << 1) | u) {
4885 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4888 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4891 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4894 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4897 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4900 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4907 case 8: case 9: case 10: case 11: case 12: case 13:
4908 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
4909 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
4911 case 14: /* Polynomial VMULL */
4912 cpu_abort(env, "Polynomial VMULL not implemented");
4914 default: /* 15 is RESERVED. */
4917 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4919 if (op == 10 || op == 11) {
4920 gen_neon_negl(cpu_V0, size);
4924 neon_load_reg64(cpu_V1, rd + pass);
4928 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
4929 gen_neon_addl(size);
4931 case 9: case 11: /* VQDMLAL, VQDMLSL */
4932 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4933 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4936 case 13: /* VQDMULL */
4937 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4942 neon_store_reg64(cpu_V0, rd + pass);
4943 } else if (op == 4 || op == 6) {
4944 /* Narrowing operation. */
4949 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4952 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4955 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4956 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4963 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4966 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4969 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4970 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4971 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4979 neon_store_reg(rd, 0, tmp3);
4980 neon_store_reg(rd, 1, tmp);
4983 /* Write back the result. */
4984 neon_store_reg64(cpu_V0, rd + pass);
4988 /* Two registers and a scalar. */
4990 case 0: /* Integer VMLA scalar */
4991 case 1: /* Float VMLA scalar */
4992 case 4: /* Integer VMLS scalar */
4993 case 5: /* Floating point VMLS scalar */
4994 case 8: /* Integer VMUL scalar */
4995 case 9: /* Floating point VMUL scalar */
4996 case 12: /* VQDMULH scalar */
4997 case 13: /* VQRDMULH scalar */
4998 gen_neon_get_scalar(size, rm);
4999 gen_neon_movl_scratch_T0(0);
5000 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5002 gen_neon_movl_T0_scratch(0);
5003 NEON_GET_REG(T1, rn, pass);
5006 gen_helper_neon_qdmulh_s16(CPU_T0E01);
5008 gen_helper_neon_qdmulh_s32(CPU_T0E01);
5010 } else if (op == 13) {
5012 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
5014 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
5016 } else if (op & 1) {
5017 gen_helper_neon_mul_f32(CPU_T001);
5020 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5021 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
5022 case 2: gen_op_mul_T0_T1(); break;
5028 NEON_GET_REG(T1, rd, pass);
5034 gen_helper_neon_add_f32(CPU_T001);
5040 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
5046 NEON_SET_REG(T0, rd, pass);
5049 case 2: /* VMLAL scalar */
5050 case 3: /* VQDMLAL scalar */
5051 case 6: /* VMLSL scalar */
5052 case 7: /* VQDMLSL scalar */
5053 case 10: /* VMULL scalar */
5054 case 11: /* VQDMULL scalar */
5055 if (size == 0 && (op == 3 || op == 7 || op == 11))
5058 gen_neon_get_scalar(size, rm);
5059 NEON_GET_REG(T1, rn, 1);
5061 for (pass = 0; pass < 2; pass++) {
5063 tmp = neon_load_reg(rn, 0);
5066 tcg_gen_mov_i32(tmp, cpu_T[1]);
5069 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5070 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5071 if (op == 6 || op == 7) {
5072 gen_neon_negl(cpu_V0, size);
5075 neon_load_reg64(cpu_V1, rd + pass);
5079 gen_neon_addl(size);
5082 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5083 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5089 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5094 neon_store_reg64(cpu_V0, rd + pass);
5097 default: /* 14 and 15 are RESERVED */
5101 } else { /* size == 3 */
5104 imm = (insn >> 8) & 0xf;
5111 neon_load_reg64(cpu_V0, rn);
5113 neon_load_reg64(cpu_V1, rn + 1);
5115 } else if (imm == 8) {
5116 neon_load_reg64(cpu_V0, rn + 1);
5118 neon_load_reg64(cpu_V1, rm);
5121 tmp64 = tcg_temp_new_i64();
5123 neon_load_reg64(cpu_V0, rn);
5124 neon_load_reg64(tmp64, rn + 1);
5126 neon_load_reg64(cpu_V0, rn + 1);
5127 neon_load_reg64(tmp64, rm);
5129 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5130 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5131 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5133 neon_load_reg64(cpu_V1, rm);
5135 neon_load_reg64(cpu_V1, rm + 1);
5138 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5139 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5140 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5143 neon_load_reg64(cpu_V0, rn);
5144 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5145 neon_load_reg64(cpu_V1, rm);
5146 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5147 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5149 neon_store_reg64(cpu_V0, rd);
5151 neon_store_reg64(cpu_V1, rd + 1);
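/* VEXT is implemented as a funnel shift: each 64-bit half of the
   result is built by shifting the concatenated sources right by
   imm bytes with a shri/shli/or triple. */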
5153 } else if ((insn & (1 << 11)) == 0) {
5154 /* Two register misc. */
5155 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5156 size = (insn >> 18) & 3;
5158 case 0: /* VREV64 */
5161 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5162 NEON_GET_REG(T0, rm, pass * 2);
5163 NEON_GET_REG(T1, rm, pass * 2 + 1);
5165 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
5166 case 1: gen_swap_half(cpu_T[0]); break;
5167 case 2: /* no-op */ break;
5170 NEON_SET_REG(T0, rd, pass * 2 + 1);
5172 NEON_SET_REG(T1, rd, pass * 2);
5174 gen_op_movl_T0_T1();
5176 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
5177 case 1: gen_swap_half(cpu_T[0]); break;
5180 NEON_SET_REG(T0, rd, pass * 2);
5184 case 4: case 5: /* VPADDL */
5185 case 12: case 13: /* VPADAL */
5188 for (pass = 0; pass < q + 1; pass++) {
5189 tmp = neon_load_reg(rm, pass * 2);
5190 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5191 tmp = neon_load_reg(rm, pass * 2 + 1);
5192 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5194 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5195 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5196 case 2: tcg_gen_add_i64(CPU_V001); break;
5201 neon_load_reg64(cpu_V1, rd + pass);
5202 gen_neon_addl(size);
5204 neon_store_reg64(cpu_V0, rd + pass);
5209 for (n = 0; n < (q ? 4 : 2); n += 2) {
5210 NEON_GET_REG(T0, rm, n);
5211 NEON_GET_REG(T1, rd, n + 1);
5212 NEON_SET_REG(T1, rm, n);
5213 NEON_SET_REG(T0, rd, n + 1);
5221 Rd before: A3 A2 A1 A0 after: B2 B0 A2 A0
5222 Rm before: B3 B2 B1 B0 after: B3 B1 A3 A1
5226 gen_neon_unzip(rd, q, 0, size);
5227 gen_neon_unzip(rm, q, 4, size);
5229 static int unzip_order_q[8] =
5230 {0, 2, 4, 6, 1, 3, 5, 7};
5231 for (n = 0; n < 8; n++) {
5232 int reg = (n < 4) ? rd : rm;
5233 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5234 NEON_SET_REG(T0, reg, n % 4);
5237 static int unzip_order[4] =
5239 for (n = 0; n < 4; n++) {
5240 int reg = (n < 2) ? rd : rm;
5241 gen_neon_movl_T0_scratch(unzip_order[n]);
5242 NEON_SET_REG(T0, reg, n % 2);
5248 Rd before: A3 A2 A1 A0 after: B1 A1 B0 A0
5249 Rm before: B3 B2 B1 B0 after: B3 A3 B2 A2
5253 count = (q ? 4 : 2);
5254 for (n = 0; n < count; n++) {
5255 NEON_GET_REG(T0, rd, n);
5256 NEON_GET_REG(T1, rm, n);
5258 case 0: gen_helper_neon_zip_u8(); break;
5259 case 1: gen_helper_neon_zip_u16(); break;
5260 case 2: /* no-op */; break;
5263 gen_neon_movl_scratch_T0(n * 2);
5264 gen_neon_movl_scratch_T1(n * 2 + 1);
5266 for (n = 0; n < count * 2; n++) {
5267 int reg = (n < count) ? rd : rm;
5268 gen_neon_movl_T0_scratch(n);
5269 NEON_SET_REG(T0, reg, n % count);
5272 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5276 for (pass = 0; pass < 2; pass++) {
5277 neon_load_reg64(cpu_V0, rm + pass);
5279 if (op == 36 && q == 0) {
5280 gen_neon_narrow(size, tmp, cpu_V0);
5282 gen_neon_narrow_satu(size, tmp, cpu_V0);
5284 gen_neon_narrow_sats(size, tmp, cpu_V0);
5289 neon_store_reg(rd, 0, tmp2);
5290 neon_store_reg(rd, 1, tmp);
5294 case 38: /* VSHLL */
5297 tmp = neon_load_reg(rm, 0);
5298 tmp2 = neon_load_reg(rm, 1);
5299 for (pass = 0; pass < 2; pass++) {
5302 gen_neon_widen(cpu_V0, tmp, size, 1);
5303 neon_store_reg64(cpu_V0, rd + pass);
5308 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5309 if (op == 30 || op == 31 || op >= 58) {
5310 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5311 neon_reg_offset(rm, pass));
5313 NEON_GET_REG(T0, rm, pass);
5316 case 1: /* VREV32 */
5318 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
5319 case 1: gen_swap_half(cpu_T[0]); break;
5323 case 2: /* VREV16 */
5326 gen_rev16(cpu_T[0]);
5330 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5331 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5332 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
5338 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5339 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
5340 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
5347 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
5354 case 14: /* VQABS */
5356 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5357 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5358 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5362 case 15: /* VQNEG */
5364 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5365 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5366 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5370 case 16: case 19: /* VCGT #0, VCLE #0 */
5371 gen_op_movl_T1_im(0);
5373 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5374 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5375 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
5381 case 17: case 20: /* VCGE #0, VCLT #0 */
5382 gen_op_movl_T1_im(0);
5384 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5385 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5386 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
5392 case 18: /* VCEQ #0 */
5393 gen_op_movl_T1_im(0);
5395 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5396 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5397 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
5403 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5404 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5405 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
5410 gen_op_movl_T1_im(0);
5415 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5416 gen_op_movl_T1_im(0);
5417 gen_helper_neon_cgt_f32(CPU_T001);
5421 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5422 gen_op_movl_T1_im(0);
5423 gen_helper_neon_cge_f32(CPU_T001);
5427 case 26: /* Float VCEQ #0 */
5428 gen_op_movl_T1_im(0);
5429 gen_helper_neon_ceq_f32(CPU_T001);
5431 case 30: /* Float VABS */
5434 case 31: /* Float VNEG */
5438 NEON_GET_REG(T1, rd, pass);
5439 NEON_SET_REG(T1, rm, pass);
5442 NEON_GET_REG(T1, rd, pass);
5444 case 0: gen_helper_neon_trn_u8(); break;
5445 case 1: gen_helper_neon_trn_u16(); break;
5449 NEON_SET_REG(T1, rm, pass);
5451 case 56: /* Integer VRECPE */
5452 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
5454 case 57: /* Integer VRSQRTE */
5455 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
5457 case 58: /* Float VRECPE */
5458 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5460 case 59: /* Float VRSQRTE */
5461 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5463 case 60: /* VCVT.F32.S32 */
5466 case 61: /* VCVT.F32.U32 */
5469 case 62: /* VCVT.S32.F32 */
5472 case 63: /* VCVT.U32.F32 */
5476 /* Reserved: 21, 29, 39-56 */
5479 if (op == 30 || op == 31 || op >= 58) {
5480 tcg_gen_st_f32(cpu_F0s, cpu_env,
5481 neon_reg_offset(rd, pass));
5483 NEON_SET_REG(T0, rd, pass);
5488 } else if ((insn & (1 << 10)) == 0) {
5490 n = ((insn >> 5) & 0x18) + 8;
5491 if (insn & (1 << 6)) {
5492 tmp = neon_load_reg(rd, 0);
5495 tcg_gen_movi_i32(tmp, 0);
5497 tmp2 = neon_load_reg(rm, 0);
5498 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5501 if (insn & (1 << 6)) {
5502 tmp = neon_load_reg(rd, 1);
5505 tcg_gen_movi_i32(tmp, 0);
5507 tmp3 = neon_load_reg(rm, 1);
5508 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5510 neon_store_reg(rd, 0, tmp2);
5511 neon_store_reg(rd, 1, tmp3);
5513 } else if ((insn & 0x380) == 0) {
5515 if (insn & (1 << 19)) {
5516 NEON_GET_REG(T0, rm, 1);
5518 NEON_GET_REG(T0, rm, 0);
5520 if (insn & (1 << 16)) {
5521 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
5522 } else if (insn & (1 << 17)) {
5523 if ((insn >> 18) & 1)
5524 gen_neon_dup_high16(cpu_T[0]);
5526 gen_neon_dup_low16(cpu_T[0]);
5528 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5529 NEON_SET_REG(T0, rd, pass);
5539 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5541 int crn = (insn >> 16) & 0xf;
5542 int crm = insn & 0xf;
5543 int op1 = (insn >> 21) & 7;
5544 int op2 = (insn >> 5) & 7;
5545 int rt = (insn >> 12) & 0xf;
5548 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5549 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5553 tmp = load_cpu_field(teecr);
5554 store_reg(s, rt, tmp);
5557 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5559 if (IS_USER(s) && (env->teecr & 1))
5561 tmp = load_cpu_field(teehbr);
5562 store_reg(s, rt, tmp);
5566 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5567 op1, crn, crm, op2);
5571 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5573 int crn = (insn >> 16) & 0xf;
5574 int crm = insn & 0xf;
5575 int op1 = (insn >> 21) & 7;
5576 int op2 = (insn >> 5) & 7;
5577 int rt = (insn >> 12) & 0xf;
5580 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5581 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5585 tmp = load_reg(s, rt);
5586 gen_helper_set_teecr(cpu_env, tmp);
5590 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5592 if (IS_USER(s) && (env->teecr & 1))
5594 tmp = load_reg(s, rt);
5595 store_cpu_field(tmp, teehbr);
5599 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5600 op1, crn, crm, op2);
5604 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5608 cpnum = (insn >> 8) & 0xf;
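/* On XScale the Coprocessor Access Register gates coprocessors 0-13;
   the check below makes an access to a disabled coprocessor UNDEF
   instead of dispatching it. */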
5609 if (arm_feature(env, ARM_FEATURE_XSCALE)
5610 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5616 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5617 return disas_iwmmxt_insn(env, s, insn);
5618 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5619 return disas_dsp_insn(env, s, insn);
5624 return disas_vfp_insn (env, s, insn);
5626 /* Coprocessors 7-15 are architecturally reserved by ARM.
5627 Unfortunately Intel decided to ignore this. */
5628 if (arm_feature(env, ARM_FEATURE_XSCALE))
5630 if (insn & (1 << 20))
5631 return disas_cp14_read(env, s, insn);
5633 return disas_cp14_write(env, s, insn);
5635 return disas_cp15_insn (env, s, insn);
5638 /* Unknown coprocessor. See if the board has hooked it. */
5639 return disas_cp_insn (env, s, insn);
5644 /* Store a 64-bit value to a register pair. Clobbers val. */
5645 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5649 tcg_gen_trunc_i64_i32(tmp, val);
5650 store_reg(s, rlow, tmp);
5652 tcg_gen_shri_i64(val, val, 32);
5653 tcg_gen_trunc_i64_i32(tmp, val);
5654 store_reg(s, rhigh, tmp);
5657 /* Load a 32-bit value from a register and perform a 64-bit accumulate. */
5658 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5663 /* Load value and extend to 64 bits. */
5664 tmp = tcg_temp_new_i64();
5665 tmp2 = load_reg(s, rlow);
5666 tcg_gen_extu_i32_i64(tmp, tmp2);
5668 tcg_gen_add_i64(val, val, tmp);
5671 /* Load and add a 64-bit value from a register pair. */
5672 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5678 /* Load 64-bit value rd:rn. */
5679 tmpl = load_reg(s, rlow);
5680 tmph = load_reg(s, rhigh);
5681 tmp = tcg_temp_new_i64();
5682 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5685 tcg_gen_add_i64(val, val, tmp);
5688 /* Set N and Z flags from a 64-bit value. */
5689 static void gen_logicq_cc(TCGv_i64 val)
5691 TCGv tmp = new_tmp();
5692 gen_helper_logicq_cc(tmp, val);
5697 static void disas_arm_insn(CPUState * env, DisasContext *s)
5699 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
5706 insn = ldl_code(s->pc);
5709 /* M variants do not implement ARM mode. */
5714 /* Unconditional instructions. */
5715 if (((insn >> 25) & 7) == 1) {
5716 /* NEON Data processing. */
5717 if (!arm_feature(env, ARM_FEATURE_NEON))
5720 if (disas_neon_data_insn(env, s, insn))
5724 if ((insn & 0x0f100000) == 0x04000000) {
5725 /* NEON load/store. */
5726 if (!arm_feature(env, ARM_FEATURE_NEON))
5729 if (disas_neon_ls_insn(env, s, insn))
5733 if ((insn & 0x0d70f000) == 0x0550f000)
5735 else if ((insn & 0x0ffffdff) == 0x01010000) {
5738 if (insn & (1 << 9)) {
5739 /* BE8 mode not implemented. */
5743 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5744 switch ((insn >> 4) & 0xf) {
5747 gen_helper_clrex(cpu_env);
5753 /* We don't emulate caches so these are a no-op. */
5758 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5764 op1 = (insn & 0x1f);
5765 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5766 addr = load_reg(s, 13);
5769 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
5771 i = (insn >> 23) & 3;
5773 case 0: offset = -4; break; /* DA */
5774 case 1: offset = -8; break; /* DB */
5775 case 2: offset = 0; break; /* IA */
5776 case 3: offset = 4; break; /* IB */
5780 tcg_gen_addi_i32(addr, addr, offset);
5781 tmp = load_reg(s, 14);
5782 gen_st32(tmp, addr, 0);
5784 gen_helper_cpsr_read(tmp);
5785 tcg_gen_addi_i32(addr, addr, 4);
5786 gen_st32(tmp, addr, 0);
5787 if (insn & (1 << 21)) {
5788 /* Base writeback. */
5790 case 0: offset = -8; break;
5791 case 1: offset = -4; break;
5792 case 2: offset = 4; break;
5793 case 3: offset = 0; break;
5797 tcg_gen_addi_i32(addr, tmp, offset);
5798 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5799 gen_movl_reg_T1(s, 13);
5801 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), cpu_T[1]);
5806 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5812 rn = (insn >> 16) & 0xf;
5813 addr = load_reg(s, rn);
5814 i = (insn >> 23) & 3;
5816 case 0: offset = -4; break; /* DA */
5817 case 1: offset = -8; break; /* DB */
5818 case 2: offset = 0; break; /* IA */
5819 case 3: offset = 4; break; /* IB */
5823 tcg_gen_addi_i32(addr, addr, offset);
5824 /* Load PC into tmp and CPSR into tmp2. */
5825 tmp = gen_ld32(addr, 0);
5826 tcg_gen_addi_i32(addr, addr, 4);
5827 tmp2 = gen_ld32(addr, 0);
5828 if (insn & (1 << 21)) {
5829 /* Base writeback. */
5831 case 0: offset = -8; break;
5832 case 1: offset = -4; break;
5833 case 2: offset = 4; break;
5834 case 3: offset = 0; break;
5838 tcg_gen_addi_i32(addr, addr, offset);
5839 store_reg(s, rn, addr);
5843 gen_rfe(s, tmp, tmp2);
5844 } else if ((insn & 0x0e000000) == 0x0a000000) {
5845 /* branch link and change to thumb (blx <offset>) */
5848 val = (uint32_t)s->pc;
5850 tcg_gen_movi_i32(tmp, val);
5851 store_reg(s, 14, tmp);
5852 /* Sign-extend the 24-bit offset */
5853 offset = (((int32_t)insn) << 8) >> 8;
5854 /* offset * 4 + bit24 * 2 + (thumb bit) */
5855 val += (offset << 2) | ((insn >> 23) & 2) | 1;
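/* Worked example (illustrative): for insn = 0xfb000100 the 24-bit
   immediate is 0x000100, so offset = 0x100 and offset << 2 = 0x400;
   bit 24 (the H bit) contributes 2 and the low bit selects Thumb
   state, so val advances by 0x403 from the pipeline-adjusted PC. */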
5856 /* pipeline offset */
5860 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5861 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5862 /* iWMMXt register transfer. */
5863 if (env->cp15.c15_cpar & (1 << 1))
5864 if (!disas_iwmmxt_insn(env, s, insn))
5867 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5868 /* Coprocessor double register transfer. */
5869 } else if ((insn & 0x0f000010) == 0x0e000010) {
5870 /* Additional coprocessor register transfer. */
5871 } else if ((insn & 0x0ff10020) == 0x01000000) {
5874 /* cps (privileged) */
5878 if (insn & (1 << 19)) {
5879 if (insn & (1 << 8))
5881 if (insn & (1 << 7))
5883 if (insn & (1 << 6))
5885 if (insn & (1 << 18))
5888 if (insn & (1 << 17)) {
5890 val |= (insn & 0x1f);
5893 gen_op_movl_T0_im(val);
5894 gen_set_psr_T0(s, mask, 0);
5901 /* if not always execute, we generate a conditional jump to
the next instruction */
5903 s->condlabel = gen_new_label();
5904 gen_test_cc(cond ^ 1, s->condlabel);
5907 if ((insn & 0x0f900000) == 0x03000000) {
5908 if ((insn & (1 << 21)) == 0) {
5910 rd = (insn >> 12) & 0xf;
5911 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5912 if ((insn & (1 << 22)) == 0) {
5915 tcg_gen_movi_i32(tmp, val);
5918 tmp = load_reg(s, rd);
5919 tcg_gen_ext16u_i32(tmp, tmp);
5920 tcg_gen_ori_i32(tmp, tmp, val << 16);
5922 store_reg(s, rd, tmp);
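/* Illustrative pair (register values assumed): "movw r0, #0x5678"
   writes 0x00005678, and a following "movt r0, #0x1234" keeps the
   low halfword and ORs in 0x1234 << 16, leaving r0 = 0x12345678. */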
5924 if (((insn >> 12) & 0xf) != 0xf)
5926 if (((insn >> 16) & 0xf) == 0) {
5927 gen_nop_hint(s, insn & 0xff);
5929 /* CPSR = immediate */
5931 shift = ((insn >> 8) & 0xf) * 2;
5933 val = (val >> shift) | (val << (32 - shift));
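/* Worked example (illustrative): a rotate field of 0xf gives
   shift = 30, so an 8-bit immediate of 0xff becomes
   (0xff >> 30) | (0xff << 2) = 0x3fc, the standard ARM
   rotate-right-by-2n immediate form. */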
5934 gen_op_movl_T0_im(val);
5935 i = ((insn & (1 << 22)) != 0);
5936 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5940 } else if ((insn & 0x0f900000) == 0x01000000
5941 && (insn & 0x00000090) != 0x00000090) {
5942 /* miscellaneous instructions */
5943 op1 = (insn >> 21) & 3;
5944 sh = (insn >> 4) & 0xf;
5947 case 0x0: /* move program status register */
5950 gen_movl_T0_reg(s, rm);
5951 i = ((op1 & 2) != 0);
5952 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5956 rd = (insn >> 12) & 0xf;
5960 tmp = load_cpu_field(spsr);
5963 gen_helper_cpsr_read(tmp);
5965 store_reg(s, rd, tmp);
5970 /* branch/exchange thumb (bx). */
5971 tmp = load_reg(s, rm);
5973 } else if (op1 == 3) {
5975 rd = (insn >> 12) & 0xf;
5976 tmp = load_reg(s, rm);
5977 gen_helper_clz(tmp, tmp);
5978 store_reg(s, rd, tmp);
5986 /* Trivial implementation equivalent to bx. */
5987 tmp = load_reg(s, rm);
5997 /* branch link/exchange thumb (blx) */
5998 tmp = load_reg(s, rm);
6000 tcg_gen_movi_i32(tmp2, s->pc);
6001 store_reg(s, 14, tmp2);
6004 case 0x5: /* saturating add/subtract */
6005 rd = (insn >> 12) & 0xf;
6006 rn = (insn >> 16) & 0xf;
6007 tmp = load_reg(s, rm);
6008 tmp2 = load_reg(s, rn);
6010 gen_helper_double_saturate(tmp2, tmp2);
6012 gen_helper_sub_saturate(tmp, tmp, tmp2);
6014 gen_helper_add_saturate(tmp, tmp, tmp2);
6016 store_reg(s, rd, tmp);
6021 gen_set_condexec(s);
6022 gen_set_pc_im(s->pc - 4);
6023 gen_exception(EXCP_BKPT);
6024 s->is_jmp = DISAS_JUMP;
6025 } else if (op1 == 3) {
6027 if (!(env->cp15.c0_c2[4] & 0xf000) || IS_USER(s))
6029 /* TODO: real implementation; execute as NOP for now */
6030 /*fprintf(stderr, "smc [0x%08x] pc=0x%08x\n", insn, s->pc);*/
6035 case 0x8: /* signed multiply */
6039 rs = (insn >> 8) & 0xf;
6040 rn = (insn >> 12) & 0xf;
6041 rd = (insn >> 16) & 0xf;
6043 /* (32 * 16) >> 16 */
6044 tmp = load_reg(s, rm);
6045 tmp2 = load_reg(s, rs);
6047 tcg_gen_sari_i32(tmp2, tmp2, 16);
6050 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6051 tcg_gen_shri_i64(tmp64, tmp64, 16);
6053 tcg_gen_trunc_i64_i32(tmp, tmp64);
6054 if ((sh & 2) == 0) {
6055 tmp2 = load_reg(s, rn);
6056 gen_helper_add_setq(tmp, tmp, tmp2);
6059 store_reg(s, rd, tmp);
6062 tmp = load_reg(s, rm);
6063 tmp2 = load_reg(s, rs);
6064 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6067 tmp64 = tcg_temp_new_i64();
6068 tcg_gen_ext_i32_i64(tmp64, tmp);
6070 gen_addq(s, tmp64, rn, rd);
6071 gen_storeq_reg(s, rn, rd, tmp64);
6074 tmp2 = load_reg(s, rn);
6075 gen_helper_add_setq(tmp, tmp, tmp2);
6078 store_reg(s, rd, tmp);
6085 } else if (((insn & 0x0e000000) == 0 &&
6086 (insn & 0x00000090) != 0x90) ||
6087 ((insn & 0x0e000000) == (1 << 25))) {
6088 int set_cc, logic_cc, shiftop;
6090 op1 = (insn >> 21) & 0xf;
6091 set_cc = (insn >> 20) & 1;
6092 logic_cc = table_logic_cc[op1] & set_cc;
6094 /* data processing instruction */
6095 if (insn & (1 << 25)) {
6096 /* immediate operand */
6098 shift = ((insn >> 8) & 0xf) * 2;
6100 val = (val >> shift) | (val << (32 - shift));
6101 gen_op_movl_T1_im(val);
6102 if (logic_cc && shift)
6103 gen_set_CF_bit31(cpu_T[1]);
6107 gen_movl_T1_reg(s, rm);
6108 shiftop = (insn >> 5) & 3;
6109 if (!(insn & (1 << 4))) {
6110 shift = (insn >> 7) & 0x1f;
6111 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
6113 rs = (insn >> 8) & 0xf;
6114 tmp = load_reg(s, rs);
6115 gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
6118 if (op1 != 0x0f && op1 != 0x0d) {
6119 rn = (insn >> 16) & 0xf;
6120 gen_movl_T0_reg(s, rn);
6122 rd = (insn >> 12) & 0xf;
6125 gen_op_andl_T0_T1();
6126 gen_movl_reg_T0(s, rd);
6128 gen_op_logic_T0_cc();
6131 gen_op_xorl_T0_T1();
6132 gen_movl_reg_T0(s, rd);
6134 gen_op_logic_T0_cc();
6137 if (set_cc && rd == 15) {
6138 /* SUBS r15, ... is used for exception return. */
6141 gen_op_subl_T0_T1_cc();
6142 gen_exception_return(s);
6145 gen_op_subl_T0_T1_cc();
6147 gen_op_subl_T0_T1();
6148 gen_movl_reg_T0(s, rd);
6153 gen_op_rsbl_T0_T1_cc();
6155 gen_op_rsbl_T0_T1();
6156 gen_movl_reg_T0(s, rd);
6160 gen_op_addl_T0_T1_cc();
6162 gen_op_addl_T0_T1();
6163 gen_movl_reg_T0(s, rd);
6167 gen_op_adcl_T0_T1_cc();
6170 gen_movl_reg_T0(s, rd);
6174 gen_op_sbcl_T0_T1_cc();
6177 gen_movl_reg_T0(s, rd);
6181 gen_op_rscl_T0_T1_cc();
6184 gen_movl_reg_T0(s, rd);
6188 gen_op_andl_T0_T1();
6189 gen_op_logic_T0_cc();
6194 gen_op_xorl_T0_T1();
6195 gen_op_logic_T0_cc();
6200 gen_op_subl_T0_T1_cc();
6205 gen_op_addl_T0_T1_cc();
6210 gen_movl_reg_T0(s, rd);
6212 gen_op_logic_T0_cc();
6217 /* MOVS r15, ... is used for exception return. */
6220 gen_op_movl_T0_T1();
6221 gen_exception_return(s);
6223 } else if (ENABLE_ARCH_7) {
6225 tcg_gen_mov_i32(tmp, cpu_T[1]);
6230 gen_movl_reg_T1(s, rd);
6232 gen_op_logic_T1_cc();
6235 gen_op_bicl_T0_T1();
6236 gen_movl_reg_T0(s, rd);
6238 gen_op_logic_T0_cc();
6243 gen_movl_reg_T1(s, rd);
6245 gen_op_logic_T1_cc();
6249 /* other instructions */
6250 op1 = (insn >> 24) & 0xf;
6254 /* multiplies, extra load/stores */
6255 sh = (insn >> 5) & 3;
6258 rd = (insn >> 16) & 0xf;
6259 rn = (insn >> 12) & 0xf;
6260 rs = (insn >> 8) & 0xf;
6262 op1 = (insn >> 20) & 0xf;
6264 case 0: case 1: case 2: case 3: case 6:
6266 tmp = load_reg(s, rs);
6267 tmp2 = load_reg(s, rm);
6268 tcg_gen_mul_i32(tmp, tmp, tmp2);
6270 if (insn & (1 << 22)) {
6271 /* Subtract (mls) */
6273 tmp2 = load_reg(s, rn);
6274 tcg_gen_sub_i32(tmp, tmp2, tmp);
6276 } else if (insn & (1 << 21)) {
6278 tmp2 = load_reg(s, rn);
6279 tcg_gen_add_i32(tmp, tmp, tmp2);
6282 if (insn & (1 << 20))
6284 store_reg(s, rd, tmp);
6288 tmp = load_reg(s, rs);
6289 tmp2 = load_reg(s, rm);
6290 if (insn & (1 << 22))
6291 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6293 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6294 if (insn & (1 << 21)) /* mult accumulate */
6295 gen_addq(s, tmp64, rn, rd);
6296 if (!(insn & (1 << 23))) { /* double accumulate */
6298 gen_addq_lo(s, tmp64, rn);
6299 gen_addq_lo(s, tmp64, rd);
6301 if (insn & (1 << 20))
6302 gen_logicq_cc(tmp64);
6303 gen_storeq_reg(s, rn, rd, tmp64);
6307 rn = (insn >> 16) & 0xf;
6308 rd = (insn >> 12) & 0xf;
6309 if (insn & (1 << 23)) {
6310 /* load/store exclusive */
6311 op1 = (insn >> 21) & 0x3;
6316 gen_movl_T1_reg(s, rn);
6318 if (insn & (1 << 20)) {
6319 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6322 tmp = gen_ld32(addr, IS_USER(s));
6324 case 1: /* ldrexd */
6325 tmp = gen_ld32(addr, IS_USER(s));
6326 store_reg(s, rd, tmp);
6327 tcg_gen_addi_i32(addr, addr, 4);
6328 tmp = gen_ld32(addr, IS_USER(s));
6331 case 2: /* ldrexb */
6332 tmp = gen_ld8u(addr, IS_USER(s));
6334 case 3: /* ldrexh */
6335 tmp = gen_ld16u(addr, IS_USER(s));
6340 store_reg(s, rd, tmp);
6342 int label = gen_new_label();
6344 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
6345 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6347 tmp = load_reg(s,rm);
6350 gen_st32(tmp, addr, IS_USER(s));
6352 case 1: /* strexd */
6353 gen_st32(tmp, addr, IS_USER(s));
6354 tcg_gen_addi_i32(addr, addr, 4);
6355 tmp = load_reg(s, rm + 1);
6356 gen_st32(tmp, addr, IS_USER(s));
6358 case 2: /* strexb */
6359 gen_st8(tmp, addr, IS_USER(s));
6361 case 3: /* strexh */
6362 gen_st16(tmp, addr, IS_USER(s));
6367 gen_set_label(label);
6368 gen_movl_reg_T0(s, rd);
6371 /* SWP instruction */
6374 /* ??? This is not really atomic. However we know
6375 we never have multiple CPUs running in parallel,
6376 so it is good enough. */
6377 addr = load_reg(s, rn);
6378 tmp = load_reg(s, rm);
6379 if (insn & (1 << 22)) {
6380 tmp2 = gen_ld8u(addr, IS_USER(s));
6381 gen_st8(tmp, addr, IS_USER(s));
6383 tmp2 = gen_ld32(addr, IS_USER(s));
6384 gen_st32(tmp, addr, IS_USER(s));
6387 store_reg(s, rd, tmp2);
6393 /* Misc load/store */
6394 rn = (insn >> 16) & 0xf;
6395 rd = (insn >> 12) & 0xf;
6396 addr = load_reg(s, rn);
6397 if (insn & (1 << 24))
6398 gen_add_datah_offset(s, insn, 0, addr);
6400 if (insn & (1 << 20)) {
6404 tmp = gen_ld16u(addr, IS_USER(s));
6407 tmp = gen_ld8s(addr, IS_USER(s));
6411 tmp = gen_ld16s(addr, IS_USER(s));
6415 } else if (sh & 2) {
6419 tmp = load_reg(s, rd);
6420 gen_st32(tmp, addr, IS_USER(s));
6421 tcg_gen_addi_i32(addr, addr, 4);
6422 tmp = load_reg(s, rd + 1);
6423 gen_st32(tmp, addr, IS_USER(s));
6427 tmp = gen_ld32(addr, IS_USER(s));
6428 store_reg(s, rd, tmp);
6429 tcg_gen_addi_i32(addr, addr, 4);
6430 tmp = gen_ld32(addr, IS_USER(s));
6434 address_offset = -4;
6437 tmp = load_reg(s, rd);
6438 gen_st16(tmp, addr, IS_USER(s));
6441 /* Perform base writeback before the loaded value to
6442 ensure correct behavior with overlapping index registers.
6443 ldrd with base writeback is undefined if the
6444 destination and index registers overlap. */
6445 if (!(insn & (1 << 24))) {
6446 gen_add_datah_offset(s, insn, address_offset, addr);
6447 store_reg(s, rn, addr);
6448 } else if (insn & (1 << 21)) {
6450 tcg_gen_addi_i32(addr, addr, address_offset);
6451 store_reg(s, rn, addr);
6456 /* Complete the load. */
6457 store_reg(s, rd, tmp);
6466 if (insn & (1 << 4)) {
6468 /* ARMv6 media instructions. */
6470 rn = (insn >> 16) & 0xf;
6471 rd = (insn >> 12) & 0xf;
6472 rs = (insn >> 8) & 0xf;
6473 switch ((insn >> 23) & 3) {
6474 case 0: /* Parallel add/subtract. */
6475 op1 = (insn >> 20) & 7;
6476 tmp = load_reg(s, rn);
6477 tmp2 = load_reg(s, rm);
6478 sh = (insn >> 5) & 7;
6479 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6481 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6483 store_reg(s, rd, tmp);
6486 if ((insn & 0x00700020) == 0) {
6487 /* Halfword pack. */
6488 tmp = load_reg(s, rn);
6489 tmp2 = load_reg(s, rm);
6490 shift = (insn >> 7) & 0x1f;
6491 if (insn & (1 << 6)) {
6495 tcg_gen_sari_i32(tmp2, tmp2, shift);
6496 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6497 tcg_gen_ext16u_i32(tmp2, tmp2);
6501 tcg_gen_shli_i32(tmp2, tmp2, shift);
6502 tcg_gen_ext16u_i32(tmp, tmp);
6503 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6505 tcg_gen_or_i32(tmp, tmp, tmp2);
6507 store_reg(s, rd, tmp);
6508 } else if ((insn & 0x00200020) == 0x00200000) {
6510 tmp = load_reg(s, rm);
6511 shift = (insn >> 7) & 0x1f;
6512 if (insn & (1 << 6)) {
6515 tcg_gen_sari_i32(tmp, tmp, shift);
6517 tcg_gen_shli_i32(tmp, tmp, shift);
6519 sh = (insn >> 16) & 0x1f;
6521 if (insn & (1 << 22))
6522 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
6524 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
6526 store_reg(s, rd, tmp);
6527 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6529 tmp = load_reg(s, rm);
6530 sh = (insn >> 16) & 0x1f;
6532 if (insn & (1 << 22))
6533 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
6535 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
6537 store_reg(s, rd, tmp);
6538 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6540 tmp = load_reg(s, rn);
6541 tmp2 = load_reg(s, rm);
6543 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6544 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6547 store_reg(s, rd, tmp);
6548 } else if ((insn & 0x000003e0) == 0x00000060) {
6549 tmp = load_reg(s, rm);
6550 shift = (insn >> 10) & 3;
6551 /* ??? In many cases it's not necessary to do a
6552 rotate; a shift is sufficient. */
6554 tcg_gen_rori_i32(tmp, tmp, shift * 8);
6555 op1 = (insn >> 20) & 7;
6557 case 0: gen_sxtb16(tmp); break;
6558 case 2: gen_sxtb(tmp); break;
6559 case 3: gen_sxth(tmp); break;
6560 case 4: gen_uxtb16(tmp); break;
6561 case 6: gen_uxtb(tmp); break;
6562 case 7: gen_uxth(tmp); break;
6563 default: goto illegal_op;
6566 tmp2 = load_reg(s, rn);
6567 if ((op1 & 3) == 0) {
6568 gen_add16(tmp, tmp2);
6570 tcg_gen_add_i32(tmp, tmp, tmp2);
6574 store_reg(s, rd, tmp);
6575 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6577 tmp = load_reg(s, rm);
6578 if (insn & (1 << 22)) {
6579 if (insn & (1 << 7)) {
6583 gen_helper_rbit(tmp, tmp);
6586 if (insn & (1 << 7))
6589 tcg_gen_bswap_i32(tmp, tmp);
6591 store_reg(s, rd, tmp);
6596 case 2: /* Multiplies (Type 3). */
6597 tmp = load_reg(s, rm);
6598 tmp2 = load_reg(s, rs);
6599 if (insn & (1 << 20)) {
6600 /* Signed multiply most significant [accumulate]. */
6601 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6602 if (insn & (1 << 5))
6603 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6604 tcg_gen_shri_i64(tmp64, tmp64, 32);
6606 tcg_gen_trunc_i64_i32(tmp, tmp64);
6608 tmp2 = load_reg(s, rd);
6609 if (insn & (1 << 6)) {
6610 tcg_gen_sub_i32(tmp, tmp, tmp2);
6612 tcg_gen_add_i32(tmp, tmp, tmp2);
6616 store_reg(s, rn, tmp);
6618 if (insn & (1 << 5))
6619 gen_swap_half(tmp2);
6620 gen_smul_dual(tmp, tmp2);
6621 /* This addition cannot overflow. */
6622 if (insn & (1 << 6)) {
6623 tcg_gen_sub_i32(tmp, tmp, tmp2);
6625 tcg_gen_add_i32(tmp, tmp, tmp2);
6628 if (insn & (1 << 22)) {
6629 /* smlald, smlsld */
6630 tmp64 = tcg_temp_new_i64();
6631 tcg_gen_ext_i32_i64(tmp64, tmp);
6633 gen_addq(s, tmp64, rd, rn);
6634 gen_storeq_reg(s, rd, rn, tmp64);
6636 /* smuad, smusd, smlad, smlsd */
6639 tmp2 = load_reg(s, rd);
6640 gen_helper_add_setq(tmp, tmp, tmp2);
6643 store_reg(s, rn, tmp);
6648 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6650 case 0: /* Unsigned sum of absolute differences. */
6652 tmp = load_reg(s, rm);
6653 tmp2 = load_reg(s, rs);
6654 gen_helper_usad8(tmp, tmp, tmp2);
6657 tmp2 = load_reg(s, rd);
6658 tcg_gen_add_i32(tmp, tmp, tmp2);
6661 store_reg(s, rn, tmp);
6663 case 0x20: case 0x24: case 0x28: case 0x2c:
6664 /* Bitfield insert/clear. */
6666 shift = (insn >> 7) & 0x1f;
6667 i = (insn >> 16) & 0x1f;
6671 tcg_gen_movi_i32(tmp, 0);
6673 tmp = load_reg(s, rm);
6676 tmp2 = load_reg(s, rd);
6677 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
6680 store_reg(s, rd, tmp);
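/* Illustrative case (operands assumed): "bfi r0, r1, #8, #4" yields
   shift = 8 and a field width of 4, so gen_bfi deposits the low
   four bits of r1 into bits 11:8 of r0; rm == 15 encodes bfc,
   inserting zeros instead. */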
6682 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6683 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
6685 tmp = load_reg(s, rm);
6686 shift = (insn >> 7) & 0x1f;
6687 i = ((insn >> 16) & 0x1f) + 1;
6692 gen_ubfx(tmp, shift, (1u << i) - 1);
6694 gen_sbfx(tmp, shift, i);
6697 store_reg(s, rd, tmp);
6707 /* Check for undefined extension instructions
6708 * per the ARM Bible, i.e.:
6709 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6711 sh = (0xf << 20) | (0xf << 4);
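/* The combined mask is 0x00f000f0, so together with op1 == 0x7 this
   matches the xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx pattern quoted
   above. */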
6712 if (op1 == 0x7 && ((insn & sh) == sh))
6716 /* load/store byte/word */
6717 rn = (insn >> 16) & 0xf;
6718 rd = (insn >> 12) & 0xf;
6719 tmp2 = load_reg(s, rn);
6720 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6721 if (insn & (1 << 24))
6722 gen_add_data_offset(s, insn, tmp2);
6723 if (insn & (1 << 20)) {
6725 if (insn & (1 << 22)) {
6726 tmp = gen_ld8u(tmp2, i);
6728 tmp = gen_ld32(tmp2, i);
6732 tmp = load_reg(s, rd);
6733 if (insn & (1 << 22))
6734 gen_st8(tmp, tmp2, i);
6736 gen_st32(tmp, tmp2, i);
6738 if (!(insn & (1 << 24))) {
6739 gen_add_data_offset(s, insn, tmp2);
6740 store_reg(s, rn, tmp2);
6741 } else if (insn & (1 << 21)) {
6742 store_reg(s, rn, tmp2);
6746 if (insn & (1 << 20)) {
6747 /* Complete the load. */
6751 store_reg(s, rd, tmp);
6757 int j, n, user, loaded_base;
6759 /* load/store multiple words */
6760 /* XXX: store correct base if write back */
6762 if (insn & (1 << 22)) {
6764 goto illegal_op; /* only usable in supervisor mode */
6766 if ((insn & (1 << 15)) == 0)
6769 rn = (insn >> 16) & 0xf;
6770 addr = load_reg(s, rn);
6772 /* compute total size */
6774 TCGV_UNUSED(loaded_var);
6777 if (insn & (1 << i))
6780 /* XXX: test invalid n == 0 case? */
6781 if (insn & (1 << 23)) {
6782 if (insn & (1 << 24)) {
6784 tcg_gen_addi_i32(addr, addr, 4);
6786 /* post increment */
6789 if (insn & (1 << 24)) {
6791 tcg_gen_addi_i32(addr, addr, -(n * 4));
6793 /* post decrement */
6795 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6800 if (insn & (1 << i)) {
6801 if (insn & (1 << 20)) {
6803 tmp = gen_ld32(addr, IS_USER(s));
6807 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6809 } else if (i == rn) {
6813 store_reg(s, i, tmp);
6818 /* special case: r15 = PC + 8 */
6819 val = (long)s->pc + 4;
6821 tcg_gen_movi_i32(tmp, val);
6824 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
6826 tmp = load_reg(s, i);
6828 gen_st32(tmp, addr, IS_USER(s));
6831 /* no need to add after the last transfer */
6833 tcg_gen_addi_i32(addr, addr, 4);
6836 if (insn & (1 << 21)) {
6838 if (insn & (1 << 23)) {
6839 if (insn & (1 << 24)) {
6842 /* post increment */
6843 tcg_gen_addi_i32(addr, addr, 4);
6846 if (insn & (1 << 24)) {
6849 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6851 /* post decrement */
6852 tcg_gen_addi_i32(addr, addr, -(n * 4));
6855 store_reg(s, rn, addr);
6860 store_reg(s, rn, loaded_var);
6862 if ((insn & (1 << 22)) && !user) {
6863 /* Restore CPSR from SPSR. */
6864 tmp = load_cpu_field(spsr);
6865 gen_set_cpsr(tmp, 0xffffffff);
6867 s->is_jmp = DISAS_UPDATE;
6876 /* branch (and link) */
6877 val = (int32_t)s->pc;
6878 if (insn & (1 << 24)) {
6880 tcg_gen_movi_i32(tmp, val);
6881 store_reg(s, 14, tmp);
6883 offset = (((int32_t)insn << 8) >> 8);
6884 val += (offset << 2) + 4;
6892 if (disas_coproc_insn(env, s, insn))
6897 gen_set_pc_im(s->pc);
6898 s->is_jmp = DISAS_SWI;
6902 gen_set_condexec(s);
6903 gen_set_pc_im(s->pc - 4);
6904 gen_exception(EXCP_UDEF);
6905 s->is_jmp = DISAS_JUMP;
6911 /* Return true if this is a Thumb-2 logical op. */
6913 thumb2_logic_op(int op)
6918 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6919 then set condition code flags based on the result of the operation.
6920 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6921 to the high bit of T1.
6922 Returns zero if the opcode is valid. */
6925 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6932 gen_op_andl_T0_T1();
6936 gen_op_bicl_T0_T1();
6949 gen_op_xorl_T0_T1();
6954 gen_op_addl_T0_T1_cc();
6956 gen_op_addl_T0_T1();
6960 gen_op_adcl_T0_T1_cc();
6966 gen_op_sbcl_T0_T1_cc();
6972 gen_op_subl_T0_T1_cc();
6974 gen_op_subl_T0_T1();
6978 gen_op_rsbl_T0_T1_cc();
6980 gen_op_rsbl_T0_T1();
6982 default: /* 5, 6, 7, 9, 12, 15. */
6986 gen_op_logic_T0_cc();
6988 gen_set_CF_bit31(cpu_T[1]);
6993 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
6995 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6997 uint32_t insn, imm, shift, offset;
6998 uint32_t rd, rn, rm, rs;
7009 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7010 || arm_feature(env, ARM_FEATURE_M))) {
7011 /* Thumb-1 cores may need to treat bl and blx as a pair of
7012 16-bit instructions to get correct prefetch abort behavior. */
7014 if ((insn & (1 << 12)) == 0) {
7015 /* Second half of blx. */
7016 offset = ((insn & 0x7ff) << 1);
7017 tmp = load_reg(s, 14);
7018 tcg_gen_addi_i32(tmp, tmp, offset);
7019 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7022 tcg_gen_movi_i32(tmp2, s->pc | 1);
7023 store_reg(s, 14, tmp2);
7027 if (insn & (1 << 11)) {
7028 /* Second half of bl. */
7029 offset = ((insn & 0x7ff) << 1) | 1;
7030 tmp = load_reg(s, 14);
7031 tcg_gen_addi_i32(tmp, tmp, offset);
7034 tcg_gen_movi_i32(tmp2, s->pc | 1);
7035 store_reg(s, 14, tmp2);
7039 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7040 /* Instruction spans a page boundary. Implement it as two
7041 16-bit instructions in case the second half causes an
exception. */
7043 offset = ((int32_t)insn << 21) >> 9;
7044 gen_op_movl_T0_im(s->pc + 2 + offset);
7045 gen_movl_reg_T0(s, 14);
7048 /* Fall through to 32-bit decode. */
7051 insn = lduw_code(s->pc);
7053 insn |= (uint32_t)insn_hw1 << 16;
7055 if ((insn & 0xf800e800) != 0xf000e800) {
7059 rn = (insn >> 16) & 0xf;
7060 rs = (insn >> 12) & 0xf;
7061 rd = (insn >> 8) & 0xf;
7063 switch ((insn >> 25) & 0xf) {
7064 case 0: case 1: case 2: case 3:
7065 /* 16-bit instructions. Should never happen. */
7068 if (insn & (1 << 22)) {
7069 /* Other load/store, table branch. */
7070 if (insn & 0x01200000) {
7071 /* Load/store doubleword. */
7074 tcg_gen_movi_i32(addr, s->pc & ~3);
7076 addr = load_reg(s, rn);
7078 offset = (insn & 0xff) * 4;
7079 if ((insn & (1 << 23)) == 0)
7081 if (insn & (1 << 24)) {
7082 tcg_gen_addi_i32(addr, addr, offset);
7085 if (insn & (1 << 20)) {
7087 tmp = gen_ld32(addr, IS_USER(s));
7088 store_reg(s, rs, tmp);
7089 tcg_gen_addi_i32(addr, addr, 4);
7090 tmp = gen_ld32(addr, IS_USER(s));
7091 store_reg(s, rd, tmp);
7094 tmp = load_reg(s, rs);
7095 gen_st32(tmp, addr, IS_USER(s));
7096 tcg_gen_addi_i32(addr, addr, 4);
7097 tmp = load_reg(s, rd);
7098 gen_st32(tmp, addr, IS_USER(s));
7100 if (insn & (1 << 21)) {
7101 /* Base writeback. */
7104 tcg_gen_addi_i32(addr, addr, offset - 4);
7105 store_reg(s, rn, addr);
7109 } else if ((insn & (1 << 23)) == 0) {
7110 /* Load/store exclusive word. */
7111 gen_movl_T1_reg(s, rn);
7113 if (insn & (1 << 20)) {
7114 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
7115 tmp = gen_ld32(addr, IS_USER(s));
7116 store_reg(s, rd, tmp);
7118 int label = gen_new_label();
7119 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7120 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
7122 tmp = load_reg(s, rs);
7123 gen_st32(tmp, cpu_T[1], IS_USER(s));
7124 gen_set_label(label);
7125 gen_movl_reg_T0(s, rd);
7127 } else if ((insn & (1 << 6)) == 0) {
7131 tcg_gen_movi_i32(addr, s->pc);
7133 addr = load_reg(s, rn);
7135 tmp = load_reg(s, rm);
7136 tcg_gen_add_i32(addr, addr, tmp);
7137 if (insn & (1 << 4)) {
7139 tcg_gen_add_i32(addr, addr, tmp);
7141 tmp = gen_ld16u(addr, IS_USER(s));
7144 tmp = gen_ld8u(addr, IS_USER(s));
7147 tcg_gen_shli_i32(tmp, tmp, 1);
7148 tcg_gen_addi_i32(tmp, tmp, s->pc);
7149 store_reg(s, 15, tmp);
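/* Illustrative flow (tbb/tbh): the byte or halfword entry loaded
   from the table is doubled and added to the PC, so a table entry
   of 0x10 redirects execution 0x20 bytes forward. */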
7151 /* Load/store exclusive byte/halfword/doubleword. */
7152 /* ??? These are not really atomic. However we know
7153 we never have multiple CPUs running in parallel,
7154 so it is good enough. */
7155 op = (insn >> 4) & 0x3;
7156 /* Must use a global reg for the address because we have
7157 a conditional branch in the store instruction. */
7158 gen_movl_T1_reg(s, rn);
7160 if (insn & (1 << 20)) {
7161 gen_helper_mark_exclusive(cpu_env, addr);
7164 tmp = gen_ld8u(addr, IS_USER(s));
7167 tmp = gen_ld16u(addr, IS_USER(s));
7170 tmp = gen_ld32(addr, IS_USER(s));
7171 tcg_gen_addi_i32(addr, addr, 4);
7172 tmp2 = gen_ld32(addr, IS_USER(s));
7173 store_reg(s, rd, tmp2);
7178 store_reg(s, rs, tmp);
7180 int label = gen_new_label();
7181 /* Must use a global that is not killed by the branch. */
7182 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7183 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
7184 tmp = load_reg(s, rs);
7187 gen_st8(tmp, addr, IS_USER(s));
7190 gen_st16(tmp, addr, IS_USER(s));
7193 gen_st32(tmp, addr, IS_USER(s));
7194 tcg_gen_addi_i32(addr, addr, 4);
7195 tmp = load_reg(s, rd);
7196 gen_st32(tmp, addr, IS_USER(s));
7201 gen_set_label(label);
7202 gen_movl_reg_T0(s, rm);
7206 /* Load/store multiple, RFE, SRS. */
7207 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7208 /* Not available in user mode. */
7211 if (insn & (1 << 20)) {
7213 addr = load_reg(s, rn);
7214 if ((insn & (1 << 24)) == 0)
7215 tcg_gen_addi_i32(addr, addr, -8);
7216 /* Load PC into tmp and CPSR into tmp2. */
7217 tmp = gen_ld32(addr, 0);
7218 tcg_gen_addi_i32(addr, addr, 4);
7219 tmp2 = gen_ld32(addr, 0);
7220 if (insn & (1 << 21)) {
7221 /* Base writeback. */
7222 if (insn & (1 << 24)) {
7223 tcg_gen_addi_i32(addr, addr, 4);
7225 tcg_gen_addi_i32(addr, addr, -4);
7227 store_reg(s, rn, addr);
7231 gen_rfe(s, tmp, tmp2);
7235 if (op == (env->uncached_cpsr & CPSR_M)) {
7236 addr = load_reg(s, 13);
7239 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
7241 if ((insn & (1 << 24)) == 0) {
7242 tcg_gen_addi_i32(addr, addr, -8);
7244 tmp = load_reg(s, 14);
7245 gen_st32(tmp, addr, 0);
7246 tcg_gen_addi_i32(addr, addr, 4);
7248 gen_helper_cpsr_read(tmp);
7249 gen_st32(tmp, addr, 0);
7250 if (insn & (1 << 21)) {
7251 if ((insn & (1 << 24)) == 0) {
7252 tcg_gen_addi_i32(addr, addr, -4);
7254 tcg_gen_addi_i32(addr, addr, 4);
7256 if (op == (env->uncached_cpsr & CPSR_M)) {
7257 store_reg(s, 13, addr);
7259 gen_helper_set_r13_banked(cpu_env,
7260 tcg_const_i32(op), addr);
7268 /* Load/store multiple. */
7269 addr = load_reg(s, rn);
7271 for (i = 0; i < 16; i++) {
7272 if (insn & (1 << i))
7275 if (insn & (1 << 24)) {
7276 tcg_gen_addi_i32(addr, addr, -offset);
7279 for (i = 0; i < 16; i++) {
7280 if ((insn & (1 << i)) == 0)
7282 if (insn & (1 << 20)) {
7284 tmp = gen_ld32(addr, IS_USER(s));
7288 store_reg(s, i, tmp);
7292 tmp = load_reg(s, i);
7293 gen_st32(tmp, addr, IS_USER(s));
7295 tcg_gen_addi_i32(addr, addr, 4);
7297 if (insn & (1 << 21)) {
7298 /* Base register writeback. */
7299 if (insn & (1 << 24)) {
7300 tcg_gen_addi_i32(addr, addr, -offset);
7302 /* Fault if writeback register is in register list. */
7303 if (insn & (1 << rn))
7305 store_reg(s, rn, addr);
7312 case 5: /* Data processing register constant shift. */
7314 gen_op_movl_T0_im(0);
7316 gen_movl_T0_reg(s, rn);
7317 gen_movl_T1_reg(s, rm);
7318 op = (insn >> 21) & 0xf;
7319 shiftop = (insn >> 4) & 3;
7320 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7321 conds = (insn & (1 << 20)) != 0;
7322 logic_cc = (conds && thumb2_logic_op(op));
7323 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
7324 if (gen_thumb2_data_op(s, op, conds, 0))
7327 gen_movl_reg_T0(s, rd);
7329 case 13: /* Misc data processing. */
7330 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7331 if (op < 4 && (insn & 0xf000) != 0xf000)
7334 case 0: /* Register controlled shift. */
7335 tmp = load_reg(s, rn);
7336 tmp2 = load_reg(s, rm);
7337 if ((insn & 0x70) != 0)
7339 op = (insn >> 21) & 3;
7340 logic_cc = (insn & (1 << 20)) != 0;
7341 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7344 store_reg(s, rd, tmp);
7346 case 1: /* Sign/zero extend. */
7347 tmp = load_reg(s, rm);
7348 shift = (insn >> 4) & 3;
7349 /* ??? In many cases it's not necessary to do a
7350 rotate; a shift is sufficient. */
7352 tcg_gen_rori_i32(tmp, tmp, shift * 8);
7353 op = (insn >> 20) & 7;
7355 case 0: gen_sxth(tmp); break;
7356 case 1: gen_uxth(tmp); break;
7357 case 2: gen_sxtb16(tmp); break;
7358 case 3: gen_uxtb16(tmp); break;
7359 case 4: gen_sxtb(tmp); break;
7360 case 5: gen_uxtb(tmp); break;
7361 default: goto illegal_op;
7364 tmp2 = load_reg(s, rn);
7365 if ((op >> 1) == 1) {
7366 gen_add16(tmp, tmp2);
7368 tcg_gen_add_i32(tmp, tmp, tmp2);
7372 store_reg(s, rd, tmp);
7374 case 2: /* SIMD add/subtract. */
7375 op = (insn >> 20) & 7;
7376 shift = (insn >> 4) & 7;
7377 if ((op & 3) == 3 || (shift & 3) == 3)
7379 tmp = load_reg(s, rn);
7380 tmp2 = load_reg(s, rm);
7381 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7383 store_reg(s, rd, tmp);
7385 case 3: /* Other data processing. */
7386 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7388 /* Saturating add/subtract. */
7389 tmp = load_reg(s, rn);
7390 tmp2 = load_reg(s, rm);
7392 gen_helper_double_saturate(tmp, tmp);
7394 gen_helper_sub_saturate(tmp, tmp2, tmp);
7396 gen_helper_add_saturate(tmp, tmp, tmp2);
7399 tmp = load_reg(s, rn);
7401 case 0x0a: /* rbit */
7402 gen_helper_rbit(tmp, tmp);
7404 case 0x08: /* rev */
7405 tcg_gen_bswap_i32(tmp, tmp);
7407 case 0x09: /* rev16 */
7410 case 0x0b: /* revsh */
7413 case 0x10: /* sel */
7414 tmp2 = load_reg(s, rm);
7416 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7417 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7421 case 0x18: /* clz */
7422 gen_helper_clz(tmp, tmp);
7428 store_reg(s, rd, tmp);
7430 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7431 op = (insn >> 4) & 0xf;
7432 tmp = load_reg(s, rn);
7433 tmp2 = load_reg(s, rm);
7434 switch ((insn >> 20) & 7) {
7435 case 0: /* 32 x 32 -> 32 */
7436 tcg_gen_mul_i32(tmp, tmp, tmp2);
7439 tmp2 = load_reg(s, rs);
7441 tcg_gen_sub_i32(tmp, tmp2, tmp);
7443 tcg_gen_add_i32(tmp, tmp, tmp2);
7447 case 1: /* 16 x 16 -> 32 */
7448 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7451 tmp2 = load_reg(s, rs);
7452 gen_helper_add_setq(tmp, tmp, tmp2);
7456 case 2: /* Dual multiply add. */
7457 case 4: /* Dual multiply subtract. */
7459 gen_swap_half(tmp2);
7460 gen_smul_dual(tmp, tmp2);
7461 /* This addition cannot overflow. */
7462 if (insn & (1 << 22)) {
7463 tcg_gen_sub_i32(tmp, tmp, tmp2);
7465 tcg_gen_add_i32(tmp, tmp, tmp2);
7470 tmp2 = load_reg(s, rs);
7471 gen_helper_add_setq(tmp, tmp, tmp2);
7475 case 3: /* 32 * 16 -> 32msb */
7477 tcg_gen_sari_i32(tmp2, tmp2, 16);
7480 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7481 tcg_gen_shri_i64(tmp64, tmp64, 16);
7483 tcg_gen_trunc_i64_i32(tmp, tmp64);
7486 tmp2 = load_reg(s, rs);
7487 gen_helper_add_setq(tmp, tmp, tmp2);
7491 case 5: case 6: /* 32 * 32 -> 32msb */
7492 gen_imull(tmp, tmp2);
7493 if (insn & (1 << 5)) {
7494 gen_roundqd(tmp, tmp2);
7501 tmp2 = load_reg(s, rs);
7502 if (insn & (1 << 21)) {
7503 tcg_gen_add_i32(tmp, tmp, tmp2);
7505 tcg_gen_sub_i32(tmp, tmp2, tmp);
7510 case 7: /* Unsigned sum of absolute differences. */
7511 gen_helper_usad8(tmp, tmp, tmp2);
7514 tmp2 = load_reg(s, rs);
7515 tcg_gen_add_i32(tmp, tmp, tmp2);
7520 store_reg(s, rd, tmp);
7522 case 6: case 7: /* 64-bit multiply, Divide. */
7523 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7524 tmp = load_reg(s, rn);
7525 tmp2 = load_reg(s, rm);
7526 if ((op & 0x50) == 0x10) {
7528 if (!arm_feature(env, ARM_FEATURE_DIV))
7531 gen_helper_udiv(tmp, tmp, tmp2);
7533 gen_helper_sdiv(tmp, tmp, tmp2);
7535 store_reg(s, rd, tmp);
7536 } else if ((op & 0xe) == 0xc) {
7537 /* Dual multiply accumulate long. */
7539 gen_swap_half(tmp2);
7540 gen_smul_dual(tmp, tmp2);
7542 tcg_gen_sub_i32(tmp, tmp, tmp2);
7544 tcg_gen_add_i32(tmp, tmp, tmp2);
7548 tmp64 = tcg_temp_new_i64();
7549 tcg_gen_ext_i32_i64(tmp64, tmp);
7551 gen_addq(s, tmp64, rs, rd);
7552 gen_storeq_reg(s, rs, rd, tmp64);
7555 /* Unsigned 64-bit multiply */
7556 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7560 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7562 tmp64 = tcg_temp_new_i64();
7563 tcg_gen_ext_i32_i64(tmp64, tmp);
7566 /* Signed 64-bit multiply */
7567 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7572 gen_addq_lo(s, tmp64, rs);
7573 gen_addq_lo(s, tmp64, rd);
7574 } else if (op & 0x40) {
7575 /* 64-bit accumulate. */
7576 gen_addq(s, tmp64, rs, rd);
7578 gen_storeq_reg(s, rs, rd, tmp64);
7583 case 6: case 7: case 14: case 15:
7585 if (((insn >> 24) & 3) == 3) {
7586 /* Translate into the equivalent ARM encoding. */
7587 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
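/* Illustrative reading of the mask: 0xe2ffffff clears bits 28,
   27:26 and 24, and the shifted term re-inserts the original
   bit 28 at bit 24, mapping the Thumb-2 encoding onto its ARM
   equivalent. */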
7588 if (disas_neon_data_insn(env, s, insn))
7591 if (insn & (1 << 28))
7593 if (disas_coproc_insn(env, s, insn))
7597 case 8: case 9: case 10: case 11:
7598 if (insn & (1 << 15)) {
7599 /* Branches, misc control. */
7600 if (insn & 0x5000) {
7601 /* Unconditional branch. */
7602 /* signextend(hw1[10:0]) -> offset[:12]. */
7603 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7604 /* hw1[10:0] -> offset[11:1]. */
7605 offset |= (insn & 0x7ff) << 1;
7606 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7607 offset[24:22] already have the same value because of the
7608 sign extension above. */
7609 offset ^= ((~insn) & (1 << 13)) << 10;
7610 offset ^= ((~insn) & (1 << 11)) << 11;
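/* Worked example (illustrative): with S = 0 the sign extension
   above leaves offset[23:22] clear, so a zero J1 (bit 13) flips
   offset[23] and a zero J2 (bit 11) flips offset[22]; the XORs
   implement I1 = !(J1 ^ S) and I2 = !(J2 ^ S) without a separate
   branch. */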
7612 if (insn & (1 << 14)) {
7613 /* Branch and link. */
7614 gen_op_movl_T1_im(s->pc | 1);
7615 gen_movl_reg_T1(s, 14);
7619 if (insn & (1 << 12)) {
7624 offset &= ~(uint32_t)2;
7625 gen_bx_im(s, offset);
7627 } else if (((insn >> 23) & 7) == 7) {
7629 if (insn & (1 << 13))
7632 if (insn & (1 << 26)) {
7633 /* Secure monitor call (v6Z) */
7634 goto illegal_op; /* not implemented. */
7636 op = (insn >> 20) & 7;
7638 case 0: /* msr cpsr. */
7640 tmp = load_reg(s, rn);
7641 addr = tcg_const_i32(insn & 0xff);
7642 gen_helper_v7m_msr(cpu_env, addr, tmp);
7647 case 1: /* msr spsr. */
7650 gen_movl_T0_reg(s, rn);
7651 if (gen_set_psr_T0(s,
7652 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7656 case 2: /* cps, nop-hint. */
7657 if (((insn >> 8) & 7) == 0) {
7658 gen_nop_hint(s, insn & 0xff);
7660 /* Implemented as NOP in user mode. */
7665 if (insn & (1 << 10)) {
7666 if (insn & (1 << 7))
7668 if (insn & (1 << 6))
7670 if (insn & (1 << 5))
7672 if (insn & (1 << 9))
7673 imm = CPSR_A | CPSR_I | CPSR_F;
7675 if (insn & (1 << 8)) {
7677 imm |= (insn & 0x1f);
7680 gen_op_movl_T0_im(imm);
7681 gen_set_psr_T0(s, offset, 0);
7684 case 3: /* Special control operations. */
7685 op = (insn >> 4) & 0xf;
7688 gen_helper_clrex(cpu_env);
7693 /* These execute as NOPs. */
7701 /* Trivial implementation equivalent to bx. */
7702 tmp = load_reg(s, rn);
7705 case 5: /* Exception return. */
7706 /* Unpredictable in user mode. */
7708 case 6: /* mrs cpsr. */
7711 addr = tcg_const_i32(insn & 0xff);
7712 gen_helper_v7m_mrs(tmp, cpu_env, addr);
7714 gen_helper_cpsr_read(tmp);
7716 store_reg(s, rd, tmp);
7718 case 7: /* mrs spsr. */
7719 /* Not accessible in user mode. */
7720 if (IS_USER(s) || IS_M(env))
7722 tmp = load_cpu_field(spsr);
7723 store_reg(s, rd, tmp);
7728 /* Conditional branch. */
7729 op = (insn >> 22) & 0xf;
7730 /* Generate a conditional jump to next instruction. */
7731 s->condlabel = gen_new_label();
7732 gen_test_cc(op ^ 1, s->condlabel);
7735 /* offset[11:1] = insn[10:0] */
7736 offset = (insn & 0x7ff) << 1;
7737 /* offset[17:12] = insn[21:16]. */
7738 offset |= (insn & 0x003f0000) >> 4;
7739 /* offset[31:20] = insn[26]. */
7740 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7741 /* offset[18] = insn[13]. */
7742 offset |= (insn & (1 << 13)) << 5;
7743 /* offset[19] = insn[11]. */
7744 offset |= (insn & (1 << 11)) << 8;
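/* Illustrative check: insn[10:0] = 0x7ff alone contributes
   offset = 0xffe, i.e. +4094 bytes; the remaining fields above
   widen this to the full signed range of roughly +/-1 MB. */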
7746 /* jump to the offset */
7747 gen_jmp(s, s->pc + offset);
7750 /* Data processing immediate. */
7751 if (insn & (1 << 25)) {
7752 if (insn & (1 << 24)) {
7753 if (insn & (1 << 20))
7755 /* Bitfield/Saturate. */
7756 op = (insn >> 21) & 7;
7758 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7761 tcg_gen_movi_i32(tmp, 0);
7763 tmp = load_reg(s, rn);
7766 case 2: /* Signed bitfield extract. */
7768 if (shift + imm > 32)
7771 gen_sbfx(tmp, shift, imm);
7773 case 6: /* Unsigned bitfield extract. */
7775 if (shift + imm > 32)
7778 gen_ubfx(tmp, shift, (1u << imm) - 1);
7780 case 3: /* Bitfield insert/clear. */
7783 imm = imm + 1 - shift;
7785 tmp2 = load_reg(s, rd);
7786 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7792 default: /* Saturate. */
7795 tcg_gen_sari_i32(tmp, tmp, shift);
7797 tcg_gen_shli_i32(tmp, tmp, shift);
7799 tmp2 = tcg_const_i32(imm);
7802 if ((op & 1) && shift == 0)
7803 gen_helper_usat16(tmp, tmp, tmp2);
7805 gen_helper_usat(tmp, tmp, tmp2);
7808 if ((op & 1) && shift == 0)
7809 gen_helper_ssat16(tmp, tmp, tmp2);
7811 gen_helper_ssat(tmp, tmp, tmp2);
7815 store_reg(s, rd, tmp);
7817 imm = ((insn & 0x04000000) >> 15)
7818 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7819 if (insn & (1 << 22)) {
7820 /* 16-bit immediate. */
7821 imm |= (insn >> 4) & 0xf000;
7822 if (insn & (1 << 23)) {
7824 tmp = load_reg(s, rd);
7825 tcg_gen_ext16u_i32(tmp, tmp);
7826 tcg_gen_ori_i32(tmp, tmp, imm << 16);
7830 tcg_gen_movi_i32(tmp, imm);
7833 /* Add/sub 12-bit immediate. */
7835 offset = s->pc & ~(uint32_t)3;
7836 if (insn & (1 << 23))
7841 tcg_gen_movi_i32(tmp, offset);
7843 tmp = load_reg(s, rn);
7844 if (insn & (1 << 23))
7845 tcg_gen_subi_i32(tmp, tmp, imm);
7847 tcg_gen_addi_i32(tmp, tmp, imm);
7850 store_reg(s, rd, tmp);
7853 int shifter_out = 0;
7854 /* modified 12-bit immediate. */
7855 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7856 imm = (insn & 0xff);
7859 /* Nothing to do. */
7861 case 1: /* 00XY00XY */
7864 case 2: /* XY00XY00 */
7868 case 3: /* XYXYXYXY */
7872 default: /* Rotated constant. */
7873 shift = (shift << 1) | (imm >> 7);
7875 imm = imm << (32 - shift);
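/* Worked example (illustrative, assuming the encoding's implicit
   high bit): a shift field of 0b1000 with imm = 0x55 gives
   shift = 16; the 8-bit value gains its forced top bit (0xd5), so
   the final constant is 0xd5 << 16 = 0x00d50000, the T32 "modified
   immediate" form. */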
7879 gen_op_movl_T1_im(imm);
7880 rn = (insn >> 16) & 0xf;
7882 gen_op_movl_T0_im(0);
7884 gen_movl_T0_reg(s, rn);
7885 op = (insn >> 21) & 0xf;
7886 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7889 rd = (insn >> 8) & 0xf;
7891 gen_movl_reg_T0(s, rd);
7896 case 12: /* Load/store single data item. */
7901 if ((insn & 0x01100000) == 0x01000000) {
7902 if (disas_neon_ls_insn(env, s, insn))
7910 /* s->pc has already been incremented by 4. */
7911 imm = s->pc & 0xfffffffc;
7912 if (insn & (1 << 23))
7913 imm += insn & 0xfff;
7915 imm -= insn & 0xfff;
7916 tcg_gen_movi_i32(addr, imm);
7918 addr = load_reg(s, rn);
7919 if (insn & (1 << 23)) {
7920 /* Positive offset. */
7922 tcg_gen_addi_i32(addr, addr, imm);
7924 op = (insn >> 8) & 7;
7927 case 0: case 8: /* Shifted Register. */
7928 shift = (insn >> 4) & 0xf;
7931 tmp = load_reg(s, rm);
7933 tcg_gen_shli_i32(tmp, tmp, shift);
7934 tcg_gen_add_i32(addr, addr, tmp);
7937 case 4: /* Negative offset. */
7938 tcg_gen_addi_i32(addr, addr, -imm);
7940 case 6: /* User privilege. */
7941 tcg_gen_addi_i32(addr, addr, imm);
7944 case 1: /* Post-decrement. */
7947 case 3: /* Post-increment. */
7951 case 5: /* Pre-decrement. */
7954 case 7: /* Pre-increment. */
7955 tcg_gen_addi_i32(addr, addr, imm);
7963 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7964 if (insn & (1 << 20)) {
7966 if (rs == 15 && op != 2) {
7969 /* Memory hint. Implemented as NOP. */
7972 case 0: tmp = gen_ld8u(addr, user); break;
7973 case 4: tmp = gen_ld8s(addr, user); break;
7974 case 1: tmp = gen_ld16u(addr, user); break;
7975 case 5: tmp = gen_ld16s(addr, user); break;
7976 case 2: tmp = gen_ld32(addr, user); break;
7977 default: goto illegal_op;
7982 store_reg(s, rs, tmp);
7989 tmp = load_reg(s, rs);
7991 case 0: gen_st8(tmp, addr, user); break;
7992 case 1: gen_st16(tmp, addr, user); break;
7993 case 2: gen_st32(tmp, addr, user); break;
7994 default: goto illegal_op;
7998 tcg_gen_addi_i32(addr, addr, imm);
8000 store_reg(s, rn, addr);
8014 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8016 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8023 if (s->condexec_mask) {
8024 cond = s->condexec_cond;
8025 s->condlabel = gen_new_label();
8026 gen_test_cc(cond ^ 1, s->condlabel);
8030 insn = lduw_code(s->pc);
8033 switch (insn >> 12) {
8036 op = (insn >> 11) & 3;
8039 rn = (insn >> 3) & 7;
8040 gen_movl_T0_reg(s, rn);
8041 if (insn & (1 << 10)) {
8043 gen_op_movl_T1_im((insn >> 6) & 7);
8046 rm = (insn >> 6) & 7;
8047 gen_movl_T1_reg(s, rm);
8049 if (insn & (1 << 9)) {
8050 if (s->condexec_mask)
8051 gen_op_subl_T0_T1();
8053 gen_op_subl_T0_T1_cc();
8055 if (s->condexec_mask)
8056 gen_op_addl_T0_T1();
8058 gen_op_addl_T0_T1_cc();
8060 gen_movl_reg_T0(s, rd);
8062 /* shift immediate */
8063 rm = (insn >> 3) & 7;
8064 shift = (insn >> 6) & 0x1f;
8065 tmp = load_reg(s, rm);
8066 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8067 if (!s->condexec_mask)
8069 store_reg(s, rd, tmp);
8073 /* arithmetic large immediate */
8074 op = (insn >> 11) & 3;
8075 rd = (insn >> 8) & 0x7;
8077 gen_op_movl_T0_im(insn & 0xff);
8079 gen_movl_T0_reg(s, rd);
8080 gen_op_movl_T1_im(insn & 0xff);
8084 if (!s->condexec_mask)
8085 gen_op_logic_T0_cc();
8088 gen_op_subl_T0_T1_cc();
8091 if (s->condexec_mask)
8092 gen_op_addl_T0_T1();
8094 gen_op_addl_T0_T1_cc();
8097 if (s->condexec_mask)
8098 gen_op_subl_T0_T1();
8100 gen_op_subl_T0_T1_cc();
8104 gen_movl_reg_T0(s, rd);
8107 if (insn & (1 << 11)) {
8108 rd = (insn >> 8) & 7;
8109 /* load pc-relative. Bit 1 of PC is ignored. */
8110 val = s->pc + 2 + ((insn & 0xff) * 4);
8111 val &= ~(uint32_t)2;
8113 tcg_gen_movi_i32(addr, val);
8114 tmp = gen_ld32(addr, IS_USER(s));
8116 store_reg(s, rd, tmp);
8119 if (insn & (1 << 10)) {
8120 /* data processing extended or blx */
8121 rd = (insn & 7) | ((insn >> 4) & 8);
8122 rm = (insn >> 3) & 0xf;
8123 op = (insn >> 8) & 3;
8126 gen_movl_T0_reg(s, rd);
8127 gen_movl_T1_reg(s, rm);
8128 gen_op_addl_T0_T1();
8129 gen_movl_reg_T0(s, rd);
8132 gen_movl_T0_reg(s, rd);
8133 gen_movl_T1_reg(s, rm);
8134 gen_op_subl_T0_T1_cc();
8136 case 2: /* mov/cpy */
8137 gen_movl_T0_reg(s, rm);
8138 gen_movl_reg_T0(s, rd);
8140 case 3: /* branch [and link] exchange thumb register */
8141 tmp = load_reg(s, rm);
8142 if (insn & (1 << 7)) {
8143 val = (uint32_t)s->pc | 1;
8145 tcg_gen_movi_i32(tmp2, val);
8146 store_reg(s, 14, tmp2);
8154 /* data processing register */
8156 rm = (insn >> 3) & 7;
8157 op = (insn >> 6) & 0xf;
8158 if (op == 2 || op == 3 || op == 4 || op == 7) {
8159 /* the shift/rotate ops want the operands backwards */
8168 if (op == 9) /* neg */
8169 gen_op_movl_T0_im(0);
8170 else if (op != 0xf) /* mvn doesn't read its first operand */
8171 gen_movl_T0_reg(s, rd);
8173 gen_movl_T1_reg(s, rm);
8176 gen_op_andl_T0_T1();
8177 if (!s->condexec_mask)
8178 gen_op_logic_T0_cc();
8181 gen_op_xorl_T0_T1();
8182 if (!s->condexec_mask)
8183 gen_op_logic_T0_cc();
8186 if (s->condexec_mask) {
8187 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
8189 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8190 gen_op_logic_T1_cc();
8194 if (s->condexec_mask) {
8195 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
8197 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8198 gen_op_logic_T1_cc();
8202 if (s->condexec_mask) {
8203 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
8205 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8206 gen_op_logic_T1_cc();
8210 if (s->condexec_mask)
8213 gen_op_adcl_T0_T1_cc();
8216 if (s->condexec_mask)
8219 gen_op_sbcl_T0_T1_cc();
8222 if (s->condexec_mask) {
8223 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
8225 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8226 gen_op_logic_T1_cc();
8230 gen_op_andl_T0_T1();
8231 gen_op_logic_T0_cc();
8235 if (s->condexec_mask)
8236 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
8238 gen_op_subl_T0_T1_cc();
8241 gen_op_subl_T0_T1_cc();
8245 gen_op_addl_T0_T1_cc();
8250 if (!s->condexec_mask)
8251 gen_op_logic_T0_cc();
8254 gen_op_mull_T0_T1();
8255 if (!s->condexec_mask)
8256 gen_op_logic_T0_cc();
8259 gen_op_bicl_T0_T1();
8260 if (!s->condexec_mask)
8261 gen_op_logic_T0_cc();
8265 if (!s->condexec_mask)
8266 gen_op_logic_T1_cc();
8273 gen_movl_reg_T1(s, rm);
8275 gen_movl_reg_T0(s, rd);
8280 /* load/store register offset. */
8282 rn = (insn >> 3) & 7;
8283 rm = (insn >> 6) & 7;
8284 op = (insn >> 9) & 7;
8285 addr = load_reg(s, rn);
8286 tmp = load_reg(s, rm);
8287 tcg_gen_add_i32(addr, addr, tmp);
8290 if (op < 3) /* store */
8291 tmp = load_reg(s, rd);
8295 gen_st32(tmp, addr, IS_USER(s));
8298 gen_st16(tmp, addr, IS_USER(s));
8301 gen_st8(tmp, addr, IS_USER(s));
8304 tmp = gen_ld8s(addr, IS_USER(s));
8307 tmp = gen_ld32(addr, IS_USER(s));
8310 tmp = gen_ld16u(addr, IS_USER(s));
8313 tmp = gen_ld8u(addr, IS_USER(s));
8316 tmp = gen_ld16s(addr, IS_USER(s));
8319 if (op >= 3) /* load */
8320 store_reg(s, rd, tmp);
8325 /* load/store word immediate offset */
8327 rn = (insn >> 3) & 7;
8328 addr = load_reg(s, rn);
8329 val = (insn >> 4) & 0x7c;
8330 tcg_gen_addi_i32(addr, addr, val);
8332 if (insn & (1 << 11)) {
8334 tmp = gen_ld32(addr, IS_USER(s));
8335 store_reg(s, rd, tmp);
8338 tmp = load_reg(s, rd);
8339 gen_st32(tmp, addr, IS_USER(s));
8345 /* load/store byte immediate offset */
8347 rn = (insn >> 3) & 7;
8348 addr = load_reg(s, rn);
8349 val = (insn >> 6) & 0x1f;
8350 tcg_gen_addi_i32(addr, addr, val);
8352 if (insn & (1 << 11)) {
8354 tmp = gen_ld8u(addr, IS_USER(s));
8355 store_reg(s, rd, tmp);
8358 tmp = load_reg(s, rd);
8359 gen_st8(tmp, addr, IS_USER(s));
8365 /* load/store halfword immediate offset */
8367 rn = (insn >> 3) & 7;
8368 addr = load_reg(s, rn);
8369 val = (insn >> 5) & 0x3e;
8370 tcg_gen_addi_i32(addr, addr, val);
8372 if (insn & (1 << 11)) {
8374 tmp = gen_ld16u(addr, IS_USER(s));
8375 store_reg(s, rd, tmp);
8378 tmp = load_reg(s, rd);
8379 gen_st16(tmp, addr, IS_USER(s));
8385 /* load/store from stack */
8386 rd = (insn >> 8) & 7;
8387 addr = load_reg(s, 13);
8388 val = (insn & 0xff) * 4;
8389 tcg_gen_addi_i32(addr, addr, val);
8391 if (insn & (1 << 11)) {
8393 tmp = gen_ld32(addr, IS_USER(s));
8394 store_reg(s, rd, tmp);
8397 tmp = load_reg(s, rd);
8398 gen_st32(tmp, addr, IS_USER(s));
8404 /* add to high reg */
8405 rd = (insn >> 8) & 7;
8406 if (insn & (1 << 11)) {
8408 tmp = load_reg(s, 13);
8410 /* PC. Bit 1 is ignored. */
8412 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
8414 val = (insn & 0xff) * 4;
8415 tcg_gen_addi_i32(tmp, tmp, val);
8416 store_reg(s, rd, tmp);
8421 op = (insn >> 8) & 0xf;
8424 /* adjust stack pointer */
8425 tmp = load_reg(s, 13);
8426 val = (insn & 0x7f) * 4;
8427 if (insn & (1 << 7))
8428 val = -(int32_t)val;
8429 tcg_gen_addi_i32(tmp, tmp, val);
8430 store_reg(s, 13, tmp);
8433 case 2: /* sign/zero extend. */
8436 rm = (insn >> 3) & 7;
8437 tmp = load_reg(s, rm);
8438 switch ((insn >> 6) & 3) {
8439 case 0: gen_sxth(tmp); break;
8440 case 1: gen_sxtb(tmp); break;
8441 case 2: gen_uxth(tmp); break;
8442 case 3: gen_uxtb(tmp); break;
8444 store_reg(s, rd, tmp);
8446 case 4: case 5: case 0xc: case 0xd:
8448 addr = load_reg(s, 13);
8449 if (insn & (1 << 8))
8453 for (i = 0; i < 8; i++) {
8454 if (insn & (1 << i))
8457 if ((insn & (1 << 11)) == 0) {
8458 tcg_gen_addi_i32(addr, addr, -offset);
8460 for (i = 0; i < 8; i++) {
8461 if (insn & (1 << i)) {
8462 if (insn & (1 << 11)) {
8464 tmp = gen_ld32(addr, IS_USER(s));
8465 store_reg(s, i, tmp);
8468 tmp = load_reg(s, i);
8469 gen_st32(tmp, addr, IS_USER(s));
8471 /* advance to the next address. */
8472 tcg_gen_addi_i32(addr, addr, 4);
8476 if (insn & (1 << 8)) {
8477 if (insn & (1 << 11)) {
8479 tmp = gen_ld32(addr, IS_USER(s));
8480 /* don't set the pc until the rest of the instruction
has completed */
8484 tmp = load_reg(s, 14);
8485 gen_st32(tmp, addr, IS_USER(s));
8487 tcg_gen_addi_i32(addr, addr, 4);
8489 if ((insn & (1 << 11)) == 0) {
8490 tcg_gen_addi_i32(addr, addr, -offset);
8492 /* write back the new stack pointer */
8493 store_reg(s, 13, addr);
8494 /* set the new PC value */
8495 if ((insn & 0x0900) == 0x0900)
8499 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8501 tmp = load_reg(s, rm);
8502 s->condlabel = gen_new_label();
8504 if (insn & (1 << 11))
8505 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8507 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8509 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8510 val = (uint32_t)s->pc + 2;
8515 case 15: /* IT, nop-hint. */
8516 if ((insn & 0xf) == 0) {
8517 gen_nop_hint(s, (insn >> 4) & 0xf);
8521 s->condexec_cond = (insn >> 4) & 0xe;
8522 s->condexec_mask = insn & 0x1f;
8523 /* No actual code generated for this insn, just set up state. */
8526 case 0xe: /* bkpt */
8527 gen_set_condexec(s);
8528 gen_set_pc_im(s->pc - 2);
8529 gen_exception(EXCP_BKPT);
8530 s->is_jmp = DISAS_JUMP;
8535 rn = (insn >> 3) & 0x7;
8537 tmp = load_reg(s, rn);
8538 switch ((insn >> 6) & 3) {
8539 case 0: tcg_gen_bswap_i32(tmp, tmp); break;
8540 case 1: gen_rev16(tmp); break;
8541 case 3: gen_revsh(tmp); break;
8542 default: goto illegal_op;
8544 store_reg(s, rd, tmp);
8552 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
8555 addr = tcg_const_i32(16);
8556 gen_helper_v7m_msr(cpu_env, addr, tmp);
8560 addr = tcg_const_i32(17);
8561 gen_helper_v7m_msr(cpu_env, addr, tmp);
8565 if (insn & (1 << 4))
8566 shift = CPSR_A | CPSR_I | CPSR_F;
8570 val = ((insn & 7) << 6) & shift;
8571 gen_op_movl_T0_im(val);
8572 gen_set_psr_T0(s, shift, 0);
8582 /* load/store multiple */
8583 rn = (insn >> 8) & 0x7;
8584 addr = load_reg(s, rn);
8585 for (i = 0; i < 8; i++) {
8586 if (insn & (1 << i)) {
8587 if (insn & (1 << 11)) {
8589 tmp = gen_ld32(addr, IS_USER(s));
8590 store_reg(s, i, tmp);
8593 tmp = load_reg(s, i);
8594 gen_st32(tmp, addr, IS_USER(s));
8596 /* advance to the next address */
8597 tcg_gen_addi_i32(addr, addr, 4);
8600 /* Base register writeback. */
8601 if ((insn & (1 << rn)) == 0) {
8602 store_reg(s, rn, addr);
8609 /* conditional branch or swi */
8610 cond = (insn >> 8) & 0xf;
8616 gen_set_condexec(s);
8617 gen_set_pc_im(s->pc);
8618 s->is_jmp = DISAS_SWI;
8621 /* generate a conditional jump to next instruction */
8622 s->condlabel = gen_new_label();
8623 gen_test_cc(cond ^ 1, s->condlabel);
8625 gen_movl_T1_reg(s, 15);
8627 /* jump to the offset */
8628 val = (uint32_t)s->pc + 2;
8629 offset = ((int32_t)insn << 24) >> 24;
8635 if (insn & (1 << 11)) {
8636 if (disas_thumb2_insn(env, s, insn))
8640 /* unconditional branch */
8641 val = (uint32_t)s->pc;
8642 offset = ((int32_t)insn << 21) >> 21;
8643 val += (offset << 1) + 2;
8648 if (disas_thumb2_insn(env, s, insn))
8654 gen_set_condexec(s);
8655 gen_set_pc_im(s->pc - 4);
8656 gen_exception(EXCP_UDEF);
8657 s->is_jmp = DISAS_JUMP;
8661 gen_set_condexec(s);
8662 gen_set_pc_im(s->pc - 2);
8663 gen_exception(EXCP_UDEF);
8664 s->is_jmp = DISAS_JUMP;
8667 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8668 basic block 'tb'. If search_pc is TRUE, also generate PC
8669 information for each intermediate instruction. */
8670 static inline void gen_intermediate_code_internal(CPUState *env,
8671 TranslationBlock *tb,
8674 DisasContext dc1, *dc = &dc1;
8676 uint16_t *gen_opc_end;
8678 target_ulong pc_start;
8679 uint32_t next_page_start;
8683 /* generate intermediate code */
8685 memset(temps, 0, sizeof(temps));
8691 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
8693 dc->is_jmp = DISAS_NEXT;
8695 dc->singlestep_enabled = env->singlestep_enabled;
8697 dc->thumb = env->thumb;
8698 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8699 dc->condexec_cond = env->condexec_bits >> 4;
8700 #if !defined(CONFIG_USER_ONLY)
8702 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8704 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8707 cpu_F0s = tcg_temp_new_i32();
8708 cpu_F1s = tcg_temp_new_i32();
8709 cpu_F0d = tcg_temp_new_i64();
8710 cpu_F1d = tcg_temp_new_i64();
8713 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
8714 cpu_M0 = tcg_temp_new_i64();
8715 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
8718 max_insns = tb->cflags & CF_COUNT_MASK;
8720 max_insns = CF_COUNT_MASK;
8723 /* Reset the conditional execution bits immediately. This avoids
8724 complications trying to do it at the end of the block. */
8725 if (env->condexec_bits)
8727 TCGv tmp = new_tmp();
8728 tcg_gen_movi_i32(tmp, 0);
8729 store_cpu_field(tmp, condexec_bits);
8732 #ifdef CONFIG_USER_ONLY
8733 /* Intercept jump to the magic kernel page. */
8734 if (dc->pc >= 0xffff0000) {
8735 /* We always get here via a jump, so we know we are not in a
8736 conditional execution block. */
8737 gen_exception(EXCP_KERNEL_TRAP);
8738 dc->is_jmp = DISAS_UPDATE;
8742 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8743 /* We always get here via a jump, so we know we are not in a
8744 conditional execution block. */
8745 gen_exception(EXCP_EXCEPTION_EXIT);
8746 dc->is_jmp = DISAS_UPDATE;
8751 if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
8752 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
8753 if (bp->pc == dc->pc) {
8754 gen_set_condexec(dc);
8755 gen_set_pc_im(dc->pc);
8756 gen_exception(EXCP_DEBUG);
8757 dc->is_jmp = DISAS_JUMP;
8758 /* Advance PC so that clearing the breakpoint will
8759 invalidate this TB. */
8761 goto done_generating;
8767 j = gen_opc_ptr - gen_opc_buf;
8771 gen_opc_instr_start[lj++] = 0;
8773 gen_opc_pc[lj] = dc->pc;
8774 gen_opc_instr_start[lj] = 1;
8775 gen_opc_icount[lj] = num_insns;
8778 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8782 disas_thumb_insn(env, dc);
8783 if (dc->condexec_mask) {
8784 dc->condexec_cond = (dc->condexec_cond & 0xe)
8785 | ((dc->condexec_mask >> 4) & 1);
8786 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8787 if (dc->condexec_mask == 0) {
8788 dc->condexec_cond = 0;
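/* Illustrative trace: "ITTE EQ" seeds condexec_mask = 00110
   (firstcond[0]:mask) and condexec_cond = 0; this advance runs once
   for the IT instruction itself and once per block instruction,
   each time feeding the mask's top bit into bit 0 of the condition,
   so the first two instructions execute as EQ, the third as NE, and
   a zero mask closes the block. */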
8792 disas_arm_insn(env, dc);
8795 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8799 if (dc->condjmp && !dc->is_jmp) {
8800 gen_set_label(dc->condlabel);
8803 /* Translation stops when a conditional branch is encountered.
8804 * Otherwise the subsequent code could get translated several times.
8805 * Also stop translation when a page boundary is reached. This
8806 * ensures prefetch aborts occur at the right place. */
8808 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8809 !env->singlestep_enabled &&
8810 dc->pc < next_page_start &&
8811 num_insns < max_insns);
8813 if (tb->cflags & CF_LAST_IO) {
8815 /* FIXME: This can theoretically happen with self-modifying code. */
8817 cpu_abort(env, "IO on conditional branch instruction");
8822 /* At this stage dc->condjmp will only be set when the skipped
8823 instruction was a conditional branch or trap, and the PC has
8824 already been written. */
8825 if (unlikely(env->singlestep_enabled)) {
8826 /* Make sure the pc is updated, and raise a debug exception. */
8828 gen_set_condexec(dc);
8829 if (dc->is_jmp == DISAS_SWI) {
8830 gen_exception(EXCP_SWI);
8832 gen_exception(EXCP_DEBUG);
8834 gen_set_label(dc->condlabel);
8836 if (dc->condjmp || !dc->is_jmp) {
8837 gen_set_pc_im(dc->pc);
8840 gen_set_condexec(dc);
8841 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
8842 gen_exception(EXCP_SWI);
8844 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
8846 gen_exception(EXCP_DEBUG);
8849 /* While branches must always occur at the end of an IT block,
8850 there are a few other things that can cause us to terminate
8851 the TB in the middle of an IT block:
8852 - Exception generating instructions (bkpt, swi, undefined).
8854 - Hardware watchpoints.
8855 Hardware breakpoints have already been handled and skip this code.
8857 gen_set_condexec(dc);
8858 switch(dc->is_jmp) {
8860 gen_goto_tb(dc, 1, dc->pc);
8865 /* indicate that the hash table must be used to find the next TB */
8869 /* nothing more to generate */
8875 gen_exception(EXCP_SWI);
8879 gen_set_label(dc->condlabel);
8880 gen_set_condexec(dc);
8881 gen_goto_tb(dc, 1, dc->pc);
8887 gen_icount_end(tb, num_insns);
8888 *gen_opc_ptr = INDEX_op_end;
8891 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8892 qemu_log("----------------\n");
8893 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8894 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
8899 j = gen_opc_ptr - gen_opc_buf;
8902 gen_opc_instr_start[lj++] = 0;
8904 tb->size = dc->pc - pc_start;
8905 tb->icount = num_insns;
8909 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
8911 gen_intermediate_code_internal(env, tb, 0);
8914 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
8916 gen_intermediate_code_internal(env, tb, 1);
8919 static const char *cpu_mode_names[16] = {
8920 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
8921 "???", "???", "???", "und", "???", "???", "???", "sys"
8924 void cpu_dump_state(CPUState *env, FILE *f,
8925 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8935 /* ??? This assumes float64 and double have the same layout.
8936 Oh well, it's only debug dumps. */
8945 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
8947 cpu_fprintf(f, "\n");
8949 cpu_fprintf(f, " ");
8951 psr = cpsr_read(env);
8952 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8954 psr & (1 << 31) ? 'N' : '-',
8955 psr & (1 << 30) ? 'Z' : '-',
8956 psr & (1 << 29) ? 'C' : '-',
8957 psr & (1 << 28) ? 'V' : '-',
8958 psr & CPSR_T ? 'T' : 'A',
8959 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
8962 for (i = 0; i < 16; i++) {
8963 d.d = env->vfp.regs[i];
8967 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
8968 i * 2, (int)s0.i, s0.s,
8969 i * 2 + 1, (int)s1.i, s1.s,
8970 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
8973 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
8977 void gen_pc_load(CPUState *env, TranslationBlock *tb,
8978 unsigned long searched_pc, int pc_pos, void *puc)
8980 env->regs[15] = gen_opc_pc[pc_pos];