2 * PowerPC emulation helpers for qemu.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
22 #include "host-utils.h"
25 #include "helper_regs.h"
28 //#define DEBUG_EXCEPTIONS
29 //#define DEBUG_SOFTWARE_TLB
31 #ifdef DEBUG_SOFTWARE_TLB
32 # define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
34 # define LOG_SWTLB(...) do { } while (0)
38 /*****************************************************************************/
39 /* Exceptions processing helpers */
41 void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
44 printf("Raise exception %3x code : %d\n", exception, error_code);
46 env->exception_index = exception;
47 env->error_code = error_code;
51 void helper_raise_exception (uint32_t exception)
53 helper_raise_exception_err(exception, 0);
56 /*****************************************************************************/
57 /* Registers load and stores */
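/* CR is modelled as eight 4-bit fields: env->crf[0] holds CR0 and lands in
 * bits 31:28 of the packed value, down to crf[7] in bits 3:0.  The store
 * helper below rewrites only the nibbles selected by the mask, so an mtcrf
 * with a single mask bit set updates exactly one CR field.
 */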
58 target_ulong helper_load_cr (void)
60 return (env->crf[0] << 28) |
70 void helper_store_cr (target_ulong val, uint32_t mask)
74 for (i = 0, sh = 7; i < 8; i++, sh--) {
76 env->crf[i] = (val >> (sh * 4)) & 0xFUL;
80 /*****************************************************************************/
82 void helper_load_dump_spr (uint32_t sprn)
84 qemu_log("Read SPR %d %03x => " ADDRX "\n",
85 sprn, sprn, env->spr[sprn]);
88 void helper_store_dump_spr (uint32_t sprn)
90 qemu_log("Write SPR %d %03x <= " ADDRX "\n",
91 sprn, sprn, env->spr[sprn]);
94 target_ulong helper_load_tbl (void)
96 return cpu_ppc_load_tbl(env);
99 target_ulong helper_load_tbu (void)
101 return cpu_ppc_load_tbu(env);
104 target_ulong helper_load_atbl (void)
106 return cpu_ppc_load_atbl(env);
109 target_ulong helper_load_atbu (void)
111 return cpu_ppc_load_atbu(env);
114 target_ulong helper_load_601_rtcl (void)
116 return cpu_ppc601_load_rtcl(env);
119 target_ulong helper_load_601_rtcu (void)
121 return cpu_ppc601_load_rtcu(env);
124 #if !defined(CONFIG_USER_ONLY)
125 #if defined (TARGET_PPC64)
126 void helper_store_asr (target_ulong val)
128 ppc_store_asr(env, val);
132 void helper_store_sdr1 (target_ulong val)
134 ppc_store_sdr1(env, val);
137 void helper_store_tbl (target_ulong val)
139 cpu_ppc_store_tbl(env, val);
142 void helper_store_tbu (target_ulong val)
144 cpu_ppc_store_tbu(env, val);
147 void helper_store_atbl (target_ulong val)
149 cpu_ppc_store_atbl(env, val);
152 void helper_store_atbu (target_ulong val)
154 cpu_ppc_store_atbu(env, val);
157 void helper_store_601_rtcl (target_ulong val)
159 cpu_ppc601_store_rtcl(env, val);
162 void helper_store_601_rtcu (target_ulong val)
164 cpu_ppc601_store_rtcu(env, val);
167 target_ulong helper_load_decr (void)
169 return cpu_ppc_load_decr(env);
172 void helper_store_decr (target_ulong val)
174 cpu_ppc_store_decr(env, val);
177 void helper_store_hid0_601 (target_ulong val)
181 hid0 = env->spr[SPR_HID0];
182 if ((val ^ hid0) & 0x00000008) {
183 /* Change current endianness */
184 env->hflags &= ~(1 << MSR_LE);
185 env->hflags_nmsr &= ~(1 << MSR_LE);
186 env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
187 env->hflags |= env->hflags_nmsr;
188 qemu_log("%s: set endianness to %c => " ADDRX "\n",
189 __func__, val & 0x8 ? 'l' : 'b', env->hflags);
191 env->spr[SPR_HID0] = (uint32_t)val;
194 void helper_store_403_pbr (uint32_t num, target_ulong value)
196 if (likely(env->pb[num] != value)) {
197 env->pb[num] = value;
198 /* Should be optimized */
203 target_ulong helper_load_40x_pit (void)
205 return load_40x_pit(env);
208 void helper_store_40x_pit (target_ulong val)
210 store_40x_pit(env, val);
213 void helper_store_40x_dbcr0 (target_ulong val)
215 store_40x_dbcr0(env, val);
218 void helper_store_40x_sler (target_ulong val)
220 store_40x_sler(env, val);
223 void helper_store_booke_tcr (target_ulong val)
225 store_booke_tcr(env, val);
228 void helper_store_booke_tsr (target_ulong val)
230 store_booke_tsr(env, val);
233 void helper_store_ibatu (uint32_t nr, target_ulong val)
235 ppc_store_ibatu(env, nr, val);
238 void helper_store_ibatl (uint32_t nr, target_ulong val)
240 ppc_store_ibatl(env, nr, val);
243 void helper_store_dbatu (uint32_t nr, target_ulong val)
245 ppc_store_dbatu(env, nr, val);
248 void helper_store_dbatl (uint32_t nr, target_ulong val)
250 ppc_store_dbatl(env, nr, val);
253 void helper_store_601_batl (uint32_t nr, target_ulong val)
255 ppc_store_ibatl_601(env, nr, val);
258 void helper_store_601_batu (uint32_t nr, target_ulong val)
260 ppc_store_ibatu_601(env, nr, val);
264 /*****************************************************************************/
265 /* Memory load and stores */
267 static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
269 #if defined(TARGET_PPC64)
271 return (uint32_t)(addr + arg);
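/* addr_add() keeps effective-address arithmetic correct for 64-bit CPUs
 * running in 32-bit mode, where the sum must wrap at 4 GiB (hence the
 * uint32_t truncation above); the string/multiple helpers below rely on it
 * when stepping the address by 1 or 4 bytes.
 */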
277 void helper_lmw (target_ulong addr, uint32_t reg)
279 for (; reg < 32; reg++) {
281 env->gpr[reg] = bswap32(ldl(addr));
283 env->gpr[reg] = ldl(addr);
284 addr = addr_add(addr, 4);
288 void helper_stmw (target_ulong addr, uint32_t reg)
290 for (; reg < 32; reg++) {
292 stl(addr, bswap32((uint32_t)env->gpr[reg]));
294 stl(addr, (uint32_t)env->gpr[reg]);
295 addr = addr_add(addr, 4);
299 void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
302 for (; nb > 3; nb -= 4) {
303 env->gpr[reg] = ldl(addr);
304 reg = (reg + 1) % 32;
305 addr = addr_add(addr, 4);
307 if (unlikely(nb > 0)) {
309 for (sh = 24; nb > 0; nb--, sh -= 8) {
310 env->gpr[reg] |= ldub(addr) << sh;
311 addr = addr_add(addr, 1);
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
320 void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
322 if (likely(xer_bc != 0)) {
323 if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
324 (reg < rb && (reg + xer_bc) > rb))) {
325 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
327 POWERPC_EXCP_INVAL_LSWX);
329 helper_lsw(addr, xer_bc, reg);
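/* Worked example of the range check above: with rT = r5 and XER[bc] = 8,
 * lswx loads r5..r12, so rA = r7 or rB = r10 falls inside the loaded range
 * and the program exception (invalid lswx form) is raised instead.
 */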
334 void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
337 for (; nb > 3; nb -= 4) {
338 stl(addr, env->gpr[reg]);
339 reg = (reg + 1) % 32;
340 addr = addr_add(addr, 4);
342 if (unlikely(nb > 0)) {
343 for (sh = 24; nb > 0; nb--, sh -= 8) {
344 stb(addr, (env->gpr[reg] >> sh) & 0xFF);
345 addr = addr_add(addr, 1);
350 static void do_dcbz(target_ulong addr, int dcache_line_size)
352 addr &= ~(dcache_line_size - 1);
354 for (i = 0 ; i < dcache_line_size ; i += 4) {
357 if (env->reserve == addr)
358 env->reserve = (target_ulong)-1ULL;
361 void helper_dcbz(target_ulong addr)
363 do_dcbz(addr, env->dcache_line_size);
366 void helper_dcbz_970(target_ulong addr)
368 if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
371 do_dcbz(addr, env->dcache_line_size);
374 void helper_icbi(target_ulong addr)
378 addr &= ~(env->dcache_line_size - 1);
/* Invalidate one cache line:
 * PowerPC specification says this is to be treated like a load
 * (not a fetch) by the MMU. To be sure it will be so,
 * do the load "by hand".
385 tb_invalidate_page_range(addr, addr + env->icache_line_size);
389 target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
393 for (i = 0; i < xer_bc; i++) {
395 addr = addr_add(addr, 1);
396 /* ra (if not 0) and rb are never modified */
397 if (likely(reg != rb && (ra == 0 || reg != ra))) {
398 env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
400 if (unlikely(c == xer_cmp))
402 if (likely(d != 0)) {
413 /*****************************************************************************/
414 /* Fixed point operations helpers */
415 #if defined(TARGET_PPC64)
417 /* multiply high word */
418 uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
422 muls64(&tl, &th, arg1, arg2);
426 /* multiply high word unsigned */
427 uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
431 mulu64(&tl, &th, arg1, arg2);
435 uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
440 muls64(&tl, (uint64_t *)&th, arg1, arg2);
441 /* If th != 0 && th != -1, then we had an overflow */
442 if (likely((uint64_t)(th + 1) <= 1)) {
443 env->xer &= ~(1 << XER_OV);
445 env->xer |= (1 << XER_OV) | (1 << XER_SO);
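/* Worked example: 0x4000000000000000 * 4 = 2^64, so muls64() returns
 * th = 1, tl = 0; th is neither 0 nor -1, hence XER[OV] and XER[SO] get
 * set above, while 3 * 5 leaves th = 0 and clears XER[OV].
 */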
451 target_ulong helper_cntlzw (target_ulong t)
456 #if defined(TARGET_PPC64)
457 target_ulong helper_cntlzd (target_ulong t)
463 /* shift right arithmetic helper */
464 target_ulong helper_sraw (target_ulong value, target_ulong shift)
468 if (likely(!(shift & 0x20))) {
469 if (likely((uint32_t)shift != 0)) {
471 ret = (int32_t)value >> shift;
472 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
473 env->xer &= ~(1 << XER_CA);
475 env->xer |= (1 << XER_CA);
478 ret = (int32_t)value;
479 env->xer &= ~(1 << XER_CA);
482 ret = (int32_t)value >> 31;
484 env->xer |= (1 << XER_CA);
486 env->xer &= ~(1 << XER_CA);
489 return (target_long)ret;
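/* Example of the carry rule above: sraw of 0xFFFFFFF5 (-11) by 2 yields -3
 * and sets XER[CA] because the result is negative and non-zero bits were
 * shifted out; sraw of 20 by 2 yields 5 with XER[CA] cleared.
 */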
492 #if defined(TARGET_PPC64)
493 target_ulong helper_srad (target_ulong value, target_ulong shift)
497 if (likely(!(shift & 0x40))) {
498 if (likely((uint64_t)shift != 0)) {
500 ret = (int64_t)value >> shift;
if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
502 env->xer &= ~(1 << XER_CA);
504 env->xer |= (1 << XER_CA);
507 ret = (int64_t)value;
508 env->xer &= ~(1 << XER_CA);
511 ret = (int64_t)value >> 63;
513 env->xer |= (1 << XER_CA);
515 env->xer &= ~(1 << XER_CA);
522 target_ulong helper_popcntb (target_ulong val)
524 val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
525 val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
526 val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
530 #if defined(TARGET_PPC64)
531 target_ulong helper_popcntb_64 (target_ulong val)
533 val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
534 val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
535 val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
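/* Both helpers use the classic SWAR reduction: the three masked adds sum
 * adjacent 1-bit, 2-bit and 4-bit fields without crossing byte boundaries,
 * so each result byte ends up holding the population count of the
 * corresponding input byte (e.g. an input byte of 0xF3 yields 6), which is
 * what popcntb is defined to return.
 */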
540 /*****************************************************************************/
541 /* Floating point operations helpers */
542 uint64_t helper_float32_to_float64(uint32_t arg)
547 d.d = float32_to_float64(f.f, &env->fp_status);
551 uint32_t helper_float64_to_float32(uint64_t arg)
556 f.f = float64_to_float32(d.d, &env->fp_status);
560 static always_inline int isden (float64 d)
566 return ((u.ll >> 52) & 0x7FF) == 0;
569 uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
575 isneg = float64_is_neg(farg.d);
576 if (unlikely(float64_is_nan(farg.d))) {
577 if (float64_is_signaling_nan(farg.d)) {
578 /* Signaling NaN: flags are undefined */
584 } else if (unlikely(float64_is_infinity(farg.d))) {
591 if (float64_is_zero(farg.d)) {
599 /* Denormalized numbers */
602 /* Normalized numbers */
/* We update FPSCR_FPRF */
env->fpscr &= ~(0x1F << FPSCR_FPRF);
env->fpscr |= ret << FPSCR_FPRF;
/* We just need fpcc to update Rc1 */
621 /* Floating-point invalid operations exception */
622 static always_inline uint64_t fload_invalid_op_excp (int op)
629 case POWERPC_EXCP_FP_VXSNAN:
630 env->fpscr |= 1 << FPSCR_VXSNAN;
632 case POWERPC_EXCP_FP_VXSOFT:
633 env->fpscr |= 1 << FPSCR_VXSOFT;
635 case POWERPC_EXCP_FP_VXISI:
/* Magnitude subtraction of infinities */
637 env->fpscr |= 1 << FPSCR_VXISI;
639 case POWERPC_EXCP_FP_VXIDI:
/* Division of infinity by infinity */
641 env->fpscr |= 1 << FPSCR_VXIDI;
643 case POWERPC_EXCP_FP_VXZDZ:
/* Division of zero by zero */
645 env->fpscr |= 1 << FPSCR_VXZDZ;
647 case POWERPC_EXCP_FP_VXIMZ:
/* Multiplication of zero by infinity */
649 env->fpscr |= 1 << FPSCR_VXIMZ;
651 case POWERPC_EXCP_FP_VXVC:
/* Ordered comparison of NaN */
653 env->fpscr |= 1 << FPSCR_VXVC;
654 env->fpscr &= ~(0xF << FPSCR_FPCC);
655 env->fpscr |= 0x11 << FPSCR_FPCC;
/* We must update the target FPR before raising the exception */
658 env->exception_index = POWERPC_EXCP_PROGRAM;
659 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
/* Exception is deferred */
666 case POWERPC_EXCP_FP_VXSQRT:
/* Square root of a negative number */
env->fpscr |= 1 << FPSCR_VXSQRT;
env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
/* Set the result to quiet NaN */
673 ret = 0xFFF8000000000000ULL;
674 env->fpscr &= ~(0xF << FPSCR_FPCC);
675 env->fpscr |= 0x11 << FPSCR_FPCC;
678 case POWERPC_EXCP_FP_VXCVI:
/* Invalid conversion */
env->fpscr |= 1 << FPSCR_VXCVI;
env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
/* Set the result to quiet NaN */
684 ret = 0xFFF8000000000000ULL;
685 env->fpscr &= ~(0xF << FPSCR_FPCC);
686 env->fpscr |= 0x11 << FPSCR_FPCC;
/* Update the floating-point invalid operation summary */
env->fpscr |= 1 << FPSCR_VX;
/* Update the floating-point exception summary */
env->fpscr |= 1 << FPSCR_FX;
/* Update the floating-point enabled exception summary */
696 env->fpscr |= 1 << FPSCR_FEX;
697 if (msr_fe0 != 0 || msr_fe1 != 0)
698 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
703 static always_inline void float_zero_divide_excp (void)
705 env->fpscr |= 1 << FPSCR_ZX;
706 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
707 /* Update the floating-point exception summary */
708 env->fpscr |= 1 << FPSCR_FX;
710 /* Update the floating-point enabled exception summary */
711 env->fpscr |= 1 << FPSCR_FEX;
712 if (msr_fe0 != 0 || msr_fe1 != 0) {
713 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
714 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
719 static always_inline void float_overflow_excp (void)
721 env->fpscr |= 1 << FPSCR_OX;
722 /* Update the floating-point exception summary */
723 env->fpscr |= 1 << FPSCR_FX;
725 /* XXX: should adjust the result */
726 /* Update the floating-point enabled exception summary */
727 env->fpscr |= 1 << FPSCR_FEX;
728 /* We must update the target FPR before raising the exception */
729 env->exception_index = POWERPC_EXCP_PROGRAM;
730 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
732 env->fpscr |= 1 << FPSCR_XX;
733 env->fpscr |= 1 << FPSCR_FI;
737 static always_inline void float_underflow_excp (void)
739 env->fpscr |= 1 << FPSCR_UX;
740 /* Update the floating-point exception summary */
741 env->fpscr |= 1 << FPSCR_FX;
743 /* XXX: should adjust the result */
744 /* Update the floating-point enabled exception summary */
745 env->fpscr |= 1 << FPSCR_FEX;
746 /* We must update the target FPR before raising the exception */
747 env->exception_index = POWERPC_EXCP_PROGRAM;
748 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
752 static always_inline void float_inexact_excp (void)
754 env->fpscr |= 1 << FPSCR_XX;
755 /* Update the floating-point exception summary */
756 env->fpscr |= 1 << FPSCR_FX;
758 /* Update the floating-point enabled exception summary */
759 env->fpscr |= 1 << FPSCR_FEX;
760 /* We must update the target FPR before raising the exception */
761 env->exception_index = POWERPC_EXCP_PROGRAM;
762 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
766 static always_inline void fpscr_set_rounding_mode (void)
770 /* Set rounding mode */
773 /* Best approximation (round to nearest) */
774 rnd_type = float_round_nearest_even;
777 /* Smaller magnitude (round toward zero) */
778 rnd_type = float_round_to_zero;
/* Round toward +infinity */
782 rnd_type = float_round_up;
/* Round toward -infinity */
787 rnd_type = float_round_down;
790 set_float_rounding_mode(rnd_type, &env->fp_status);
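/* FPSCR[RN] maps directly onto the softfloat rounding modes chosen above:
 * 0 = round to nearest (ties to even), 1 = round toward zero,
 * 2 = round toward +infinity, 3 = round toward -infinity.
 */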
793 void helper_fpscr_clrbit (uint32_t bit)
797 prev = (env->fpscr >> bit) & 1;
798 env->fpscr &= ~(1 << bit);
803 fpscr_set_rounding_mode();
811 void helper_fpscr_setbit (uint32_t bit)
815 prev = (env->fpscr >> bit) & 1;
816 env->fpscr |= 1 << bit;
820 env->fpscr |= 1 << FPSCR_FX;
824 env->fpscr |= 1 << FPSCR_FX;
829 env->fpscr |= 1 << FPSCR_FX;
834 env->fpscr |= 1 << FPSCR_FX;
839 env->fpscr |= 1 << FPSCR_FX;
852 env->fpscr |= 1 << FPSCR_VX;
853 env->fpscr |= 1 << FPSCR_FX;
860 env->error_code = POWERPC_EXCP_FP;
862 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
864 env->error_code |= POWERPC_EXCP_FP_VXISI;
866 env->error_code |= POWERPC_EXCP_FP_VXIDI;
868 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
870 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
872 env->error_code |= POWERPC_EXCP_FP_VXVC;
874 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
876 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
878 env->error_code |= POWERPC_EXCP_FP_VXCVI;
885 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
892 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
899 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
906 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
912 fpscr_set_rounding_mode();
917 /* Update the floating-point enabled exception summary */
918 env->fpscr |= 1 << FPSCR_FEX;
919 /* We have to update Rc1 before raising the exception */
920 env->exception_index = POWERPC_EXCP_PROGRAM;
926 void helper_store_fpscr (uint64_t arg, uint32_t mask)
929 * We use only the 32 LSB of the incoming fpr
937 new |= prev & 0x60000000;
938 for (i = 0; i < 8; i++) {
939 if (mask & (1 << i)) {
940 env->fpscr &= ~(0xF << (4 * i));
941 env->fpscr |= new & (0xF << (4 * i));
944 /* Update VX and FEX */
946 env->fpscr |= 1 << FPSCR_VX;
948 env->fpscr &= ~(1 << FPSCR_VX);
949 if ((fpscr_ex & fpscr_eex) != 0) {
950 env->fpscr |= 1 << FPSCR_FEX;
951 env->exception_index = POWERPC_EXCP_PROGRAM;
952 /* XXX: we should compute it properly */
953 env->error_code = POWERPC_EXCP_FP;
956 env->fpscr &= ~(1 << FPSCR_FEX);
957 fpscr_set_rounding_mode();
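/* mtfsf semantics: each set bit of the 8-bit field mask copies one 4-bit
 * nibble of the source value into FPSCR.  Since the rounding-mode bits may
 * be among the updated nibbles, the softfloat rounding mode is re-derived
 * from FPSCR at the end.
 */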
960 void helper_float_check_status (void)
962 #ifdef CONFIG_SOFTFLOAT
963 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
964 (env->error_code & POWERPC_EXCP_FP)) {
/* Deferred floating-point exception after target FPR update */
966 if (msr_fe0 != 0 || msr_fe1 != 0)
967 helper_raise_exception_err(env->exception_index, env->error_code);
969 int status = get_float_exception_flags(&env->fp_status);
970 if (status & float_flag_divbyzero) {
971 float_zero_divide_excp();
972 } else if (status & float_flag_overflow) {
973 float_overflow_excp();
974 } else if (status & float_flag_underflow) {
975 float_underflow_excp();
976 } else if (status & float_flag_inexact) {
977 float_inexact_excp();
981 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
982 (env->error_code & POWERPC_EXCP_FP)) {
/* Deferred floating-point exception after target FPR update */
984 if (msr_fe0 != 0 || msr_fe1 != 0)
985 helper_raise_exception_err(env->exception_index, env->error_code);
990 #ifdef CONFIG_SOFTFLOAT
991 void helper_reset_fpstatus (void)
993 set_float_exception_flags(0, &env->fp_status);
998 uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
1000 CPU_DoubleU farg1, farg2;
1004 #if USE_PRECISE_EMULATION
1005 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1006 float64_is_signaling_nan(farg2.d))) {
1008 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1009 } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1010 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
1011 /* Magnitude subtraction of infinities */
1012 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1014 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
1017 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
1023 uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
1025 CPU_DoubleU farg1, farg2;
1029 #if USE_PRECISE_EMULATION
1031 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1032 float64_is_signaling_nan(farg2.d))) {
1033 /* sNaN subtraction */
1034 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1035 } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1036 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
1037 /* Magnitude subtraction of infinities */
1038 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1040 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1044 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1050 uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
1052 CPU_DoubleU farg1, farg2;
1056 #if USE_PRECISE_EMULATION
1057 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1058 float64_is_signaling_nan(farg2.d))) {
1059 /* sNaN multiplication */
1060 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1061 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1062 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1063 /* Multiplication of zero by infinity */
1064 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1066 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1069 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1075 uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
1077 CPU_DoubleU farg1, farg2;
1081 #if USE_PRECISE_EMULATION
1082 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1083 float64_is_signaling_nan(farg2.d))) {
1085 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1086 } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
1087 /* Division of infinity by infinity */
1088 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
1089 } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
1090 /* Division of zero by zero */
1091 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
1093 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1096 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1102 uint64_t helper_fabs (uint64_t arg)
1107 farg.d = float64_abs(farg.d);
1112 uint64_t helper_fnabs (uint64_t arg)
1117 farg.d = float64_abs(farg.d);
1118 farg.d = float64_chs(farg.d);
1123 uint64_t helper_fneg (uint64_t arg)
1128 farg.d = float64_chs(farg.d);
1132 /* fctiw - fctiw. */
1133 uint64_t helper_fctiw (uint64_t arg)
1138 if (unlikely(float64_is_signaling_nan(farg.d))) {
1139 /* sNaN conversion */
1140 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1141 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
/* qNaN / infinity conversion */
1143 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1145 farg.ll = float64_to_int32(farg.d, &env->fp_status);
1146 #if USE_PRECISE_EMULATION
1147 /* XXX: higher bits are not supposed to be significant.
1148 * to make tests easier, return the same as a real PowerPC 750
1150 farg.ll |= 0xFFF80000ULL << 32;
1156 /* fctiwz - fctiwz. */
1157 uint64_t helper_fctiwz (uint64_t arg)
1162 if (unlikely(float64_is_signaling_nan(farg.d))) {
1163 /* sNaN conversion */
1164 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1165 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
/* qNaN / infinity conversion */
1167 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1169 farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1170 #if USE_PRECISE_EMULATION
1171 /* XXX: higher bits are not supposed to be significant.
1172 * to make tests easier, return the same as a real PowerPC 750
1174 farg.ll |= 0xFFF80000ULL << 32;
1180 #if defined(TARGET_PPC64)
1181 /* fcfid - fcfid. */
1182 uint64_t helper_fcfid (uint64_t arg)
1185 farg.d = int64_to_float64(arg, &env->fp_status);
1189 /* fctid - fctid. */
1190 uint64_t helper_fctid (uint64_t arg)
1195 if (unlikely(float64_is_signaling_nan(farg.d))) {
1196 /* sNaN conversion */
1197 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1198 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
/* qNaN / infinity conversion */
1200 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1202 farg.ll = float64_to_int64(farg.d, &env->fp_status);
1207 /* fctidz - fctidz. */
1208 uint64_t helper_fctidz (uint64_t arg)
1213 if (unlikely(float64_is_signaling_nan(farg.d))) {
1214 /* sNaN conversion */
1215 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1216 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
/* qNaN / infinity conversion */
1218 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1220 farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1227 static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
1232 if (unlikely(float64_is_signaling_nan(farg.d))) {
1234 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1235 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
/* qNaN / infinity round */
1237 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1239 set_float_rounding_mode(rounding_mode, &env->fp_status);
1240 farg.ll = float64_round_to_int(farg.d, &env->fp_status);
1241 /* Restore rounding mode from FPSCR */
1242 fpscr_set_rounding_mode();
1247 uint64_t helper_frin (uint64_t arg)
1249 return do_fri(arg, float_round_nearest_even);
1252 uint64_t helper_friz (uint64_t arg)
1254 return do_fri(arg, float_round_to_zero);
1257 uint64_t helper_frip (uint64_t arg)
1259 return do_fri(arg, float_round_up);
1262 uint64_t helper_frim (uint64_t arg)
1264 return do_fri(arg, float_round_down);
1267 /* fmadd - fmadd. */
1268 uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1270 CPU_DoubleU farg1, farg2, farg3;
1275 #if USE_PRECISE_EMULATION
1276 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1277 float64_is_signaling_nan(farg2.d) ||
1278 float64_is_signaling_nan(farg3.d))) {
1279 /* sNaN operation */
1280 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1281 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1282 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1283 /* Multiplication of zero by infinity */
1284 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1287 /* This is the way the PowerPC specification defines it */
1288 float128 ft0_128, ft1_128;
1290 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1291 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1292 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1293 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1294 float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1295 /* Magnitude subtraction of infinities */
1296 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1298 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1299 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1300 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1303 /* This is OK on x86 hosts */
1304 farg1.d = (farg1.d * farg2.d) + farg3.d;
1308 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1309 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
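/* Note: the float128 path above multiplies and adds with a single final
 * rounding, which is what the architected fused fmadd requires; the plain
 * double-precision fallbacks round twice and may differ in the last bit.
 */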
1314 /* fmsub - fmsub. */
1315 uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1317 CPU_DoubleU farg1, farg2, farg3;
1322 #if USE_PRECISE_EMULATION
1323 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1324 float64_is_signaling_nan(farg2.d) ||
1325 float64_is_signaling_nan(farg3.d))) {
1326 /* sNaN operation */
1327 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1328 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1329 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1330 /* Multiplication of zero by infinity */
1331 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1334 /* This is the way the PowerPC specification defines it */
1335 float128 ft0_128, ft1_128;
1337 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1338 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1339 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1340 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1341 float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1342 /* Magnitude subtraction of infinities */
1343 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1345 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1346 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1347 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1350 /* This is OK on x86 hosts */
1351 farg1.d = (farg1.d * farg2.d) - farg3.d;
1355 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1356 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1361 /* fnmadd - fnmadd. */
1362 uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1364 CPU_DoubleU farg1, farg2, farg3;
1370 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1371 float64_is_signaling_nan(farg2.d) ||
1372 float64_is_signaling_nan(farg3.d))) {
1373 /* sNaN operation */
1374 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1375 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1376 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1377 /* Multiplication of zero by infinity */
1378 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1380 #if USE_PRECISE_EMULATION
1382 /* This is the way the PowerPC specification defines it */
1383 float128 ft0_128, ft1_128;
1385 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1386 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1387 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1388 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1389 float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1390 /* Magnitude subtraction of infinities */
1391 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1393 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1394 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1395 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1398 /* This is OK on x86 hosts */
1399 farg1.d = (farg1.d * farg2.d) + farg3.d;
1402 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1403 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1405 if (likely(!float64_is_nan(farg1.d)))
1406 farg1.d = float64_chs(farg1.d);
1411 /* fnmsub - fnmsub. */
1412 uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1414 CPU_DoubleU farg1, farg2, farg3;
1420 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1421 float64_is_signaling_nan(farg2.d) ||
1422 float64_is_signaling_nan(farg3.d))) {
1423 /* sNaN operation */
1424 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1425 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1426 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1427 /* Multiplication of zero by infinity */
1428 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1430 #if USE_PRECISE_EMULATION
1432 /* This is the way the PowerPC specification defines it */
1433 float128 ft0_128, ft1_128;
1435 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1436 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1437 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1438 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1439 float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1440 /* Magnitude subtraction of infinities */
1441 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1443 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1444 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1445 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1448 /* This is OK on x86 hosts */
1449 farg1.d = (farg1.d * farg2.d) - farg3.d;
1452 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1453 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1455 if (likely(!float64_is_nan(farg1.d)))
1456 farg1.d = float64_chs(farg1.d);
1462 uint64_t helper_frsp (uint64_t arg)
1468 #if USE_PRECISE_EMULATION
1469 if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN round to single precision */
1471 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1473 f32 = float64_to_float32(farg.d, &env->fp_status);
1474 farg.d = float32_to_float64(f32, &env->fp_status);
1477 f32 = float64_to_float32(farg.d, &env->fp_status);
1478 farg.d = float32_to_float64(f32, &env->fp_status);
1483 /* fsqrt - fsqrt. */
1484 uint64_t helper_fsqrt (uint64_t arg)
1489 if (unlikely(float64_is_signaling_nan(farg.d))) {
1490 /* sNaN square root */
1491 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1492 } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1493 /* Square root of a negative nonzero number */
1494 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1496 farg.d = float64_sqrt(farg.d, &env->fp_status);
1502 uint64_t helper_fre (uint64_t arg)
1507 if (unlikely(float64_is_signaling_nan(farg.d))) {
1508 /* sNaN reciprocal */
1509 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1511 farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1517 uint64_t helper_fres (uint64_t arg)
1523 if (unlikely(float64_is_signaling_nan(farg.d))) {
1524 /* sNaN reciprocal */
1525 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1527 farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1528 f32 = float64_to_float32(farg.d, &env->fp_status);
1529 farg.d = float32_to_float64(f32, &env->fp_status);
1534 /* frsqrte - frsqrte. */
1535 uint64_t helper_frsqrte (uint64_t arg)
1541 if (unlikely(float64_is_signaling_nan(farg.d))) {
1542 /* sNaN reciprocal square root */
1543 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1544 } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1545 /* Reciprocal square root of a negative nonzero number */
1546 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1548 farg.d = float64_sqrt(farg.d, &env->fp_status);
1549 farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1550 f32 = float64_to_float32(farg.d, &env->fp_status);
1551 farg.d = float32_to_float64(f32, &env->fp_status);
1557 uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1563 if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
1569 void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
1571 CPU_DoubleU farg1, farg2;
1576 if (unlikely(float64_is_nan(farg1.d) ||
1577 float64_is_nan(farg2.d))) {
1579 } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1581 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1587 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1588 env->fpscr |= ret << FPSCR_FPRF;
1589 env->crf[crfD] = ret;
1590 if (unlikely(ret == 0x01UL
1591 && (float64_is_signaling_nan(farg1.d) ||
1592 float64_is_signaling_nan(farg2.d)))) {
1593 /* sNaN comparison */
1594 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
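/* This helper and fcmpo below encode the outcome as a one-hot 4-bit CR
 * field: 0x8 = less than, 0x4 = greater than, 0x2 = equal, 0x1 = unordered,
 * mirrored into FPSCR[FPCC]; fcmpo additionally reports VXVC for any NaN
 * operand, not just for signaling NaNs.
 */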
1598 void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
1600 CPU_DoubleU farg1, farg2;
1605 if (unlikely(float64_is_nan(farg1.d) ||
1606 float64_is_nan(farg2.d))) {
1608 } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1610 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1616 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1617 env->fpscr |= ret << FPSCR_FPRF;
1618 env->crf[crfD] = ret;
1619 if (unlikely (ret == 0x01UL)) {
1620 if (float64_is_signaling_nan(farg1.d) ||
1621 float64_is_signaling_nan(farg2.d)) {
1622 /* sNaN comparison */
1623 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1624 POWERPC_EXCP_FP_VXVC);
1626 /* qNaN comparison */
1627 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1632 #if !defined (CONFIG_USER_ONLY)
1633 void helper_store_msr (target_ulong val)
1635 val = hreg_store_msr(env, val, 0);
1637 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1638 helper_raise_exception(val);
1642 static always_inline void do_rfi (target_ulong nip, target_ulong msr,
1643 target_ulong msrm, int keep_msrh)
1645 #if defined(TARGET_PPC64)
1646 if (msr & (1ULL << MSR_SF)) {
1647 nip = (uint64_t)nip;
1648 msr &= (uint64_t)msrm;
1650 nip = (uint32_t)nip;
1651 msr = (uint32_t)(msr & msrm);
1653 msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1656 nip = (uint32_t)nip;
1657 msr &= (uint32_t)msrm;
1659 /* XXX: beware: this is false if VLE is supported */
1660 env->nip = nip & ~((target_ulong)0x00000003);
1661 hreg_store_msr(env, msr, 1);
1662 #if defined (DEBUG_OP)
1663 cpu_dump_rfi(env->nip, env->msr);
1665 /* No need to raise an exception here,
1666 * as rfi is always the last insn of a TB
1668 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
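/* do_rfi(): the first argument becomes the new NIP (its low two bits are
 * cleared), the second is written to the MSR after applying the mask, and
 * keep_msrh preserves the upper 32 MSR bits so the 32-bit rfi on a 64-bit
 * CPU leaves them untouched.
 */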
1671 void helper_rfi (void)
1673 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1674 ~((target_ulong)0xFFFF0000), 1);
1677 #if defined(TARGET_PPC64)
1678 void helper_rfid (void)
1680 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1681 ~((target_ulong)0xFFFF0000), 0);
1684 void helper_hrfid (void)
1686 do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1687 ~((target_ulong)0xFFFF0000), 0);
1692 void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
1694 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1695 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1696 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1697 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1698 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1699 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1703 #if defined(TARGET_PPC64)
1704 void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1706 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1707 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1708 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1709 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1710 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1711 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1715 /*****************************************************************************/
1716 /* PowerPC 601 specific instructions (POWER bridge) */
1718 target_ulong helper_clcs (uint32_t arg)
1722 /* Instruction cache line size */
1723 return env->icache_line_size;
1726 /* Data cache line size */
1727 return env->dcache_line_size;
1730 /* Minimum cache line size */
1731 return (env->icache_line_size < env->dcache_line_size) ?
1732 env->icache_line_size : env->dcache_line_size;
1735 /* Maximum cache line size */
1736 return (env->icache_line_size > env->dcache_line_size) ?
1737 env->icache_line_size : env->dcache_line_size;
1746 target_ulong helper_div (target_ulong arg1, target_ulong arg2)
1748 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1750 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1751 (int32_t)arg2 == 0) {
1752 env->spr[SPR_MQ] = 0;
1755 env->spr[SPR_MQ] = tmp % arg2;
1756 return tmp / (int32_t)arg2;
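/* POWER-style div concatenates rA with the MQ register to form a 64-bit
 * dividend: e.g. rA = 0, MQ = 100, rB = 7 leaves quotient 14 in the result
 * and remainder 2 in MQ.  divo below does the same but also sets
 * XER[OV]/XER[SO] when the quotient does not fit in 32 bits.
 */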
1760 target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
1762 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1764 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1765 (int32_t)arg2 == 0) {
1766 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1767 env->spr[SPR_MQ] = 0;
1770 env->spr[SPR_MQ] = tmp % arg2;
1771 tmp /= (int32_t)arg2;
1772 if ((int32_t)tmp != tmp) {
1773 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1775 env->xer &= ~(1 << XER_OV);
1781 target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
1783 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1784 (int32_t)arg2 == 0) {
1785 env->spr[SPR_MQ] = 0;
1788 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1789 return (int32_t)arg1 / (int32_t)arg2;
1793 target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
1795 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1796 (int32_t)arg2 == 0) {
1797 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1798 env->spr[SPR_MQ] = 0;
1801 env->xer &= ~(1 << XER_OV);
1802 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1803 return (int32_t)arg1 / (int32_t)arg2;
1807 #if !defined (CONFIG_USER_ONLY)
1808 target_ulong helper_rac (target_ulong addr)
1812 target_ulong ret = 0;
1814 /* We don't have to generate many instances of this instruction,
1815 * as rac is supervisor only.
1817 /* XXX: FIX THIS: Pretend we have no BAT */
1818 nb_BATs = env->nb_BATs;
1820 if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
1822 env->nb_BATs = nb_BATs;
1826 void helper_rfsvc (void)
1828 do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1832 /*****************************************************************************/
1833 /* 602 specific instructions */
1834 /* mfrom is the most crazy instruction ever seen, imho ! */
1835 /* Real implementation uses a ROM table. Do the same */
/* Extremely decomposed:
 * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
 */
1840 #if !defined (CONFIG_USER_ONLY)
1841 target_ulong helper_602_mfrom (target_ulong arg)
1843 if (likely(arg < 602)) {
1844 #include "mfrom_table.c"
1845 return mfrom_ROM_table[arg];
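/* e.g. arg = 0 gives 256 * log10(2) + 0.5, roughly 77.6, so the first ROM
 * table entry should be 77.
 */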
1852 /*****************************************************************************/
1853 /* Embedded PowerPC specific helpers */
1855 /* XXX: to be improved to check access rights when in user-mode */
1856 target_ulong helper_load_dcr (target_ulong dcrn)
1858 target_ulong val = 0;
1860 if (unlikely(env->dcr_env == NULL)) {
1861 qemu_log("No DCR environment\n");
1862 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1863 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1864 } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
1865 qemu_log("DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
1866 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1867 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1872 void helper_store_dcr (target_ulong dcrn, target_ulong val)
1874 if (unlikely(env->dcr_env == NULL)) {
1875 qemu_log("No DCR environment\n");
1876 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1877 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1878 } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
1879 qemu_log("DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
1880 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1881 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1885 #if !defined(CONFIG_USER_ONLY)
1886 void helper_40x_rfci (void)
1888 do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1889 ~((target_ulong)0xFFFF0000), 0);
1892 void helper_rfci (void)
do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
1895 ~((target_ulong)0x3FFF0000), 0);
1898 void helper_rfdi (void)
do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
1901 ~((target_ulong)0x3FFF0000), 0);
1904 void helper_rfmci (void)
do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
1907 ~((target_ulong)0x3FFF0000), 0);
1912 target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
1918 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1919 if ((high & mask) == 0) {
1927 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1928 if ((low & mask) == 0) {
1940 env->xer = (env->xer & ~0x7F) | i;
1942 env->crf[0] |= xer_so;
1947 /*****************************************************************************/
1948 /* Altivec extension helpers */
1949 #if defined(WORDS_BIGENDIAN)
1957 #if defined(WORDS_BIGENDIAN)
1958 #define VECTOR_FOR_INORDER_I(index, element) \
1959 for (index = 0; index < ARRAY_SIZE(r->element); index++)
1961 #define VECTOR_FOR_INORDER_I(index, element) \
1962 for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
1965 /* If X is a NaN, store the corresponding QNaN into RESULT. Otherwise,
1966 * execute the following block. */
1967 #define DO_HANDLE_NAN(result, x) \
1968 if (float32_is_nan(x) || float32_is_signaling_nan(x)) { \
1971 __f.l = __f.l | (1 << 22); /* Set QNaN bit. */ \
1975 #define HANDLE_NAN1(result, x) \
1976 DO_HANDLE_NAN(result, x)
1977 #define HANDLE_NAN2(result, x, y) \
1978 DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
1979 #define HANDLE_NAN3(result, x, y, z) \
1980 DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
1982 /* Saturating arithmetic helpers. */
1983 #define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
1984 static always_inline to_type cvt##from##to (from_type x, int *sat) \
1987 if (use_min && x < min) { \
1990 } else if (use_max && x > max) { \
1998 SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
1999 SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
2000 SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)
2001 SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
2002 SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
2003 SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
2004 SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
2005 SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
2006 SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
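/* Examples: cvtsdsw(0x123456789LL, &sat) clamps to INT32_MAX and flags
 * saturation, and cvtshub(-5, &sat) clamps the signed input to 0 for the
 * unsigned result; the Altivec add/pack helpers below use these to set
 * VSCR[SAT].
 */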
2009 #define LVE(name, access, swap, element) \
2010 void helper_##name (ppc_avr_t *r, target_ulong addr) \
2012 size_t n_elems = ARRAY_SIZE(r->element); \
2013 int adjust = HI_IDX*(n_elems-1); \
2014 int sh = sizeof(r->element[0]) >> 1; \
2015 int index = (addr & 0xf) >> sh; \
2017 r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
2019 r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
2023 LVE(lvebx, ldub, I, u8)
2024 LVE(lvehx, lduw, bswap16, u16)
2025 LVE(lvewx, ldl, bswap32, u32)
2029 void helper_lvsl (ppc_avr_t *r, target_ulong sh)
2031 int i, j = (sh & 0xf);
2033 VECTOR_FOR_INORDER_I (i, u8) {
2038 void helper_lvsr (ppc_avr_t *r, target_ulong sh)
2040 int i, j = 0x10 - (sh & 0xf);
2042 VECTOR_FOR_INORDER_I (i, u8) {
2047 #define STVE(name, access, swap, element) \
2048 void helper_##name (ppc_avr_t *r, target_ulong addr) \
2050 size_t n_elems = ARRAY_SIZE(r->element); \
2051 int adjust = HI_IDX*(n_elems-1); \
2052 int sh = sizeof(r->element[0]) >> 1; \
2053 int index = (addr & 0xf) >> sh; \
2055 access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2057 access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2061 STVE(stvebx, stb, I, u8)
2062 STVE(stvehx, stw, bswap16, u16)
2063 STVE(stvewx, stl, bswap32, u32)
2067 void helper_mtvscr (ppc_avr_t *r)
2069 #if defined(WORDS_BIGENDIAN)
2070 env->vscr = r->u32[3];
2072 env->vscr = r->u32[0];
2074 set_flush_to_zero(vscr_nj, &env->vec_status);
2077 void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2080 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2081 r->u32[i] = ~a->u32[i] < b->u32[i];
2085 #define VARITH_DO(name, op, element) \
2086 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2089 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2090 r->element[i] = a->element[i] op b->element[i]; \
2093 #define VARITH(suffix, element) \
2094 VARITH_DO(add##suffix, +, element) \
2095 VARITH_DO(sub##suffix, -, element)
2102 #define VARITHSAT_CASE(type, op, cvt, element) \
2104 type result = (type)a->element[i] op (type)b->element[i]; \
2105 r->element[i] = cvt(result, &sat); \
2108 #define VARITHSAT_DO(name, op, optype, cvt, element) \
2109 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2113 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2114 switch (sizeof(r->element[0])) { \
2115 case 1: VARITHSAT_CASE(optype, op, cvt, element); break; \
2116 case 2: VARITHSAT_CASE(optype, op, cvt, element); break; \
2117 case 4: VARITHSAT_CASE(optype, op, cvt, element); break; \
2121 env->vscr |= (1 << VSCR_SAT); \
2124 #define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
2125 VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \
2126 VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
2127 #define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \
2128 VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \
2129 VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
2130 VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
2131 VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
2132 VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
2133 VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
2134 VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
2135 VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
2136 #undef VARITHSAT_CASE
2138 #undef VARITHSAT_SIGNED
2139 #undef VARITHSAT_UNSIGNED
2141 #define VAVG_DO(name, element, etype) \
2142 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2145 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2146 etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
2147 r->element[i] = x >> 1; \
2151 #define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2152 VAVG_DO(avgs##type, signed_element, signed_type) \
2153 VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2154 VAVG(b, s8, int16_t, u8, uint16_t)
2155 VAVG(h, s16, int32_t, u16, uint32_t)
2156 VAVG(w, s32, int64_t, u32, uint64_t)
2160 #define VCF(suffix, cvt, element) \
2161 void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2164 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2165 float32 t = cvt(b->element[i], &env->vec_status); \
2166 r->f[i] = float32_scalbn (t, -uim, &env->vec_status); \
2169 VCF(ux, uint32_to_float32, u32)
2170 VCF(sx, int32_to_float32, s32)
2173 #define VCMP_DO(suffix, compare, element, record) \
2174 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2176 uint32_t ones = (uint32_t)-1; \
2177 uint32_t all = ones; \
2178 uint32_t none = 0; \
2180 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2181 uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
2182 switch (sizeof (a->element[0])) { \
2183 case 4: r->u32[i] = result; break; \
2184 case 2: r->u16[i] = result; break; \
2185 case 1: r->u8[i] = result; break; \
2191 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2194 #define VCMP(suffix, compare, element) \
2195 VCMP_DO(suffix, compare, element, 0) \
2196 VCMP_DO(suffix##_dot, compare, element, 1)
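/* For the dot forms, CR6 is derived from the comparison: bit 3 is set when
 * every element compared true and bit 1 when no element did (the "all" and
 * "none" accumulators above), matching the AltiVec vcmp..{.} definition.
 */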
2209 void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2214 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2215 int32_t prod = a->s16[i] * b->s16[i];
2216 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2217 r->s16[i] = cvtswsh (t, &sat);
2221 env->vscr |= (1 << VSCR_SAT);
2225 void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2230 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2231 int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
2232 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2233 r->s16[i] = cvtswsh (t, &sat);
2237 env->vscr |= (1 << VSCR_SAT);
2241 #define VMINMAX_DO(name, compare, element) \
2242 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2245 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2246 if (a->element[i] compare b->element[i]) { \
2247 r->element[i] = b->element[i]; \
2249 r->element[i] = a->element[i]; \
2253 #define VMINMAX(suffix, element) \
2254 VMINMAX_DO(min##suffix, >, element) \
2255 VMINMAX_DO(max##suffix, <, element)
2265 #define VMINMAXFP(suffix, rT, rF) \
2266 void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2269 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2270 HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2271 if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
2272 r->f[i] = rT->f[i]; \
2274 r->f[i] = rF->f[i]; \
2279 VMINMAXFP(minfp, a, b)
2280 VMINMAXFP(maxfp, b, a)
2283 void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2286 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2287 int32_t prod = a->s16[i] * b->s16[i];
2288 r->s16[i] = (int16_t) (prod + c->s16[i]);
2292 #define VMRG_DO(name, element, highp) \
2293 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2297 size_t n_elems = ARRAY_SIZE(r->element); \
2298 for (i = 0; i < n_elems/2; i++) { \
2300 result.element[i*2+HI_IDX] = a->element[i]; \
2301 result.element[i*2+LO_IDX] = b->element[i]; \
2303 result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2304 result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2309 #if defined(WORDS_BIGENDIAN)
2316 #define VMRG(suffix, element) \
2317 VMRG_DO(mrgl##suffix, element, MRGHI) \
2318 VMRG_DO(mrgh##suffix, element, MRGLO)
2327 void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2332 for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
2333 prod[i] = (int32_t)a->s8[i] * b->u8[i];
2336 VECTOR_FOR_INORDER_I(i, s32) {
2337 r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2341 void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2346 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2347 prod[i] = a->s16[i] * b->s16[i];
2350 VECTOR_FOR_INORDER_I(i, s32) {
2351 r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
2355 void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2361 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2362 prod[i] = (int32_t)a->s16[i] * b->s16[i];
2365 VECTOR_FOR_INORDER_I (i, s32) {
2366 int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
2367 r->u32[i] = cvtsdsw(t, &sat);
2371 env->vscr |= (1 << VSCR_SAT);
2375 void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2380 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2381 prod[i] = a->u8[i] * b->u8[i];
2384 VECTOR_FOR_INORDER_I(i, u32) {
2385 r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2389 void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2394 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2395 prod[i] = a->u16[i] * b->u16[i];
2398 VECTOR_FOR_INORDER_I(i, u32) {
2399 r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
2403 void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2409 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2410 prod[i] = a->u16[i] * b->u16[i];
2413 VECTOR_FOR_INORDER_I (i, s32) {
2414 uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
2415 r->u32[i] = cvtuduw(t, &sat);
2419 env->vscr |= (1 << VSCR_SAT);
2423 #define VMUL_DO(name, mul_element, prod_element, evenp) \
2424 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2427 VECTOR_FOR_INORDER_I(i, prod_element) { \
2429 r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2431 r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2435 #define VMUL(suffix, mul_element, prod_element) \
2436 VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2437 VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2445 void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2449 VECTOR_FOR_INORDER_I (i, u8) {
2450 int s = c->u8[i] & 0x1f;
2451 #if defined(WORDS_BIGENDIAN)
2452 int index = s & 0xf;
2454 int index = 15 - (s & 0xf);
2457 result.u8[i] = b->u8[index];
2459 result.u8[i] = a->u8[index];
2465 #if defined(WORDS_BIGENDIAN)
2470 void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2474 #if defined(WORDS_BIGENDIAN)
2475 const ppc_avr_t *x[2] = { a, b };
2477 const ppc_avr_t *x[2] = { b, a };
2480 VECTOR_FOR_INORDER_I (i, u64) {
2481 VECTOR_FOR_INORDER_I (j, u32){
2482 uint32_t e = x[i]->u32[j];
2483 result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
2484 ((e >> 6) & 0x3e0) |
2491 #define VPK(suffix, from, to, cvt, dosat) \
2492 void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2497 ppc_avr_t *a0 = PKBIG ? a : b; \
2498 ppc_avr_t *a1 = PKBIG ? b : a; \
2499 VECTOR_FOR_INORDER_I (i, from) { \
2500 result.to[i] = cvt(a0->from[i], &sat); \
2501 result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
2504 if (dosat && sat) { \
2505 env->vscr |= (1 << VSCR_SAT); \
2509 VPK(shss, s16, s8, cvtshsb, 1)
2510 VPK(shus, s16, u8, cvtshub, 1)
2511 VPK(swss, s32, s16, cvtswsh, 1)
2512 VPK(swus, s32, u16, cvtswuh, 1)
2513 VPK(uhus, u16, u8, cvtuhub, 1)
2514 VPK(uwus, u32, u16, cvtuwuh, 1)
2515 VPK(uhum, u16, u8, I, 0)
2516 VPK(uwum, u32, u16, I, 0)
2521 #define VRFI(suffix, rounding) \
2522 void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2525 float_status s = env->vec_status; \
2526 set_float_rounding_mode(rounding, &s); \
2527 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2528 HANDLE_NAN1(r->f[i], b->f[i]) { \
2529 r->f[i] = float32_round_to_int (b->f[i], &s); \
2533 VRFI(n, float_round_nearest_even)
2534 VRFI(m, float_round_down)
2535 VRFI(p, float_round_up)
2536 VRFI(z, float_round_to_zero)
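/* vrfin/vrfim/vrfip/vrfiz round each float element to an integral value;
 * the macro works on a local copy of vec_status, so the temporary rounding
 * mode never leaks into later vector operations.
 */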
2539 #define VROTATE(suffix, element) \
2540 void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2543 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2544 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2545 unsigned int shift = b->element[i] & mask; \
2546 r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2554 void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2556 r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
2557 r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
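/* vlogefp (vector log2 estimate): computed here with float32_log2 on each
 * single-precision element; NaN inputs are passed through by HANDLE_NAN1. */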
2560 void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
2563 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2564 HANDLE_NAN1(r->f[i], b->f[i]) {
2565 r->f[i] = float32_log2(b->f[i], &env->vec_status);
2570 #if defined(WORDS_BIGENDIAN)
2577 /* The specification says that the results are undefined if the shift
2578  * counts are not all identical.  We check that they are, which matches
2579  * what real hardware appears to do. */
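/* vsl/vsr shift the whole 128-bit register by 0-7 bits: each 64-bit half
 * is shifted separately and the bits that cross the boundary are carried
 * into the other half. */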
2580 #define VSHIFT(suffix, leftp) \
2581 void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2583 int shift = b->u8[LO_IDX*0xf] & 0x7; \
2586 for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
2587 doit = doit && ((b->u8[i] & 0x7) == shift); \
2592 } else if (leftp) { \
2593 uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \
2594 r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \
2595 r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \
2597 uint64_t carry = a->u64[HI_IDX] << (64 - shift); \
2598 r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \
2599 r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \
2609 #define VSL(suffix, element) \
2610 void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2613 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2614 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2615 unsigned int shift = b->element[i] & mask; \
2616 r->element[i] = a->element[i] << shift; \
2624 void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2626 int sh = shift & 0xf;
2630 #if defined(WORDS_BIGENDIAN)
2631 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2634 result.u8[i] = b->u8[index-0x10];
2636 result.u8[i] = a->u8[index];
2640 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2641 int index = (16 - sh) + i;
2643 result.u8[i] = a->u8[index-0x10];
2645 result.u8[i] = b->u8[index];
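/* vslo: shift the vector left by whole octets; the octet count comes from
 * bits 6:3 of the low-order byte of b, and vacated bytes are zeroed. */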
2652 void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2654 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2656 #if defined (WORDS_BIGENDIAN)
2657 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2658 memset (&r->u8[16-sh], 0, sh);
2660 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2661 memset (&r->u8[0], 0, sh);
2665 /* Experimental testing shows that hardware masks the immediate. */
2666 #define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2667 #if defined(WORDS_BIGENDIAN)
2668 #define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2670 #define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2672 #define VSPLT(suffix, element) \
2673 void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2675 uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
2677 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2678 r->element[i] = s; \
2685 #undef SPLAT_ELEMENT
2686 #undef _SPLAT_MASKED
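/* vspltisb/vspltish/vspltisw: sign-extend the 5-bit SIMM field and
 * replicate it into every element; (int8_t)(splat << 3) >> 3 performs the
 * 5-bit sign extension. */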
2688 #define VSPLTI(suffix, element, splat_type) \
2689 void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat) \
2691 splat_type x = (int8_t)(splat << 3) >> 3; \
2693 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2694 r->element[i] = x; \
2697 VSPLTI(b, s8, int8_t)
2698 VSPLTI(h, s16, int16_t)
2699 VSPLTI(w, s32, int32_t)
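/* vsr*: shift each element of a right by the count in the corresponding
 * element of b, masked to the element width; instantiating the macro with
 * a signed element type yields the arithmetic (vsra*) forms. */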
2702 #define VSR(suffix, element) \
2703 void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2706 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2707 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2708 unsigned int shift = b->element[i] & mask; \
2709 r->element[i] = a->element[i] >> shift; \
2720 void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2722 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2724 #if defined (WORDS_BIGENDIAN)
2725 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2726 memset (&r->u8[0], 0, sh);
2728 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2729 memset (&r->u8[16-sh], 0, sh);
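/* vsubcuw: record the carry-out of each 32-bit unsigned subtraction
 * a - b, i.e. 1 when no borrow is generated (a >= b), otherwise 0. */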
2733 void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2736 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2737 r->u32[i] = a->u32[i] >= b->u32[i];
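/* vsumsws: saturating sum of all signed word elements of a plus the
 * low-order word of b; the result is written to the low-order word of the
 * destination and the remaining words are cleared. */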
2741 void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2748 #if defined(WORDS_BIGENDIAN)
2749 upper = ARRAY_SIZE(r->s32)-1;
2753 t = (int64_t)b->s32[upper];
2754 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2758 result.s32[upper] = cvtsdsw(t, &sat);
2762 env->vscr |= (1 << VSCR_SAT);
2766 void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2772 #if defined(WORDS_BIGENDIAN)
2777 for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
2778 int64_t t = (int64_t)b->s32[upper+i*2];
2780 for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
2783 result.s32[upper+i*2] = cvtsdsw(t, &sat);
2788 env->vscr |= (1 << VSCR_SAT);
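/* vsum4sbs/vsum4shs/vsum4ubs: for each word element, add the signed
 * (or unsigned, for ubs) sub-elements of a that share that word to the
 * corresponding word of b, saturating the result. */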
2792 void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2797 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2798 int64_t t = (int64_t)b->s32[i];
2799 for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
2802 r->s32[i] = cvtsdsw(t, &sat);
2806 env->vscr |= (1 << VSCR_SAT);
2810 void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2815 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2816 int64_t t = (int64_t)b->s32[i];
2817 t += a->s16[2*i] + a->s16[2*i+1];
2818 r->s32[i] = cvtsdsw(t, &sat);
2822 env->vscr |= (1 << VSCR_SAT);
2826 void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2831 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2832 uint64_t t = (uint64_t)b->u32[i];
2833 for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
2836 r->u32[i] = cvtuduw(t, &sat);
2840 env->vscr |= (1 << VSCR_SAT);
2844 #if defined(WORDS_BIGENDIAN)
2851 #define VUPKPX(suffix, hi) \
2852 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2856 for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
2857 uint16_t e = b->u16[hi ? i : i+4]; \
2858 uint8_t a = (e >> 15) ? 0xff : 0; \
2859 uint8_t r = (e >> 10) & 0x1f; \
2860 uint8_t g = (e >> 5) & 0x1f; \
2861 uint8_t b = e & 0x1f; \
2862 result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
2870 #define VUPK(suffix, unpacked, packee, hi) \
2871 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2876 for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
2877 result.unpacked[i] = b->packee[i]; \
2880 for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
2881 result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
2886 VUPK(hsb, s16, s8, UPKHI)
2887 VUPK(hsh, s32, s16, UPKHI)
2888 VUPK(lsb, s16, s8, UPKLO)
2889 VUPK(lsh, s32, s16, UPKLO)
2894 #undef DO_HANDLE_NAN
2898 #undef VECTOR_FOR_INORDER_I
2902 /*****************************************************************************/
2903 /* SPE extension helpers */
2904 /* Use a table to make this quicker */
2905 static uint8_t hbrev[16] = {
2906 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
2907 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
2910 static always_inline uint8_t byte_reverse (uint8_t val)
2912 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
2915 static always_inline uint32_t word_reverse (uint32_t val)
2917 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
2918 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
2921 #define MASKBITS 16 // Arbitrary value - to be fixed (implementation dependent)
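/* brinc: bit-reversed increment, used for FFT-style addressing.  The low
 * MASKBITS bits of arg1 are incremented in bit-reversed order under the
 * mask taken from arg2; bits outside the mask are passed through. */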
2922 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
2924 uint32_t a, b, d, mask;
2926 mask = UINT32_MAX >> (32 - MASKBITS);
2929 d = word_reverse(1 + word_reverse(a | ~b));
2930 return (arg1 & ~mask) | (d & b);
2933 uint32_t helper_cntlsw32 (uint32_t val)
2935 if (val & 0x80000000)
2941 uint32_t helper_cntlzw32 (uint32_t val)
2946 /* Single-precision floating-point conversions */
2947 static always_inline uint32_t efscfsi (uint32_t val)
2951 u.f = int32_to_float32(val, &env->vec_status);
2956 static always_inline uint32_t efscfui (uint32_t val)
2960 u.f = uint32_to_float32(val, &env->vec_status);
2965 static always_inline int32_t efsctsi (uint32_t val)
2970 /* NaNs are not handled the way IEEE 754 specifies */
2971 if (unlikely(float32_is_nan(u.f)))
2974 return float32_to_int32(u.f, &env->vec_status);
2977 static always_inline uint32_t efsctui (uint32_t val)
2982 /* NaNs are not handled the way IEEE 754 specifies */
2983 if (unlikely(float32_is_nan(u.f)))
2986 return float32_to_uint32(u.f, &env->vec_status);
2989 static always_inline uint32_t efsctsiz (uint32_t val)
2994 /* NaNs are not handled the way IEEE 754 specifies */
2995 if (unlikely(float32_is_nan(u.f)))
2998 return float32_to_int32_round_to_zero(u.f, &env->vec_status);
3001 static always_inline uint32_t efsctuiz (uint32_t val)
3006 /* NaNs are not handled the way IEEE 754 specifies */
3007 if (unlikely(float32_is_nan(u.f)))
3010 return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
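/* The conversions below treat the 32-bit "fractional" operand as its
 * integer value scaled by 2^32: converting from a fraction converts the
 * integer and then divides by 2^32; converting to a fraction multiplies
 * by 2^32 before the final integer conversion. */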
3013 static always_inline uint32_t efscfsf (uint32_t val)
3018 u.f = int32_to_float32(val, &env->vec_status);
3019 tmp = int64_to_float32(1ULL << 32, &env->vec_status);
3020 u.f = float32_div(u.f, tmp, &env->vec_status);
3025 static always_inline uint32_t efscfuf (uint32_t val)
3030 u.f = uint32_to_float32(val, &env->vec_status);
3031 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3032 u.f = float32_div(u.f, tmp, &env->vec_status);
3037 static always_inline uint32_t efsctsf (uint32_t val)
3043 /* NaNs are not handled the way IEEE 754 specifies */
3044 if (unlikely(float32_is_nan(u.f)))
3046 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3047 u.f = float32_mul(u.f, tmp, &env->vec_status);
3049 return float32_to_int32(u.f, &env->vec_status);
3052 static always_inline uint32_t efsctuf (uint32_t val)
3058 /* NaNs are not handled the way IEEE 754 specifies */
3059 if (unlikely(float32_is_nan(u.f)))
3061 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3062 u.f = float32_mul(u.f, tmp, &env->vec_status);
3064 return float32_to_uint32(u.f, &env->vec_status);
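/* Wrappers exposing the efs* conversion primitives as helpers; the vector
 * (ev*) variants below apply the scalar conversion independently to each
 * 32-bit half of the 64-bit operand. */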
3067 #define HELPER_SPE_SINGLE_CONV(name) \
3068 uint32_t helper_e##name (uint32_t val) \
3070 return e##name(val); \
3073 HELPER_SPE_SINGLE_CONV(fscfsi);
3075 HELPER_SPE_SINGLE_CONV(fscfui);
3077 HELPER_SPE_SINGLE_CONV(fscfuf);
3079 HELPER_SPE_SINGLE_CONV(fscfsf);
3081 HELPER_SPE_SINGLE_CONV(fsctsi);
3083 HELPER_SPE_SINGLE_CONV(fsctui);
3085 HELPER_SPE_SINGLE_CONV(fsctsiz);
3087 HELPER_SPE_SINGLE_CONV(fsctuiz);
3089 HELPER_SPE_SINGLE_CONV(fsctsf);
3091 HELPER_SPE_SINGLE_CONV(fsctuf);
3093 #define HELPER_SPE_VECTOR_CONV(name) \
3094 uint64_t helper_ev##name (uint64_t val) \
3096 return ((uint64_t)e##name(val >> 32) << 32) | \
3097 (uint64_t)e##name(val); \
3100 HELPER_SPE_VECTOR_CONV(fscfsi);
3102 HELPER_SPE_VECTOR_CONV(fscfui);
3104 HELPER_SPE_VECTOR_CONV(fscfuf);
3106 HELPER_SPE_VECTOR_CONV(fscfsf);
3108 HELPER_SPE_VECTOR_CONV(fsctsi);
3110 HELPER_SPE_VECTOR_CONV(fsctui);
3112 HELPER_SPE_VECTOR_CONV(fsctsiz);
3114 HELPER_SPE_VECTOR_CONV(fsctuiz);
3116 HELPER_SPE_VECTOR_CONV(fsctsf);
3118 HELPER_SPE_VECTOR_CONV(fsctuf);
3120 /* Single-precision floating-point arithmetic */
3121 static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
3126 u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3130 static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
3135 u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3139 static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
3144 u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3148 static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
3153 u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3157 #define HELPER_SPE_SINGLE_ARITH(name) \
3158 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3160 return e##name(op1, op2); \
3163 HELPER_SPE_SINGLE_ARITH(fsadd);
3165 HELPER_SPE_SINGLE_ARITH(fssub);
3167 HELPER_SPE_SINGLE_ARITH(fsmul);
3169 HELPER_SPE_SINGLE_ARITH(fsdiv);
3171 #define HELPER_SPE_VECTOR_ARITH(name) \
3172 uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
3174 return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
3175 (uint64_t)e##name(op1, op2); \
3178 HELPER_SPE_VECTOR_ARITH(fsadd);
3180 HELPER_SPE_VECTOR_ARITH(fssub);
3182 HELPER_SPE_VECTOR_ARITH(fsmul);
3184 HELPER_SPE_VECTOR_ARITH(fsdiv);
3186 /* Single-precision floating-point comparisons */
3187 static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
3192 return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3195 static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
3200 return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
3203 static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
3208 return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3211 static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
3213 /* XXX: TODO: test special values (NaN, infinities, ...) */
3214 return efststlt(op1, op2);
3217 static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
3219 /* XXX: TODO: test special values (NaN, infinities, ...) */
3220 return efststgt(op1, op2);
3223 static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
3225 /* XXX: TODO: test special values (NaN, infinities, ...) */
3226 return efststeq(op1, op2);
3229 #define HELPER_SINGLE_SPE_CMP(name) \
3230 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3232 return e##name(op1, op2) << 2; \
3235 HELPER_SINGLE_SPE_CMP(fststlt);
3237 HELPER_SINGLE_SPE_CMP(fststgt);
3239 HELPER_SINGLE_SPE_CMP(fststeq);
3241 HELPER_SINGLE_SPE_CMP(fscmplt);
3243 HELPER_SINGLE_SPE_CMP(fscmpgt);
3245 HELPER_SINGLE_SPE_CMP(fscmpeq);
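/* Merge the per-half comparison results into a 4-bit CR field:
 * bit 3 = high half, bit 2 = low half, bit 1 = OR of both,
 * bit 0 = AND of both. */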
3247 static always_inline uint32_t evcmp_merge (int t0, int t1)
3249 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
3252 #define HELPER_VECTOR_SPE_CMP(name) \
3253 uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
3255 return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
3258 HELPER_VECTOR_SPE_CMP(fststlt);
3260 HELPER_VECTOR_SPE_CMP(fststgt);
3262 HELPER_VECTOR_SPE_CMP(fststeq);
3264 HELPER_VECTOR_SPE_CMP(fscmplt);
3266 HELPER_VECTOR_SPE_CMP(fscmpgt);
3268 HELPER_VECTOR_SPE_CMP(fscmpeq);
3270 /* Double-precision floating-point conversion */
3271 uint64_t helper_efdcfsi (uint32_t val)
3275 u.d = int32_to_float64(val, &env->vec_status);
3280 uint64_t helper_efdcfsid (uint64_t val)
3284 u.d = int64_to_float64(val, &env->vec_status);
3289 uint64_t helper_efdcfui (uint32_t val)
3293 u.d = uint32_to_float64(val, &env->vec_status);
3298 uint64_t helper_efdcfuid (uint64_t val)
3302 u.d = uint64_to_float64(val, &env->vec_status);
3307 uint32_t helper_efdctsi (uint64_t val)
3312 /* NaNs are not handled the way IEEE 754 specifies */
3313 if (unlikely(float64_is_nan(u.d)))
3316 return float64_to_int32(u.d, &env->vec_status);
3319 uint32_t helper_efdctui (uint64_t val)
3324 /* NaNs are not handled the way IEEE 754 specifies */
3325 if (unlikely(float64_is_nan(u.d)))
3328 return float64_to_uint32(u.d, &env->vec_status);
3331 uint32_t helper_efdctsiz (uint64_t val)
3336 /* NaNs are not handled the way IEEE 754 specifies */
3337 if (unlikely(float64_is_nan(u.d)))
3340 return float64_to_int32_round_to_zero(u.d, &env->vec_status);
3343 uint64_t helper_efdctsidz (uint64_t val)
3348 /* NaNs are not handled the way IEEE 754 specifies */
3349 if (unlikely(float64_is_nan(u.d)))
3352 return float64_to_int64_round_to_zero(u.d, &env->vec_status);
3355 uint32_t helper_efdctuiz (uint64_t val)
3360 /* NaNs are not handled the way IEEE 754 specifies */
3361 if (unlikely(float64_is_nan(u.d)))
3364 return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
3367 uint64_t helper_efdctuidz (uint64_t val)
3372 /* NaNs are not handled the way IEEE 754 specifies */
3373 if (unlikely(float64_is_nan(u.d)))
3376 return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
3379 uint64_t helper_efdcfsf (uint32_t val)
3384 u.d = int32_to_float64(val, &env->vec_status);
3385 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3386 u.d = float64_div(u.d, tmp, &env->vec_status);
3391 uint64_t helper_efdcfuf (uint32_t val)
3396 u.d = uint32_to_float64(val, &env->vec_status);
3397 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3398 u.d = float64_div(u.d, tmp, &env->vec_status);
3403 uint32_t helper_efdctsf (uint64_t val)
3409 /* NaNs are not handled the way IEEE 754 specifies */
3410 if (unlikely(float64_is_nan(u.d)))
3412 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3413 u.d = float64_mul(u.d, tmp, &env->vec_status);
3415 return float64_to_int32(u.d, &env->vec_status);
3418 uint32_t helper_efdctuf (uint64_t val)
3424 /* NaNs are not handled the way IEEE 754 specifies */
3425 if (unlikely(float64_is_nan(u.d)))
3427 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3428 u.d = float64_mul(u.d, tmp, &env->vec_status);
3430 return float64_to_uint32(u.d, &env->vec_status);
3433 uint32_t helper_efscfd (uint64_t val)
3439 u2.f = float64_to_float32(u1.d, &env->vec_status);
3444 uint64_t helper_efdcfs (uint32_t val)
3450 u2.d = float32_to_float64(u1.f, &env->vec_status);
3455 /* Double-precision floating-point arithmetic */
3456 uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
3461 u1.d = float64_add(u1.d, u2.d, &env->vec_status);
3465 uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
3470 u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
3474 uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
3479 u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
3483 uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
3488 u1.d = float64_div(u1.d, u2.d, &env->vec_status);
3492 /* Double-precision floating-point comparisons */
3493 uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
3498 return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3501 uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
3506 return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
3509 uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
3514 return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3517 uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
3519 /* XXX: TODO: test special values (NaN, infinities, ...) */
3520 return helper_efdtstlt(op1, op2);
3523 uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
3525 /* XXX: TODO: test special values (NaN, infinities, ...) */
3526 return helper_efdtstgt(op1, op2);
3529 uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
3531 /* XXX: TODO: test special values (NaN, infinities, ...) */
3532 return helper_efdtsteq(op1, op2);
3535 /*****************************************************************************/
3536 /* Softmmu support */
3537 #if !defined (CONFIG_USER_ONLY)
3539 #define MMUSUFFIX _mmu
3542 #include "softmmu_template.h"
3545 #include "softmmu_template.h"
3548 #include "softmmu_template.h"
3551 #include "softmmu_template.h"
3553 /* Try to fill the TLB; raise an exception on error.  If retaddr is
3554    NULL, the function was called from C code (i.e. not from generated
3555    code or from helper.c). */
3556 /* XXX: fix it to restore all registers */
3557 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3559 TranslationBlock *tb;
3560 CPUState *saved_env;
3564 /* XXX: hack to restore env in all cases, even if not called from
3567 env = cpu_single_env;
3568 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3569 if (unlikely(ret != 0)) {
3570 if (likely(retaddr)) {
3571 /* now we have a real cpu fault */
3572 pc = (unsigned long)retaddr;
3573 tb = tb_find_pc(pc);
3575 /* the PC is inside the translated code. It means that we have
3576 a virtual CPU fault */
3577 cpu_restore_state(tb, env, pc, NULL);
3580 helper_raise_exception_err(env->exception_index, env->error_code);
3585 /* Segment registers load and store */
3586 target_ulong helper_load_sr (target_ulong sr_num)
3588 return env->sr[sr_num];
3591 void helper_store_sr (target_ulong sr_num, target_ulong val)
3593 ppc_store_sr(env, sr_num, val);
3596 /* SLB management */
3597 #if defined(TARGET_PPC64)
3598 target_ulong helper_load_slb (target_ulong slb_nr)
3600 return ppc_load_slb(env, slb_nr);
3603 void helper_store_slb (target_ulong slb_nr, target_ulong rs)
3605 ppc_store_slb(env, slb_nr, rs);
3608 void helper_slbia (void)
3610 ppc_slb_invalidate_all(env);
3613 void helper_slbie (target_ulong addr)
3615 ppc_slb_invalidate_one(env, addr);
3618 #endif /* defined(TARGET_PPC64) */
3620 /* TLB management */
3621 void helper_tlbia (void)
3623 ppc_tlb_invalidate_all(env);
3626 void helper_tlbie (target_ulong addr)
3628 ppc_tlb_invalidate_one(env, addr);
3631 /* Software-driven TLB management */
3632 /* PowerPC 602/603 software TLB load instruction helpers */
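/* Reload one software TLB entry from the SPRs set up by the TLB-miss
 * handler: the compare value and effective address come from ICMP/IMISS
 * (code) or DCMP/DMISS (data), the physical data from RPA, and the way to
 * replace is taken from SRR1. */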
3633 static void do_6xx_tlb (target_ulong new_EPN, int is_code)
3635 target_ulong RPN, CMP, EPN;
3638 RPN = env->spr[SPR_RPA];
3640 CMP = env->spr[SPR_ICMP];
3641 EPN = env->spr[SPR_IMISS];
3643 CMP = env->spr[SPR_DCMP];
3644 EPN = env->spr[SPR_DMISS];
3646 way = (env->spr[SPR_SRR1] >> 17) & 1;
3647 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3648 " PTE1 " ADDRX " way %d\n",
3649 __func__, new_EPN, EPN, CMP, RPN, way);
3650 /* Store this TLB */
3651 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3652 way, is_code, CMP, RPN);
3655 void helper_6xx_tlbd (target_ulong EPN)
3660 void helper_6xx_tlbi (target_ulong EPN)
3665 /* PowerPC 74xx software TLB load instruction helpers */
3666 static void do_74xx_tlb (target_ulong new_EPN, int is_code)
3668 target_ulong RPN, CMP, EPN;
3671 RPN = env->spr[SPR_PTELO];
3672 CMP = env->spr[SPR_PTEHI];
3673 EPN = env->spr[SPR_TLBMISS] & ~0x3;
3674 way = env->spr[SPR_TLBMISS] & 0x3;
3675 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3676 " PTE1 " ADDRX " way %d\n",
3677 __func__, new_EPN, EPN, CMP, RPN, way);
3678 /* Store this TLB */
3679 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3680 way, is_code, CMP, RPN);
3683 void helper_74xx_tlbd (target_ulong EPN)
3685 do_74xx_tlb(EPN, 0);
3688 void helper_74xx_tlbi (target_ulong EPN)
3690 do_74xx_tlb(EPN, 1);
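/* BookE TLB size encoding: the page size is 1 KB shifted left by twice
 * the size field, i.e. 1 KB * 4^size. */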
3693 static always_inline target_ulong booke_tlb_to_page_size (int size)
3695 return 1024 << (2 * size);
3698 static always_inline int booke_page_size_to_tlb (target_ulong page_size)
3702 switch (page_size) {
3736 #if defined (TARGET_PPC64)
3737 case 0x000100000000ULL:
3740 case 0x000400000000ULL:
3743 case 0x001000000000ULL:
3746 case 0x004000000000ULL:
3749 case 0x010000000000ULL:
3761 /* Helpers for 4xx TLB management */
3762 target_ulong helper_4xx_tlbre_lo (target_ulong entry)
3769 tlb = &env->tlb[entry].tlbe;
3771 if (tlb->prot & PAGE_VALID)
3773 size = booke_page_size_to_tlb(tlb->size);
3774 if (size < 0 || size > 0x7)
3777 env->spr[SPR_40x_PID] = tlb->PID;
3781 target_ulong helper_4xx_tlbre_hi (target_ulong entry)
3787 tlb = &env->tlb[entry].tlbe;
3789 if (tlb->prot & PAGE_EXEC)
3791 if (tlb->prot & PAGE_WRITE)
3796 void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
3799 target_ulong page, end;
3801 LOG_SWTLB("%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
3803 tlb = &env->tlb[entry].tlbe;
3804 /* Invalidate previous TLB (if it's valid) */
3805 if (tlb->prot & PAGE_VALID) {
3806 end = tlb->EPN + tlb->size;
3807 LOG_SWTLB("%s: invalidate old TLB %d start " ADDRX
3808 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3809 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3810 tlb_flush_page(env, page);
3812 tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
3813 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
3814 * If this ever occurs, one should use the ppcemb target instead
3815 * of the ppc or ppc64 one
3817 if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
3818 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
3819 "are not supported (%d)\n",
3820 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
3822 tlb->EPN = val & ~(tlb->size - 1);
3824 tlb->prot |= PAGE_VALID;
3826 tlb->prot &= ~PAGE_VALID;
3828 /* XXX: TO BE FIXED */
3829 cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
3831 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
3832 tlb->attr = val & 0xFF;
3833 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3834 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3835 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
3836 tlb->prot & PAGE_READ ? 'r' : '-',
3837 tlb->prot & PAGE_WRITE ? 'w' : '-',
3838 tlb->prot & PAGE_EXEC ? 'x' : '-',
3839 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
3840 /* Invalidate new TLB (if valid) */
3841 if (tlb->prot & PAGE_VALID) {
3842 end = tlb->EPN + tlb->size;
3843 LOG_SWTLB("%s: invalidate TLB %d start " ADDRX
3844 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3845 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3846 tlb_flush_page(env, page);
3850 void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
3854 LOG_SWTLB("%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
3856 tlb = &env->tlb[entry].tlbe;
3857 tlb->RPN = val & 0xFFFFFC00;
3858 tlb->prot = PAGE_READ;
3860 tlb->prot |= PAGE_EXEC;
3862 tlb->prot |= PAGE_WRITE;
3863 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3864 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3865 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
3866 tlb->prot & PAGE_READ ? 'r' : '-',
3867 tlb->prot & PAGE_WRITE ? 'w' : '-',
3868 tlb->prot & PAGE_EXEC ? 'x' : '-',
3869 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
3872 target_ulong helper_4xx_tlbsx (target_ulong address)
3874 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
3877 /* PowerPC 440 TLB management */
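/* tlbwe writes one word of a 440 TLB entry: word 0 carries the EPN, the
 * page size and the valid bit (the PID is loaded from MMUCR at the same
 * time), word 1 the RPN, and word 2 the storage attributes and the
 * access-protection bits. */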
3878 void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
3881 target_ulong EPN, RPN, size;
3884 LOG_SWTLB("%s word %d entry %d value " ADDRX "\n",
3885 __func__, word, (int)entry, value);
3888 tlb = &env->tlb[entry].tlbe;
3891 /* Just here to please gcc */
3893 EPN = value & 0xFFFFFC00;
3894 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
3897 size = booke_tlb_to_page_size((value >> 4) & 0xF);
3898 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
3902 tlb->attr |= (value >> 8) & 1;
3903 if (value & 0x200) {
3904 tlb->prot |= PAGE_VALID;
3906 if (tlb->prot & PAGE_VALID) {
3907 tlb->prot &= ~PAGE_VALID;
3911 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
3916 RPN = value & 0xFFFFFC0F;
3917 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
3922 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
3923 tlb->prot = tlb->prot & PAGE_VALID;
3925 tlb->prot |= PAGE_READ << 4;
3927 tlb->prot |= PAGE_WRITE << 4;
3929 tlb->prot |= PAGE_EXEC << 4;
3931 tlb->prot |= PAGE_READ;
3933 tlb->prot |= PAGE_WRITE;
3935 tlb->prot |= PAGE_EXEC;
3940 target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
3947 tlb = &env->tlb[entry].tlbe;
3950 /* Just here to please gcc */
3953 size = booke_page_size_to_tlb(tlb->size);
3954 if (size < 0 || size > 0xF)
3957 if (tlb->attr & 0x1)
3959 if (tlb->prot & PAGE_VALID)
3961 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
3962 env->spr[SPR_440_MMUCR] |= tlb->PID;
3968 ret = tlb->attr & ~0x1;
3969 if (tlb->prot & (PAGE_READ << 4))
3971 if (tlb->prot & (PAGE_WRITE << 4))
3973 if (tlb->prot & (PAGE_EXEC << 4))
3975 if (tlb->prot & PAGE_READ)
3977 if (tlb->prot & PAGE_WRITE)
3979 if (tlb->prot & PAGE_EXEC)
3986 target_ulong helper_440_tlbsx (target_ulong address)
3988 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
3991 #endif /* !CONFIG_USER_ONLY */