2 * Alpha emulation cpu micro-operations helpers for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #include "host-utils.h"
23 #include "softfloat.h"
25 #include "op_helper.h"
27 void helper_tb_flush (void)
32 void cpu_dump_EA (target_ulong EA);
33 void helper_print_mem_EA (target_ulong EA)
38 /*****************************************************************************/
39 /* Exceptions processing helpers */
/* Raise exception `excp` with auxiliary code `error` by recording both in
   the CPU state.  NOTE(review): the tail of this function (presumably the
   jump back to the main loop, cpu_loop_exit()) is not visible in this
   chunk — confirm against the full file.  */
void helper_excp (int excp, int error)
    env->exception_index = excp;
    env->error_code = error;
/* AMASK: clear from `arg` the bits corresponding to architecture features
   this CPU implements, dispatching on the implementation version.
   NOTE(review): only the head of the switch is visible in this chunk.  */
uint64_t helper_amask (uint64_t arg)
    switch (env->implver) {
        /* EV4, EV45, LCA, LCA45 & EV5 */
62 uint64_t helper_load_pcc (void)
68 uint64_t helper_load_implver (void)
73 uint64_t helper_load_fpcr (void)
76 #ifdef CONFIG_SOFTFLOAT
77 ret |= env->fp_status.float_exception_flags << 52;
78 if (env->fp_status.float_exception_flags)
80 env->ipr[IPR_EXC_SUM] &= ~0x3E:
81 env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
83 switch (env->fp_status.float_rounding_mode) {
84 case float_round_nearest_even:
87 case float_round_down:
93 case float_round_to_zero:
/* Write the FPCR: copy the exception-flag field (bits 57:52) into the
   softfloat status and decode the dynamic rounding mode from bits 59:58
   (0 = chopped, 1 = minus infinity, 2 = normal, 3 = plus infinity).
   NOTE(review): the `case N:` labels and `break;` lines of the switch
   are not visible in this chunk.  */
void helper_store_fpcr (uint64_t val)
#ifdef CONFIG_SOFTFLOAT
    set_float_exception_flags((val >> 52) & 0x3F, &FP_STATUS);
    switch ((val >> 58) & 3) {
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
        set_float_rounding_mode(float_round_down, &FP_STATUS);
        set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
        set_float_rounding_mode(float_round_up, &FP_STATUS);
120 spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;
/* RS: sample the per-CPU interrupt flag under the interprocessor lock.
   NOTE(review): the store of the new flag value (presumably 1) and the
   `return tmp;` are not visible in this chunk.  */
uint64_t helper_rs(void)
    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    spin_unlock(&intr_cpu_lock);
/* RC: sample the per-CPU interrupt flag under the interprocessor lock.
   NOTE(review): the store of the new flag value (presumably 0) and the
   `return tmp;` are not visible in this chunk.  */
uint64_t helper_rc(void)
    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    spin_unlock(&intr_cpu_lock);
/* ADDQ/V: quadword add with arithmetic-overflow trap.  Classic sign
   trick: overflow iff the operands share a sign that differs from the
   result's (tmp holds the pre-add value of op1).
   NOTE(review): the addition itself and the return are not visible in
   this chunk.  */
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
/* ADDL/V: longword add with overflow trap; result is the low 32 bits.
   NOTE(review): tmp's initialisation and the return are not visible.
   Also `-1UL`/`1UL` are only 32-bit on ILP32 hosts — the quadword
   variant uses ULL; confirm intended widths.  */
uint64_t helper_addlv (uint64_t op1, uint64_t op2)
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
/* SUBQ/V: quadword subtract with overflow trap (tmp presumably holds
   op2 or the result — the subtraction itself is not visible in this
   chunk; confirm against the full file).  */
uint64_t helper_subqv (uint64_t op1, uint64_t op2)
    if (unlikely(((~tmp) ^ op1 ^ (-1ULL)) & ((~tmp) ^ op2) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
/* SUBL/V: longword subtract with overflow trap; result is the low 32
   bits.  NOTE(review): tmp's initialisation and the return are not
   visible in this chunk.  */
uint64_t helper_sublv (uint64_t op1, uint64_t op2)
    op1 = (uint32_t)(op1 - op2);
    if (unlikely(((~tmp) ^ op1 ^ (-1UL)) & ((~tmp) ^ op2) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
186 uint64_t helper_mullv (uint64_t op1, uint64_t op2)
188 int64_t res = (int64_t)op1 * (int64_t)op2;
190 if (unlikely((int32_t)res != res)) {
191 helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
193 return (int64_t)((int32_t)res);
/* MULQ/V: quadword multiply with overflow trap: a signed 128-bit product
   fits in 64 bits iff the high half is 0 or -1 matching the low half's
   sign — `(th + 1) > 1` (unsigned) rejects everything else.
   NOTE(review): the tl/th declarations and the `return tl;` are not
   visible in this chunk.  */
uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
    muls64(&tl, &th, op1, op2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (unlikely((th + 1) > 1)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
/* UMULH: high 64 bits of the unsigned 128-bit product.
   NOTE(review): the tl/th declarations and the `return th;` are not
   visible in this chunk.  */
uint64_t helper_umulh (uint64_t op1, uint64_t op2)
    mulu64(&tl, &th, op1, op2);
216 uint64_t helper_ctpop (uint64_t arg)
221 uint64_t helper_ctlz (uint64_t arg)
226 uint64_t helper_cttz (uint64_t arg)
/* Expand the 8-bit selector `mskb` into a 64-bit mask with byte lane i
   set to 0xFF for each set bit i, and apply it to `op`.
   NOTE(review): the mask declaration/initialisation and the final return
   (presumably `op & ~mask`, since callers pass the set of bytes to
   *clear*) are not visible in this chunk — confirm the polarity.  */
static always_inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;
248 uint64_t helper_mskbl(uint64_t val, uint64_t mask)
250 return byte_zap(val, 0x01 << (mask & 7));
253 uint64_t helper_insbl(uint64_t val, uint64_t mask)
255 val <<= (mask & 7) * 8;
256 return byte_zap(val, ~(0x01 << (mask & 7)));
259 uint64_t helper_mskwl(uint64_t val, uint64_t mask)
261 return byte_zap(val, 0x03 << (mask & 7));
264 uint64_t helper_inswl(uint64_t val, uint64_t mask)
266 val <<= (mask & 7) * 8;
267 return byte_zap(val, ~(0x03 << (mask & 7)));
270 uint64_t helper_mskll(uint64_t val, uint64_t mask)
272 return byte_zap(val, 0x0F << (mask & 7));
275 uint64_t helper_insll(uint64_t val, uint64_t mask)
277 val <<= (mask & 7) * 8;
278 return byte_zap(val, ~(0x0F << (mask & 7)));
281 uint64_t helper_zap(uint64_t val, uint64_t mask)
283 return byte_zap(val, mask);
286 uint64_t helper_zapnot(uint64_t val, uint64_t mask)
288 return byte_zap(val, ~mask);
291 uint64_t helper_mskql(uint64_t val, uint64_t mask)
293 return byte_zap(val, 0xFF << (mask & 7));
296 uint64_t helper_insql(uint64_t val, uint64_t mask)
298 val <<= (mask & 7) * 8;
299 return byte_zap(val, ~(0xFF << (mask & 7)));
302 uint64_t helper_mskwh(uint64_t val, uint64_t mask)
304 return byte_zap(val, (0x03 << (mask & 7)) >> 8);
307 uint64_t helper_inswh(uint64_t val, uint64_t mask)
309 val >>= 64 - ((mask & 7) * 8);
310 return byte_zap(val, ~((0x03 << (mask & 7)) >> 8));
313 uint64_t helper_msklh(uint64_t val, uint64_t mask)
315 return byte_zap(val, (0x0F << (mask & 7)) >> 8);
318 uint64_t helper_inslh(uint64_t val, uint64_t mask)
320 val >>= 64 - ((mask & 7) * 8);
321 return byte_zap(val, ~((0x0F << (mask & 7)) >> 8));
324 uint64_t helper_mskqh(uint64_t val, uint64_t mask)
326 return byte_zap(val, (0xFF << (mask & 7)) >> 8);
329 uint64_t helper_insqh(uint64_t val, uint64_t mask)
331 val >>= 64 - ((mask & 7) * 8);
332 return byte_zap(val, ~((0xFF << (mask & 7)) >> 8));
/* CMPBGE: compare the eight bytes of op1 and op2 in parallel, unsigned;
 * set result bit i when byte i of op1 >= byte i of op2.
 * Fix: the loop bound was `i < 7`, so the most significant byte lane was
 * never compared — CMPBGE operates on all eight bytes.
 * NOTE(review): the dropped lines (res init, the comparison, the return)
 * were reconstructed from the instruction's definition.  */
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb) {
            res |= 1 << i;
        }
    }
    return res;
}
350 /* Floating point helpers */
352 /* F floating (VAX) */
/* Convert an IEEE single to VAX F-float register format: sign to bit 63,
   mantissa widened into the double-layout field, exponent rebiased.
   NaN/infinity and denormals collapse to the VAX "dirty zero" encoding.
   NOTE(review): the variable declaration for `a`, the branch conditions
   and several else/closing lines are not visible in this chunk.  */
static always_inline uint64_t float32_to_f (float32 fa)
    uint64_t r, exp, mant, sig;
    a = *(uint32_t*)(&fa);
    sig = ((uint64_t)a & 0x80000000) << 32;
    exp = (a >> 23) & 0xff;
    mant = ((uint64_t)a & 0x007fffff) << 29;
    /* NaN or infinity */
    r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
    r = sig | ((exp + 1) << 52) | mant;
    r = 1; /* VAX dirty zero */
    r = sig | ((exp + 2) << 52);
386 static always_inline float32 f_to_float32 (uint64_t a)
388 uint32_t r, exp, mant_sig;
390 exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
391 mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);
393 if (unlikely(!exp && mant_sig)) {
394 /* Reserved operands / Dirty zero */
395 helper_excp(EXCP_OPCDEC, 0);
402 r = ((exp - 2) << 23) | mant_sig;
405 return *(float32*)(&a);
/* Rearrange an F-float from register layout to its word-swapped
 * (PDP-11 style) 32-bit memory image.  */
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t mem;

    mem  = (a & 0x00001fffe0000000ull) >> 13;
    mem |= (a & 0x07ffe00000000000ull) >> 45;
    mem |= (a & 0xc000000000000000ull) >> 48;
    return mem;
}
/* Rearrange an F-float from its word-swapped memory image into register
   layout.  NOTE(review): the declaration of `r`, the body of the `if`
   (presumably the exponent sign-extension) and the return are not
   visible in this chunk.  */
uint64_t helper_memory_to_f (uint32_t a)
    r = ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000))
428 uint64_t helper_addf (uint64_t a, uint64_t b)
432 fa = f_to_float32(a);
433 fb = f_to_float32(b);
434 fr = float32_add(fa, fb, &FP_STATUS);
435 return float32_to_f(fr);
438 uint64_t helper_subf (uint64_t a, uint64_t b)
442 fa = f_to_float32(a);
443 fb = f_to_float32(b);
444 fr = float32_sub(fa, fb, &FP_STATUS);
445 return float32_to_f(fr);
448 uint64_t helper_mulf (uint64_t a, uint64_t b)
452 fa = f_to_float32(a);
453 fb = f_to_float32(b);
454 fr = float32_mul(fa, fb, &FP_STATUS);
455 return float32_to_f(fr);
458 uint64_t helper_divf (uint64_t a, uint64_t b)
462 fa = f_to_float32(a);
463 fb = f_to_float32(b);
464 fr = float32_div(fa, fb, &FP_STATUS);
465 return float32_to_f(fr);
468 uint64_t helper_sqrtf (uint64_t t)
472 ft = f_to_float32(t);
473 fr = float32_sqrt(ft, &FP_STATUS);
474 return float32_to_f(fr);
478 /* G floating (VAX) */
/* Convert an IEEE double to VAX G-float register format by rebiasing the
   exponent; NaN/infinity and denormals collapse to the VAX "dirty zero"
   encoding.  NOTE(review): the branch conditions and several else/closing
   lines are not visible in this chunk.  */
static always_inline uint64_t float64_to_g (float64 fa)
    uint64_t a, r, exp, mant, sig;
    a = *(uint64_t*)(&fa);
    sig = a & 0x8000000000000000ull;
    exp = (a >> 52) & 0x7ff;
    mant = a & 0x000fffffffffffffull;
    /* NaN or infinity */
    r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
    r = sig | ((exp + 1) << 52) | mant;
    r = 1; /* VAX dirty zero */
    r = sig | ((exp + 2) << 52);
511 static always_inline float64 g_to_float64 (uint64_t a)
513 uint64_t r, exp, mant_sig;
515 exp = (a >> 52) & 0x7ff;
516 mant_sig = a & 0x800fffffffffffffull;
518 if (!exp && mant_sig) {
519 /* Reserved operands / Dirty zero */
520 helper_excp(EXCP_OPCDEC, 0);
527 r = ((exp - 2) << 52) | mant_sig;
530 return *(float64*)(&a);
/* Reverse the four 16-bit words: G-float memory order is word-swapped
 * relative to register order.  */
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t w0 = a & 0xffffull;          /* bits 15:0  */
    uint64_t w1 = (a >> 16) & 0xffffull;  /* bits 31:16 */
    uint64_t w2 = (a >> 32) & 0xffffull;  /* bits 47:32 */
    uint64_t w3 = (a >> 48) & 0xffffull;  /* bits 63:48 */

    return (w0 << 48) | (w1 << 32) | (w2 << 16) | w3;
}
/* Inverse of helper_g_to_memory — the word swap is its own inverse.  */
uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t w0 = a & 0xffffull;          /* bits 15:0  */
    uint64_t w1 = (a >> 16) & 0xffffull;  /* bits 31:16 */
    uint64_t w2 = (a >> 32) & 0xffffull;  /* bits 47:32 */
    uint64_t w3 = (a >> 48) & 0xffffull;  /* bits 63:48 */

    return (w0 << 48) | (w1 << 32) | (w2 << 16) | w3;
}
553 uint64_t helper_addg (uint64_t a, uint64_t b)
557 fa = g_to_float64(a);
558 fb = g_to_float64(b);
559 fr = float64_add(fa, fb, &FP_STATUS);
560 return float64_to_g(fr);
563 uint64_t helper_subg (uint64_t a, uint64_t b)
567 fa = g_to_float64(a);
568 fb = g_to_float64(b);
569 fr = float64_sub(fa, fb, &FP_STATUS);
570 return float64_to_g(fr);
573 uint64_t helper_mulg (uint64_t a, uint64_t b)
577 fa = g_to_float64(a);
578 fb = g_to_float64(b);
579 fr = float64_mul(fa, fb, &FP_STATUS);
580 return float64_to_g(fr);
583 uint64_t helper_divg (uint64_t a, uint64_t b)
587 fa = g_to_float64(a);
588 fb = g_to_float64(b);
589 fr = float64_div(fa, fb, &FP_STATUS);
590 return float64_to_g(fr);
593 uint64_t helper_sqrtg (uint64_t a)
597 fa = g_to_float64(a);
598 fr = float64_sqrt(fa, &FP_STATUS);
599 return float64_to_g(fr);
603 /* S floating (single) */
/* Convert an IEEE single to S-float register format: the single's fields
   spread into the double layout.  NOTE(review): the declarations, the
   body of the `if` (presumably the exponent widening OR) and the return
   are not visible in this chunk.  */
static always_inline uint64_t float32_to_s (float32 fa)
    a = *(uint32_t*)(&fa);
    r = (((uint64_t)(a & 0xc0000000)) << 32) | (((uint64_t)(a & 0x3fffffff)) << 29);
    if (((a & 0x7f800000) != 0x7f800000) && (!(a & 0x40000000)))
617 static always_inline float32 s_to_float32 (uint64_t a)
619 uint32_t r = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
620 return *(float32*)(&r);
623 uint32_t helper_s_to_memory (uint64_t a)
625 /* Memory format is the same as float32 */
626 float32 fa = s_to_float32(a);
627 return *(uint32_t*)(&fa);
630 uint64_t helper_memory_to_s (uint32_t a)
632 /* Memory format is the same as float32 */
633 return float32_to_s(*(float32*)(&a));
636 uint64_t helper_adds (uint64_t a, uint64_t b)
640 fa = s_to_float32(a);
641 fb = s_to_float32(b);
642 fr = float32_add(fa, fb, &FP_STATUS);
643 return float32_to_s(fr);
646 uint64_t helper_subs (uint64_t a, uint64_t b)
650 fa = s_to_float32(a);
651 fb = s_to_float32(b);
652 fr = float32_sub(fa, fb, &FP_STATUS);
653 return float32_to_s(fr);
656 uint64_t helper_muls (uint64_t a, uint64_t b)
660 fa = s_to_float32(a);
661 fb = s_to_float32(b);
662 fr = float32_mul(fa, fb, &FP_STATUS);
663 return float32_to_s(fr);
666 uint64_t helper_divs (uint64_t a, uint64_t b)
670 fa = s_to_float32(a);
671 fb = s_to_float32(b);
672 fr = float32_div(fa, fb, &FP_STATUS);
673 return float32_to_s(fr);
676 uint64_t helper_sqrts (uint64_t a)
680 fa = s_to_float32(a);
681 fr = float32_sqrt(fa, &FP_STATUS);
682 return float32_to_s(fr);
686 /* T floating (double) */
687 static always_inline float64 t_to_float64 (uint64_t a)
689 /* Memory format is the same as float64 */
690 return *(float64*)(&a);
693 static always_inline uint64_t float64_to_t (float64 fa)
695 /* Memory format is the same as float64 */
696 return *(uint64*)(&fa);
699 uint64_t helper_addt (uint64_t a, uint64_t b)
703 fa = t_to_float64(a);
704 fb = t_to_float64(b);
705 fr = float64_add(fa, fb, &FP_STATUS);
706 return float64_to_t(fr);
709 uint64_t helper_subt (uint64_t a, uint64_t b)
713 fa = t_to_float64(a);
714 fb = t_to_float64(b);
715 fr = float64_sub(fa, fb, &FP_STATUS);
716 return float64_to_t(fr);
719 uint64_t helper_mult (uint64_t a, uint64_t b)
723 fa = t_to_float64(a);
724 fb = t_to_float64(b);
725 fr = float64_mul(fa, fb, &FP_STATUS);
726 return float64_to_t(fr);
729 uint64_t helper_divt (uint64_t a, uint64_t b)
733 fa = t_to_float64(a);
734 fb = t_to_float64(b);
735 fr = float64_div(fa, fb, &FP_STATUS);
736 return float64_to_t(fr);
739 uint64_t helper_sqrtt (uint64_t a)
743 fa = t_to_float64(a);
744 fr = float64_sqrt(fa, &FP_STATUS);
745 return float64_to_t(fr);
/* CPYS: sign bit from `a`, exponent and mantissa from `b`.  */
uint64_t helper_cpys(uint64_t a, uint64_t b)
{
    const uint64_t sign = 0x8000000000000000ULL;
    return (a & sign) | (b & ~sign);
}
/* CPYSN: complemented sign bit from `a`, the rest from `b`.  */
uint64_t helper_cpysn(uint64_t a, uint64_t b)
{
    const uint64_t sign = 0x8000000000000000ULL;
    return ((a ^ sign) & sign) | (b & ~sign);
}
/* CPYSE: sign and exponent (top 12 bits) from `a`, mantissa from `b`.  */
uint64_t helper_cpyse(uint64_t a, uint64_t b)
{
    const uint64_t sign_exp = 0xFFF0000000000000ULL;
    return (a & sign_exp) | (b & ~sign_exp);
}
767 uint64_t helper_cmptun (uint64_t a, uint64_t b)
771 fa = t_to_float64(a);
772 fb = t_to_float64(b);
774 if (float64_is_nan(fa) || float64_is_nan(fb))
775 return 0x4000000000000000ULL;
780 uint64_t helper_cmpteq(uint64_t a, uint64_t b)
784 fa = t_to_float64(a);
785 fb = t_to_float64(b);
787 if (float64_eq(fa, fb, &FP_STATUS))
788 return 0x4000000000000000ULL;
793 uint64_t helper_cmptle(uint64_t a, uint64_t b)
797 fa = t_to_float64(a);
798 fb = t_to_float64(b);
800 if (float64_le(fa, fb, &FP_STATUS))
801 return 0x4000000000000000ULL;
806 uint64_t helper_cmptlt(uint64_t a, uint64_t b)
810 fa = t_to_float64(a);
811 fb = t_to_float64(b);
813 if (float64_lt(fa, fb, &FP_STATUS))
814 return 0x4000000000000000ULL;
819 uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
823 fa = g_to_float64(a);
824 fb = g_to_float64(b);
826 if (float64_eq(fa, fb, &FP_STATUS))
827 return 0x4000000000000000ULL;
832 uint64_t helper_cmpgle(uint64_t a, uint64_t b)
836 fa = g_to_float64(a);
837 fb = g_to_float64(b);
839 if (float64_le(fa, fb, &FP_STATUS))
840 return 0x4000000000000000ULL;
845 uint64_t helper_cmpglt(uint64_t a, uint64_t b)
849 fa = g_to_float64(a);
850 fb = g_to_float64(b);
852 if (float64_lt(fa, fb, &FP_STATUS))
853 return 0x4000000000000000ULL;
/* True iff `a` is +/-0, i.e. every non-sign bit is clear.  */
uint64_t helper_cmpfeq (uint64_t a)
{
    return (a & 0x7FFFFFFFFFFFFFFFULL) == 0;
}
/* Non-zero (the raw magnitude bits) iff `a` is not +/-0.  */
uint64_t helper_cmpfne (uint64_t a)
{
    uint64_t magnitude = a & 0x7FFFFFFFFFFFFFFFULL;
    return magnitude;
}
/* True iff `a` is strictly negative: sign set and magnitude non-zero.  */
uint64_t helper_cmpflt (uint64_t a)
{
    return (a >> 63) && (a & 0x7FFFFFFFFFFFFFFFULL) != 0;
}
/* True iff `a` is negative or zero: sign set, or magnitude zero.  */
uint64_t helper_cmpfle (uint64_t a)
{
    return (a >> 63) || (a & 0x7FFFFFFFFFFFFFFFULL) == 0;
}
/* True iff `a` is strictly positive: sign clear and magnitude non-zero. */
uint64_t helper_cmpfgt (uint64_t a)
{
    return !(a >> 63) && (a & 0x7FFFFFFFFFFFFFFFULL) != 0;
}
/* True iff `a` is positive or zero: sign clear, or magnitude zero.  */
uint64_t helper_cmpfge (uint64_t a)
{
    return !(a >> 63) || (a & 0x7FFFFFFFFFFFFFFFULL) == 0;
}
889 /* Floating point format conversion */
890 uint64_t helper_cvtts (uint64_t a)
895 fa = t_to_float64(a);
896 fr = float64_to_float32(fa, &FP_STATUS);
897 return float32_to_s(fr);
900 uint64_t helper_cvtst (uint64_t a)
905 fa = s_to_float32(a);
906 fr = float32_to_float64(fa, &FP_STATUS);
907 return float64_to_t(fr);
910 uint64_t helper_cvtqs (uint64_t a)
912 float32 fr = int64_to_float32(a, &FP_STATUS);
913 return float32_to_s(fr);
916 uint64_t helper_cvttq (uint64_t a)
918 float64 fa = t_to_float64(a);
919 return float64_to_int64_round_to_zero(fa, &FP_STATUS);
922 uint64_t helper_cvtqt (uint64_t a)
924 float64 fr = int64_to_float64(a, &FP_STATUS);
925 return float64_to_t(fr);
928 uint64_t helper_cvtqf (uint64_t a)
930 float32 fr = int64_to_float32(a, &FP_STATUS);
931 return float32_to_f(fr);
934 uint64_t helper_cvtgf (uint64_t a)
939 fa = g_to_float64(a);
940 fr = float64_to_float32(fa, &FP_STATUS);
941 return float32_to_f(fr);
944 uint64_t helper_cvtgq (uint64_t a)
946 float64 fa = g_to_float64(a);
947 return float64_to_int64_round_to_zero(fa, &FP_STATUS);
950 uint64_t helper_cvtqg (uint64_t a)
953 fr = int64_to_float64(a, &FP_STATUS);
954 return float64_to_g(fr);
/* CVTLQ: convert a longword stored in the canonical in-register FP
 * layout (sign/high bits in a<63:62>, low 30 bits in a<58:29>) to a
 * sign-extended quadword.
 * Fix: the original OR'ed in all of (a >> 32), so bits a<61:32> — which
 * overlap the a<58:29> field's image — leaked into the low half of the
 * result.  Mask (a >> 32) down to the two architected sign bits.  */
uint64_t helper_cvtlq (uint64_t a)
{
    return (int64_t)((int32_t)(((a >> 32) & 0xC0000000) | ((a >> 29) & 0x3FFFFFFF)));
}
/* Shared body of the CVTQL variants: pack the low longword of `a` into
   the canonical in-register layout (sign bits to r<63:62>, the rest to
   r<58:29>); `v` requests an arithmetic-overflow trap when the source
   does not fit in 32 bits.  NOTE(review): the handling of `s` and the
   final return are not visible in this chunk.  Also: identifiers with a
   leading `__` are reserved in C — consider renaming.  */
static always_inline uint64_t __helper_cvtql (uint64_t a, int s, int v)
    r = ((uint64_t)(a & 0xC0000000)) << 32;
    r |= ((uint64_t)(a & 0x7FFFFFFF)) << 29;
    if (v && (int64_t)((int32_t)r) != (int64_t)r) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
978 uint64_t helper_cvtql (uint64_t a)
980 return __helper_cvtql(a, 0, 0);
983 uint64_t helper_cvtqlv (uint64_t a)
985 return __helper_cvtql(a, 0, 1);
988 uint64_t helper_cvtqlsv (uint64_t a)
990 return __helper_cvtql(a, 1, 1);
993 #if !defined (CONFIG_USER_ONLY)
/* MFPR: read internal processor register `iprn`; on success the value is
   presumably forwarded to the T0 global.  NOTE(review): the declaration
   of `val` and the success path are not visible in this chunk.  */
void helper_mfpr (int iprn)
    if (cpu_alpha_mfpr(env, iprn, &val) == 0)
/* MTPR: write the T0 global into internal processor register `iprn`.  */
void helper_mtpr (int iprn)
    cpu_alpha_mtpr(env, iprn, T0, NULL);
1008 /*****************************************************************************/
1009 /* Softmmu support */
1010 #if !defined (CONFIG_USER_ONLY)
1012 /* XXX: the two following helpers are pure hacks.
1013 * Hopefully, we emulate the PALcode, then we should never see
1014 * HW_LD / HW_ST instructions.
/* Hack helper for HW_LD (see the comment above): probe the softmmu TLB
   for a *read* mapping of the virtual address in T0 and compute the host
   address via the TLB addend; on a miss, fill the TLB.
   NOTE(review): several locals (index, mmu_idx, retaddr) and the final
   load through `physaddr` are not visible in this chunk.  */
void helper_ld_phys_to_virt (void)
    uint64_t tlb_addr, physaddr;
    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    /* the page is not in the TLB : fill it */
        tlb_fill(T0, 0, mmu_idx, retaddr);
/* Hack helper for HW_ST: probe the softmmu TLB for a *write* mapping of
   the virtual address in T0 and compute the host address via the TLB
   addend; on a miss, fill the TLB (is_write = 1).
   NOTE(review): several locals (index, mmu_idx, retaddr) and the final
   store through `physaddr` are not visible in this chunk.  */
void helper_st_phys_to_virt (void)
    uint64_t tlb_addr, physaddr;
    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    /* the page is not in the TLB : fill it */
        tlb_fill(T0, 1, mmu_idx, retaddr);
1060 #define MMUSUFFIX _mmu
1063 #include "softmmu_template.h"
1066 #include "softmmu_template.h"
1069 #include "softmmu_template.h"
1072 #include "softmmu_template.h"
1074 /* try to fill the TLB and return an exception if error. If retaddr is
1075 NULL, it means that the function was called in C code (i.e. not
1076 from generated code or from helper.c) */
1077 /* XXX: fix it to restore all registers */
1078 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
1080 TranslationBlock *tb;
1081 CPUState *saved_env;
1085 /* XXX: hack to restore env in all cases, even if not called from
1088 env = cpu_single_env;
1089 ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
1090 if (!likely(ret == 0)) {
1091 if (likely(retaddr)) {
1092 /* now we have a real cpu fault */
1093 pc = (unsigned long)retaddr;
1094 tb = tb_find_pc(pc);
1096 /* the PC is inside the translated code. It means that we have
1097 a virtual CPU fault */
1098 cpu_restore_state(tb, env, pc, NULL);
1101 /* Exception index and error code are already set */