2 #include "host-utils.h"
4 #if !defined(CONFIG_USER_ONLY)
5 #include "softmmu_exec.h"
6 #endif /* !defined(CONFIG_USER_ONLY) */
10 //#define DEBUG_UNALIGNED
11 //#define DEBUG_UNASSIGNED
/* Debug printf helpers.  NOTE(review): the original #ifdef DEBUG_MMU /
 * #else / #endif scaffolding is not visible in this excerpt, so the
 * verbose and no-op definitions of each macro appear back to back. */
#define DPRINTF_MMU(fmt, args...) \
    do { printf("MMU: " fmt , ##args); } while (0)
/* no-op variant (originally the #else branch) */
#define DPRINTF_MMU(fmt, args...) do {} while (0)
#define DPRINTF_MXCC(fmt, args...) \
    do { printf("MXCC: " fmt , ##args); } while (0)
/* no-op variant (originally the #else branch) */
#define DPRINTF_MXCC(fmt, args...) do {} while (0)
#define DPRINTF_ASI(fmt, args...) \
    do { printf("ASI: " fmt , ##args); } while (0)
/* AM_CHECK: on sparc64 the PSTATE.AM bit requests 32-bit address
 * masking; the second definition (originally under #else) makes the
 * check constant-true elsewhere. */
#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
#define AM_CHECK(env1) (1)
#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
// Calculates TSB pointer value for fault page size 8k or 64k
// NOTE(review): the third parameter, braces and some branch bodies are
// missing from this excerpt; comments describe only the visible logic.
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
                                       uint64_t tag_access_register,
    /* TSB base: the register minus its low 13 config bits */
    uint64_t tsb_base = tsb_register & ~0x1fffULL;
    /* split and size fields are read from the D-MMU TSB register copy */
    int tsb_split = (env->dmmuregs[5] & 0x1000ULL) ? 1 : 0;
    int tsb_size = env->dmmuregs[5] & 0xf;
    // discard lower 13 bits which hold tag access context
    uint64_t tag_access_va = tag_access_register & ~0x1fffULL;
    uint64_t tsb_base_mask = ~0x1fffULL;
    uint64_t va = tag_access_va;
    // move va bits to correct position
    if (page_size == 8*1024) {
    } else if (page_size == 64*1024) {
    /* widen the base mask by the TSB size field */
    tsb_base_mask <<= tsb_size;
    // calculate tsb_base mask and adjust va if split is in use
    if (page_size == 8*1024) {
        va &= ~(1ULL << (13 + tsb_size));
    } else if (page_size == 64*1024) {
        va |= (1ULL << (13 + tsb_size));
    /* combine base with the VA-derived index; pointers are 16-byte aligned */
    return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
/*
 * Compute the MMU tag-target register value from a tag-access register.
 * The low 13 bits (the context) are relocated to bits 48..60 and the
 * VA field (bits 22 and up) is moved down to start at bit 0.
 */
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
{
    uint64_t context = tag_access_register & 0x1fff;
    uint64_t va_field = tag_access_register >> 22;

    return (context << 48) | va_field;
}
/* Truncate a virtual address to 32 bits (sparc64 PSTATE.AM masking).
 * NOTE(review): the surrounding #ifdef/AM_CHECK guard and braces are
 * missing from this excerpt. */
static inline void address_mask(CPUState *env1, target_ulong *addr)
    *addr &= 0xffffffffULL;
/* Record the trap type; the elided remainder exits the CPU loop. */
static void raise_exception(int tt)
    env->exception_index = tt;
/* TCG-visible wrapper for raise_exception() (body elided). */
void HELPER(raise_exception)(int tt)
/* Select the current register window. */
static inline void set_cwp(int new_cwp)
    cpu_set_cwp(env, new_cwp);
/* Raise TT_UNALIGNED when addr has any bit of 'align' set (guard and
 * braces elided). */
void helper_check_align(target_ulong addr, uint32_t align)
#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
    raise_exception(TT_UNALIGNED);
/* Declares a void FP helper with no TCG arguments. */
#define F_HELPER(name, p) void helper_f##name##p(void)
/* Generates the single/double/quad variants of a binary FP operation;
 * double and quad variants operate on the global DT0/DT1 and QT0/QT1
 * FP registers via softfloat. */
#define F_BINOP(name) \
    float32 helper_f ## name ## s (float32 src1, float32 src2) \
        return float32_ ## name (src1, src2, &env->fp_status); \
        DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \
        QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \
/* fsmuld: multiply two singles into a double result in DT0. */
void helper_fsmuld(float32 src1, float32 src2)
    DT0 = float64_mul(float32_to_float64(src1, &env->fp_status),
                      float32_to_float64(src2, &env->fp_status),
/* fdmulq: multiply two doubles into a quad result in QT0. */
void helper_fdmulq(void)
    QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
                       float64_to_float128(DT1, &env->fp_status),
163 float32 helper_fnegs(float32 src)
165 return float32_chs(src);
#ifdef TARGET_SPARC64
/* fnegd/fnegq (signatures elided): negate DT1/QT1 into DT0/QT0. */
    DT0 = float64_chs(DT1);
    QT0 = float128_chs(QT1);
/* Integer to float conversion. */
float32 helper_fitos(int32_t src)
    return int32_to_float32(src, &env->fp_status);
void helper_fitod(int32_t src)
    DT0 = int32_to_float64(src, &env->fp_status);
void helper_fitoq(int32_t src)
    QT0 = int32_to_float128(src, &env->fp_status);
#ifdef TARGET_SPARC64
/* 64-bit integer (reinterpreting DT1's bit pattern) to float. */
float32 helper_fxtos(void)
    return int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
    DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
    QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
/* floating point conversion */
/* Conversions between FP formats; double/quad operands and results live
 * in the global DT0/DT1 and QT0/QT1 registers. */
float32 helper_fdtos(void)
    return float64_to_float32(DT1, &env->fp_status);
void helper_fstod(float32 src)
    DT0 = float32_to_float64(src, &env->fp_status);
float32 helper_fqtos(void)
    return float128_to_float32(QT1, &env->fp_status);
void helper_fstoq(float32 src)
    QT0 = float32_to_float128(src, &env->fp_status);
void helper_fqtod(void)
    DT0 = float128_to_float64(QT1, &env->fp_status);
void helper_fdtoq(void)
    QT0 = float64_to_float128(DT1, &env->fp_status);
/* Float to integer conversion. */
/* All of these truncate toward zero per the SPARC f*toi semantics. */
int32_t helper_fstoi(float32 src)
    return float32_to_int32_round_to_zero(src, &env->fp_status);
int32_t helper_fdtoi(void)
    return float64_to_int32_round_to_zero(DT1, &env->fp_status);
int32_t helper_fqtoi(void)
    return float128_to_int32_round_to_zero(QT1, &env->fp_status);
#ifdef TARGET_SPARC64
/* Float to 64-bit integer; the integer bit pattern is written into DT0. */
void helper_fstox(float32 src)
    *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status);
void helper_fdtox(void)
    *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
void helper_fqtox(void)
    *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
/* VIS faligndata: extract 8 bytes from the DT0:DT1 pair starting at the
 * byte offset held in the low 3 bits of GSR. */
void helper_faligndata(void)
    tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
    /* on many architectures a shift of 64 does nothing */
    if ((env->gsr & 7) != 0) {
        tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
    *((uint64_t *)&DT0) = tmp;
/* VIS element accessors: index the bytes/halfwords/words of a 64-bit or
 * 32-bit vector union in a fixed logical order regardless of host
 * endianness.  The second set (originally under #else) is the
 * little-endian host variant. */
#ifdef WORDS_BIGENDIAN
#define VIS_B64(n) b[7 - (n)]
#define VIS_W64(n) w[3 - (n)]
#define VIS_SW64(n) sw[3 - (n)]
#define VIS_L64(n) l[1 - (n)]
#define VIS_B32(n) b[3 - (n)]
#define VIS_W32(n) w[1 - (n)]
/* little-endian host: identity indexing */
#define VIS_B64(n) b[n]
#define VIS_W64(n) w[n]
#define VIS_SW64(n) sw[n]
#define VIS_L64(n) l[n]
#define VIS_B32(n) b[n]
#define VIS_W32(n) w[n]
/* VIS fpmerge: interleave the four low bytes of the source (s) and
 * destination (d) operands into eight bytes.  NOTE(review): the local
 * union declarations and loads from DT0/DT1 are elided here. */
void helper_fpmerge(void)
    // Reverse calculation order to handle overlap
    d.VIS_B64(7) = s.VIS_B64(3);
    d.VIS_B64(6) = d.VIS_B64(3);
    d.VIS_B64(5) = s.VIS_B64(2);
    d.VIS_B64(4) = d.VIS_B64(2);
    d.VIS_B64(3) = s.VIS_B64(1);
    d.VIS_B64(2) = d.VIS_B64(1);
    d.VIS_B64(1) = s.VIS_B64(0);
    //d.VIS_B64(0) = d.VIS_B64(0);
/* VIS partitioned 8x16 multiplies.  Each (elided) PMUL macro computes
 * one 16-bit lane as product >> 8, rounding up when the discarded low
 * byte exceeds 0x7f.  The #define PMUL(r) lines themselves are missing
 * from this excerpt — only their continuation lines are visible. */
void helper_fmul8x16(void)
        tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
        if ((tmp & 0xff) > 0x7f) \
        d.VIS_W64(r) = tmp >> 8;
/* fmul8x16al: every lane multiplies by the fixed 16-bit lane d[1]. */
void helper_fmul8x16al(void)
        tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
        if ((tmp & 0xff) > 0x7f) \
        d.VIS_W64(r) = tmp >> 8;
/* fmul8x16au: every lane multiplies by the fixed 16-bit lane d[0]. */
void helper_fmul8x16au(void)
        tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
        if ((tmp & 0xff) > 0x7f) \
        d.VIS_W64(r) = tmp >> 8;
/* fmul8sux16: signed upper byte of each 16-bit source lane. */
void helper_fmul8sux16(void)
        tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
        if ((tmp & 0xff) > 0x7f) \
        d.VIS_W64(r) = tmp >> 8;
/* fmul8ulx16: unsigned lower byte of each 16-bit source lane. */
void helper_fmul8ulx16(void)
        tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
        if ((tmp & 0xff) > 0x7f) \
        d.VIS_W64(r) = tmp >> 8;
/* fmuld8sux16 / fmuld8ulx16: widening variants producing 32-bit lanes
 * (store lines elided). */
void helper_fmuld8sux16(void)
        tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
        if ((tmp & 0xff) > 0x7f) \
    // Reverse calculation order to handle overlap
void helper_fmuld8ulx16(void)
        tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
        if ((tmp & 0xff) > 0x7f) \
    // Reverse calculation order to handle overlap
/* VIS fexpand: widen the four bytes of DT0's low word into four 16-bit
 * lanes, each shifted left by 4. */
void helper_fexpand(void)
    s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
    d.VIS_W64(0) = s.VIS_B32(0) << 4;
    d.VIS_W64(1) = s.VIS_B32(1) << 4;
    d.VIS_W64(2) = s.VIS_B32(2) << 4;
    d.VIS_W64(3) = s.VIS_B32(3) << 4;
/* Generates VIS partitioned arithmetic helpers: a four-lane 16-bit
 * form, a two-lane 32-bit form, and 32-bit "s" forms taking explicit
 * operands.  F is the per-lane operation (FADD/FSUB below). */
#define VIS_HELPER(name, F) \
    void name##16(void) \
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
        d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
        d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
        d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
    uint32_t name##16s(uint32_t src1, uint32_t src2) \
        d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
        d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
    void name##32(void) \
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
        d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
    uint32_t name##32s(uint32_t src1, uint32_t src2) \
#define FADD(a, b) ((a) + (b))
#define FSUB(a, b) ((a) - (b))
VIS_HELPER(helper_fpadd, FADD)
VIS_HELPER(helper_fpsub, FSUB)
/* Generates VIS partitioned compare helpers: each lane's comparison
 * result F becomes one bit of a mask accumulated in the first lane of
 * the destination. */
#define VIS_CMPHELPER(name, F) \
    void name##16(void) \
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0; \
        d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0; \
        d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0; \
        d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0; \
    void name##32(void) \
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0; \
        d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0; \
#define FCMPGT(a, b) ((a) > (b))
#define FCMPEQ(a, b) ((a) == (b))
#define FCMPLE(a, b) ((a) <= (b))
#define FCMPNE(a, b) ((a) != (b))
VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
VIS_CMPHELPER(helper_fcmple, FCMPLE)
VIS_CMPHELPER(helper_fcmpne, FCMPNE)
/* Fold softfloat's accumulated exception flags into the FSR.  If an
 * exception is pending whose trap-enable bit is set, trap; otherwise
 * accumulate the current exceptions into the FSR aexc field. */
void helper_check_ieee_exceptions(void)
    status = get_float_exception_flags(&env->fp_status);
    /* Copy IEEE 754 flags into FSR */
    if (status & float_flag_invalid)
    if (status & float_flag_overflow)
    if (status & float_flag_underflow)
    if (status & float_flag_divbyzero)
    if (status & float_flag_inexact)
    /* cexc bits ANDed against the trap-enable mask (TEM sits 23 bits up) */
    if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
        /* Unmasked exception, generate a trap */
        env->fsr |= FSR_FTT_IEEE_EXCP;
        raise_exception(TT_FP_EXCP);
    /* Accumulate exceptions */
    env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
/* Reset softfloat's exception flag accumulator. */
void helper_clear_float_exceptions(void)
    set_float_exception_flags(0, &env->fp_status);
647 float32 helper_fabss(float32 src)
649 return float32_abs(src);
#ifdef TARGET_SPARC64
/* Absolute value on the double/quad FP registers (sparc64 only). */
void helper_fabsd(void)
    DT0 = float64_abs(DT1);
void helper_fabsq(void)
    QT0 = float128_abs(QT1);
/* Square roots; double/quad forms use the global FP registers. */
float32 helper_fsqrts(float32 src)
    return float32_sqrt(src, &env->fp_status);
void helper_fsqrtd(void)
    DT0 = float64_sqrt(DT1, &env->fp_status);
void helper_fsqrtq(void)
    QT0 = float128_sqrt(QT1, &env->fp_status);
/* Generators for the FP compare helpers.  FS is the bit position of
 * the target fcc field in the FSR; TRAP non-zero makes unordered
 * operands trap unconditionally (the fcmpe* forms).  GEN_FCMP compares
 * the global double/quad registers, GEN_FCMPS takes float32 operands
 * directly.  NOTE(review): fall-through/break/default lines of the
 * switches are elided in this excerpt. */
#define GEN_FCMP(name, size, reg1, reg2, FS, TRAP) \
    void glue(helper_, name) (void) \
        target_ulong new_fsr; \
        env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
        switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) { \
        case float_relation_unordered: \
            new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
            if ((env->fsr & FSR_NVM) || TRAP) { \
                env->fsr |= new_fsr; \
                env->fsr |= FSR_NVC; \
                env->fsr |= FSR_FTT_IEEE_EXCP; \
                raise_exception(TT_FP_EXCP); \
                env->fsr |= FSR_NVA; \
        case float_relation_less: \
            new_fsr = FSR_FCC0 << FS; \
        case float_relation_greater: \
            new_fsr = FSR_FCC1 << FS; \
        env->fsr |= new_fsr; \
#define GEN_FCMPS(name, size, FS, TRAP) \
    void glue(helper_, name)(float32 src1, float32 src2) \
        target_ulong new_fsr; \
        env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
        switch (glue(size, _compare) (src1, src2, &env->fp_status)) { \
        case float_relation_unordered: \
            new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
            if ((env->fsr & FSR_NVM) || TRAP) { \
                env->fsr |= new_fsr; \
                env->fsr |= FSR_NVC; \
                env->fsr |= FSR_FTT_IEEE_EXCP; \
                raise_exception(TT_FP_EXCP); \
                env->fsr |= FSR_NVA; \
        case float_relation_less: \
            new_fsr = FSR_FCC0 << FS; \
        case float_relation_greater: \
            new_fsr = FSR_FCC1 << FS; \
        env->fsr |= new_fsr; \
GEN_FCMPS(fcmps, float32, 0, 0);
GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);
GEN_FCMPS(fcmpes, float32, 0, 1);
GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);
GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
/* Condition-code evaluation.  In this excerpt only the CC_OP_FLAGS
 * strategy is visible: flags are read directly from psr (icc) or xcc. */
static uint32_t compute_all_flags(void)
    return env->psr & PSR_ICC;
static uint32_t compute_C_flags(void)
    return env->psr & PSR_CARRY;
#ifdef TARGET_SPARC64
static uint32_t compute_all_flags_xcc(void)
    return env->xcc & PSR_ICC;
static uint32_t compute_C_flags_xcc(void)
    return env->xcc & PSR_CARRY;
/* Per-CC_OP dispatch entry: one evaluator for all flags, one for C. */
typedef struct CCTable {
    uint32_t (*compute_all)(void); /* return all the flags */
    uint32_t (*compute_c)(void); /* return the C flag */
static const CCTable icc_table[CC_OP_NB] = {
    /* CC_OP_DYNAMIC should never happen */
    [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
#ifdef TARGET_SPARC64
static const CCTable xcc_table[CC_OP_NB] = {
    /* CC_OP_DYNAMIC should never happen */
    [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
/* Recompute psr (and xcc on sparc64) for the current CC_OP. */
void helper_compute_psr(void)
    new_psr = icc_table[CC_OP].compute_all();
#ifdef TARGET_SPARC64
    new_psr = xcc_table[CC_OP].compute_all();
/* Return the icc carry bit shifted down to bit 0. */
uint32_t helper_compute_C_icc(void)
    ret = icc_table[CC_OP].compute_c() >> PSR_CARRY_SHIFT;
#ifdef TARGET_SPARC64
/* sparc64 compare helpers targeting fcc1 (FSR bit 22), fcc2 (bit 24)
 * and fcc3 (bit 26); the fcmpe* forms trap on unordered operands. */
GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
static void dump_mxcc(CPUState *env)
    /* Debug dump of the SuperSPARC MXCC stream-data and control regs. */
    printf("mxccdata: %016llx %016llx %016llx %016llx\n",
           env->mxccdata[0], env->mxccdata[1],
           env->mxccdata[2], env->mxccdata[3]);
    printf("mxccregs: %016llx %016llx %016llx %016llx\n"
           "          %016llx %016llx %016llx %016llx\n",
           env->mxccregs[0], env->mxccregs[1],
           env->mxccregs[2], env->mxccregs[3],
           env->mxccregs[4], env->mxccregs[5],
           env->mxccregs[6], env->mxccregs[7]);
#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
    && defined(DEBUG_ASI)
/* Trace one ASI access, masking the traced value to the access size
 * (the switch on size is elided in this excerpt). */
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xff);
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffff);
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffffffff);
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
#ifndef TARGET_SPARC64
#ifndef CONFIG_USER_ONLY
/* Load through an alternate address space (SPARC32, system emulation).
 * Dispatches on the ASI number: MXCC registers, MMU probe/registers,
 * code/user/kernel data spaces, physical passthrough, and diagnostics.
 * NOTE(review): many structural lines (switch headers, braces, breaks,
 * size sub-switches) are elided from this excerpt. */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
    uint32_t last_addr = addr;
    helper_check_align(addr, size - 1);
    case 2: /* SuperSparc MXCC registers */
        case 0x01c00a00: /* MXCC control register */
            ret = env->mxccregs[3];
            DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
        case 0x01c00a04: /* MXCC control register */
            ret = env->mxccregs[3];
            DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
        case 0x01c00c00: /* Module reset register */
            ret = env->mxccregs[5];
            // should we do something here?
            DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
        case 0x01c00f00: /* MBus port address register */
            ret = env->mxccregs[7];
            DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
        DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
        DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
                     "addr = %08x -> ret = %" PRIx64 ","
                     "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
    case 3: /* MMU probe */
        mmulev = (addr >> 8) & 15;
        ret = mmu_probe(env, addr, mmulev);
        DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
    case 4: /* read MMU regs */
        int reg = (addr >> 8) & 0x1f;
        ret = env->mmuregs[reg];
        if (reg == 3) /* Fault status cleared on read */
        else if (reg == 0x13) /* Fault status read */
            ret = env->mmuregs[3];
        else if (reg == 0x14) /* Fault address read */
            ret = env->mmuregs[4];
        DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
    case 5: // Turbosparc ITLB Diagnostic
    case 6: // Turbosparc DTLB Diagnostic
    case 7: // Turbosparc IOTLB Diagnostic
    case 9: /* Supervisor code access */
            ret = ldub_code(addr);
            ret = lduw_code(addr);
            ret = ldl_code(addr);
            ret = ldq_code(addr);
    case 0xa: /* User data access */
            ret = ldub_user(addr);
            ret = lduw_user(addr);
            ret = ldl_user(addr);
            ret = ldq_user(addr);
    case 0xb: /* Supervisor data access */
            ret = ldub_kernel(addr);
            ret = lduw_kernel(addr);
            ret = ldl_kernel(addr);
            ret = ldq_kernel(addr);
    case 0xc: /* I-cache tag */
    case 0xd: /* I-cache data */
    case 0xe: /* D-cache tag */
    case 0xf: /* D-cache data */
    case 0x20: /* MMU passthrough */
            ret = ldub_phys(addr);
            ret = lduw_phys(addr);
            ret = ldl_phys(addr);
            ret = ldq_phys(addr);
    /* GCC case-range extension: ASI low nibble supplies bits 32..35 */
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
            ret = ldub_phys((target_phys_addr_t)addr
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
            ret = lduw_phys((target_phys_addr_t)addr
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
            ret = ldl_phys((target_phys_addr_t)addr
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
            ret = ldq_phys((target_phys_addr_t)addr
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
    case 0x30: // Turbosparc secondary cache diagnostic
    case 0x31: // Turbosparc RAM snoop
    case 0x32: // Turbosparc page table descriptor diagnostic
    case 0x39: /* data cache diagnostic register */
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
        int reg = (addr >> 8) & 3;
        case 0: /* Breakpoint Value (Addr) */
            ret = env->mmubpregs[reg];
        case 1: /* Breakpoint Mask */
            ret = env->mmubpregs[reg];
        case 2: /* Breakpoint Control */
            ret = env->mmubpregs[reg];
        case 3: /* Breakpoint Status */
            /* status register is clear-on-read */
            ret = env->mmubpregs[reg];
            env->mmubpregs[reg] = 0ULL;
        DPRINTF_MMU("read breakpoint reg[%d] 0x%016llx\n", reg, ret);
    case 8: /* User code access, XXX */
        do_unassigned_access(addr, 0, 0, asi, size);
    /* sign-extend sub-word results when requested (elided guard) */
            ret = (int16_t) ret;
            ret = (int32_t) ret;
    dump_asi("read ", last_addr, asi, size, ret);
/* Store through an alternate address space (SPARC32, system emulation).
 * Dispatches on the ASI number: MXCC registers/streams, MMU flush and
 * register writes, user/kernel data spaces, block copy/fill, physical
 * passthrough, and breakpoint registers.  NOTE(review): switch headers,
 * braces and breaks are elided from this excerpt. */
void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
    helper_check_align(addr, size - 1);
    case 2: /* SuperSparc MXCC registers */
        case 0x01c00000: /* MXCC stream data register 0 */
            env->mxccdata[0] = val;
            DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
        case 0x01c00008: /* MXCC stream data register 1 */
            env->mxccdata[1] = val;
            DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
        case 0x01c00010: /* MXCC stream data register 2 */
            env->mxccdata[2] = val;
            DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
        case 0x01c00018: /* MXCC stream data register 3 */
            env->mxccdata[3] = val;
            DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
        case 0x01c00100: /* MXCC stream source */
            /* latch the source address, then fetch the 32-byte stream */
            env->mxccregs[0] = val;
            DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
            env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
            env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
            env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
            env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
        case 0x01c00200: /* MXCC stream destination */
            /* latch the destination, then flush the 32-byte stream out */
            env->mxccregs[1] = val;
            DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  0,
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  8,
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
        case 0x01c00a00: /* MXCC control register */
            env->mxccregs[3] = val;
            DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
        case 0x01c00a04: /* MXCC control register */
            /* 32-bit write into the low half of the control register */
            env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
            DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
        case 0x01c00e00: /* MXCC error register  */
            // writing a 1 bit clears the error
            env->mxccregs[6] &= ~val;
            DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
        case 0x01c00f00: /* MBus port address register */
            env->mxccregs[7] = val;
            DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
        DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
        DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
                     asi, size, addr, val);
    case 3: /* MMU flush */
        mmulev = (addr >> 8) & 15;
        DPRINTF_MMU("mmu flush level %d\n", mmulev);
        case 0: // flush page
            tlb_flush_page(env, addr & 0xfffff000);
        case 1: // flush segment (256k)
        case 2: // flush region (16M)
        case 3: // flush context (4G)
        case 4: // flush entire
    case 4: /* write MMU regs */
        int reg = (addr >> 8) & 0x1f;
        oldreg = env->mmuregs[reg];
        case 0: // Control Register
            env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
            // Mappings generated during no-fault mode or MMU
            // disabled mode are invalid in normal mode
            if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
                (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
        case 1: // Context Table Pointer Register
            env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
        case 2: // Context Register
            env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
            if (oldreg != env->mmuregs[reg]) {
                /* we flush when the MMU context changes because
                   QEMU has no MMU context support */
        case 3: // Synchronous Fault Status Register with Clear
        case 4: // Synchronous Fault Address Register
        case 0x10: // TLB Replacement Control Register
            env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
        case 0x13: // Synchronous Fault Status Register with Read and Clear
            env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
        case 0x14: // Synchronous Fault Address Register
            env->mmuregs[4] = val;
            env->mmuregs[reg] = val;
        if (oldreg != env->mmuregs[reg]) {
            DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
                        reg, oldreg, env->mmuregs[reg]);
    case 5: // Turbosparc ITLB Diagnostic
    case 6: // Turbosparc DTLB Diagnostic
    case 7: // Turbosparc IOTLB Diagnostic
    case 0xa: /* User data access */
            stb_user(addr, val);
            stw_user(addr, val);
            stl_user(addr, val);
            stq_user(addr, val);
    case 0xb: /* Supervisor data access */
            stb_kernel(addr, val);
            stw_kernel(addr, val);
            stl_kernel(addr, val);
            stq_kernel(addr, val);
    case 0xc: /* I-cache tag */
    case 0xd: /* I-cache data */
    case 0xe: /* D-cache tag */
    case 0xf: /* D-cache data */
    case 0x10: /* I/D-cache flush page */
    case 0x11: /* I/D-cache flush segment */
    case 0x12: /* I/D-cache flush region */
    case 0x13: /* I/D-cache flush context */
    case 0x14: /* I/D-cache flush user */
    case 0x17: /* Block copy, sta access */
        /* copy 32 bytes, word-aligning both source and destination */
        uint32_t src = val & ~3, dst = addr & ~3, temp;
        for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
            temp = ldl_kernel(src);
            stl_kernel(dst, temp);
    case 0x1f: /* Block fill, stda access */
        // fill 32 bytes with val
        /* NOTE(review): 'addr & 7' looks wrong for an aligned 32-byte
         * fill — the block-copy case above aligns with '& ~3', so this
         * probably should be 'addr & ~7' (align down).  As written the
         * stores target addresses 0..7.  Verify against the SuperSPARC
         * manual before changing. */
        uint32_t dst = addr & 7;
        for (i = 0; i < 32; i += 8, dst += 8)
            stq_kernel(dst, val);
    case 0x20: /* MMU passthrough */
            stb_phys(addr, val);
            stw_phys(addr, val);
            stl_phys(addr, val);
            stq_phys(addr, val);
    /* GCC case-range extension: ASI low nibble supplies bits 32..35 */
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
            stb_phys((target_phys_addr_t)addr
                     | ((target_phys_addr_t)(asi & 0xf) << 32), val);
            stw_phys((target_phys_addr_t)addr
                     | ((target_phys_addr_t)(asi & 0xf) << 32), val);
            stl_phys((target_phys_addr_t)addr
                     | ((target_phys_addr_t)(asi & 0xf) << 32), val);
            stq_phys((target_phys_addr_t)addr
                     | ((target_phys_addr_t)(asi & 0xf) << 32), val);
    case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
    case 0x31: // store buffer data, Ross RT620 I-cache flush or
               // Turbosparc snoop RAM
    case 0x32: // store buffer control or Turbosparc page table
               // descriptor diagnostic
    case 0x36: /* I-cache flash clear */
    case 0x37: /* D-cache flash clear */
    case 0x4c: /* breakpoint action */
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
        int reg = (addr >> 8) & 3;
        case 0: /* Breakpoint Value (Addr) */
            env->mmubpregs[reg] = (val & 0xfffffffffULL);
        case 1: /* Breakpoint Mask */
            env->mmubpregs[reg] = (val & 0xfffffffffULL);
        case 2: /* Breakpoint Control */
            env->mmubpregs[reg] = (val & 0x7fULL);
        case 3: /* Breakpoint Status */
            env->mmubpregs[reg] = (val & 0xfULL);
        DPRINTF_MMU("write breakpoint reg[%d] 0x%016llx\n", reg,
    case 8: /* User code access, XXX */
    case 9: /* Supervisor code access, XXX */
        do_unassigned_access(addr, 1, 0, asi, size);
    dump_asi("write", addr, asi, size, val);
#endif /* CONFIG_USER_ONLY */
#else /* TARGET_SPARC64 */
#ifdef CONFIG_USER_ONLY
/* sparc64 user-mode ASI load: only the primary/secondary (and their
 * no-fault / little-endian variants) address spaces are legal; anything
 * privileged traps.  Structural lines are elided from this excerpt. */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
    raise_exception(TT_PRIV_ACT);
    helper_check_align(addr, size - 1);
    address_mask(env, &addr);
    case 0x82: // Primary no-fault
    case 0x8a: // Primary no-fault LE
        /* no-fault loads return 0 instead of faulting on unmapped pages */
        if (page_check_range(addr, size, PAGE_READ) == -1) {
            dump_asi("read ", last_addr, asi, size, ret);
    case 0x80: // Primary
    case 0x88: // Primary LE
            ret = ldub_raw(addr);
            ret = lduw_raw(addr);
            ret = ldl_raw(addr);
            ret = ldq_raw(addr);
    case 0x83: // Secondary no-fault
    case 0x8b: // Secondary no-fault LE
        if (page_check_range(addr, size, PAGE_READ) == -1) {
            dump_asi("read ", last_addr, asi, size, ret);
    case 0x81: // Secondary
    case 0x89: // Secondary LE
    /* Convert from little endian */
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0x8a: // Primary no-fault LE
    case 0x8b: // Secondary no-fault LE
    /* Convert to signed number */
            ret = (int16_t) ret;
            ret = (int32_t) ret;
    dump_asi("read ", last_addr, asi, size, ret);
/* sparc64 user-mode ASI store; privileged ASIs trap, no-fault ASIs are
 * read-only and report an unassigned access. */
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
    dump_asi("write", addr, asi, size, val);
    raise_exception(TT_PRIV_ACT);
    helper_check_align(addr, size - 1);
    address_mask(env, &addr);
    /* Convert to little endian */
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
        /* NOTE(review): these byte-swap 'addr' (the address) rather
         * than 'val' (the data).  A little-endian ASI store should swap
         * the store value; swapping the address looks like a bug —
         * verify against the V9 spec before relying on this path. */
        addr = bswap16(addr);
        addr = bswap32(addr);
        addr = bswap64(addr);
    case 0x80: // Primary
    case 0x88: // Primary LE
    case 0x81: // Secondary
    case 0x89: // Secondary LE
    case 0x82: // Primary no-fault, RO
    case 0x83: // Secondary no-fault, RO
    case 0x8a: // Primary no-fault LE, RO
    case 0x8b: // Secondary no-fault LE, RO
        do_unassigned_access(addr, 1, 0, 1, size);
#else /* CONFIG_USER_ONLY */
/* sparc64 system-emulation ASI load.  Privilege check: ASIs below 0x80
 * need PS_PRIV; on hypervisor-capable CPUs, ASIs 0x30..0x7f also need
 * hypervisor privilege.  Then dispatch: normal/user/hypervisor data,
 * physical bypass, I-/D-MMU registers, TLB diagnostics.  Structural
 * lines (switch headers, braces, breaks) are elided from this excerpt. */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || ((env->def->features & CPU_FEATURE_HYPV)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);
    helper_check_align(addr, size - 1);
    case 0x82: // Primary no-fault
    case 0x8a: // Primary no-fault LE
        /* no-fault loads return 0 when the page is unmapped */
        if (cpu_get_phys_page_debug(env, addr) == -1ULL) {
            dump_asi("read ", last_addr, asi, size, ret);
    case 0x10: // As if user primary
    case 0x18: // As if user primary LE
    case 0x80: // Primary
    case 0x88: // Primary LE
    case 0xe2: // UA2007 Primary block init
    case 0xe3: // UA2007 Secondary block init
        /* choose hypervisor / kernel / user access by privilege level */
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
            if ((env->def->features & CPU_FEATURE_HYPV)
                && env->hpstate & HS_PRIV) {
                    ret = ldub_hypv(addr);
                    ret = lduw_hypv(addr);
                    ret = ldl_hypv(addr);
                    ret = ldq_hypv(addr);
                    ret = ldub_kernel(addr);
                    ret = lduw_kernel(addr);
                    ret = ldl_kernel(addr);
                    ret = ldq_kernel(addr);
                ret = ldub_user(addr);
                ret = lduw_user(addr);
                ret = ldl_user(addr);
                ret = ldq_user(addr);
    case 0x14: // Bypass
    case 0x15: // Bypass, non-cacheable
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
            ret = ldub_phys(addr);
            ret = lduw_phys(addr);
            ret = ldl_phys(addr);
            ret = ldq_phys(addr);
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        // Only ldda allowed
        raise_exception(TT_ILL_INSN);
    case 0x83: // Secondary no-fault
    case 0x8b: // Secondary no-fault LE
        if (cpu_get_phys_page_debug(env, addr) == -1ULL) {
            dump_asi("read ", last_addr, asi, size, ret);
    case 0x04: // Nucleus
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x11: // As if user secondary
    case 0x19: // As if user secondary LE
    case 0x4a: // UPA config
    case 0x81: // Secondary
    case 0x89: // Secondary LE
    case 0x50: // I-MMU regs
        int reg = (addr >> 3) & 0xf;
            // I-TSB Tag Target register
            ret = ultrasparc_tag_target(env->immuregs[6]);
            ret = env->immuregs[reg];
    case 0x51: // I-MMU 8k TSB pointer
        // env->immuregs[5] holds I-MMU TSB register value
        // env->immuregs[6] holds I-MMU Tag Access register value
        ret = ultrasparc_tsb_pointer(env->immuregs[5], env->immuregs[6],
    case 0x52: // I-MMU 64k TSB pointer
        // env->immuregs[5] holds I-MMU TSB register value
        // env->immuregs[6] holds I-MMU Tag Access register value
        ret = ultrasparc_tsb_pointer(env->immuregs[5], env->immuregs[6],
    case 0x55: // I-MMU data access
        int reg = (addr >> 3) & 0x3f;
        ret = env->itlb_tte[reg];
    case 0x56: // I-MMU tag read
        int reg = (addr >> 3) & 0x3f;
        ret = env->itlb_tag[reg];
    case 0x58: // D-MMU regs
        int reg = (addr >> 3) & 0xf;
            // D-TSB Tag Target register
            ret = ultrasparc_tag_target(env->dmmuregs[6]);
            ret = env->dmmuregs[reg];
    case 0x59: // D-MMU 8k TSB pointer
        // env->dmmuregs[5] holds D-MMU TSB register value
        // env->dmmuregs[6] holds D-MMU Tag Access register value
        ret = ultrasparc_tsb_pointer(env->dmmuregs[5], env->dmmuregs[6],
    case 0x5a: // D-MMU 64k TSB pointer
        // env->dmmuregs[5] holds D-MMU TSB register value
        // env->dmmuregs[6] holds D-MMU Tag Access register value
        ret = ultrasparc_tsb_pointer(env->dmmuregs[5], env->dmmuregs[6],
    case 0x5d: // D-MMU data access
        int reg = (addr >> 3) & 0x3f;
        ret = env->dtlb_tte[reg];
    case 0x5e: // D-MMU tag read
        int reg = (addr >> 3) & 0x3f;
        ret = env->dtlb_tag[reg];
    case 0x46: // D-cache data
    case 0x47: // D-cache tag access
    case 0x4b: // E-cache error enable
    case 0x4c: // E-cache asynchronous fault status
    case 0x4d: // E-cache asynchronous fault address
    case 0x4e: // E-cache tag data
    case 0x66: // I-cache instruction access
    case 0x67: // I-cache tag access
    case 0x6e: // I-cache predecode
    case 0x6f: // I-cache LRU etc.
    case 0x76: // E-cache tag
    case 0x7e: // E-cache tag
    case 0x5b: // D-MMU data pointer
    case 0x48: // Interrupt dispatch, RO
    case 0x49: // Interrupt data receive
    case 0x7f: // Incoming interrupt vector, RO
    case 0x54: // I-MMU data in, WO
    case 0x57: // I-MMU demap, WO
    case 0x5c: // D-MMU data in, WO
    case 0x5f: // D-MMU demap, WO
    case 0x77: // Interrupt vector, WO
        do_unassigned_access(addr, 0, 0, 1, size);
    /* Convert from little endian */
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0x8a: // Primary no-fault LE
    case 0x8b: // Secondary no-fault LE
    /* Convert to signed number */
            ret = (int16_t) ret;
            ret = (int32_t) ret;
    dump_asi("read ", last_addr, asi, size, ret);
// helper_st_asi: store `val` (size bytes) to `addr` in address space `asi`.
// Privilege-checks the ASI, byte-swaps little-endian ASIs, then dispatches:
// normal/bypass memory stores, LSU control, I/D-MMU register writes, TLB
// data-in / data-access / demap operations; read-only and cache ASIs fall
// through to do_unassigned_access().
// NOTE(review): this chunk is an elided excerpt — switch headers, braces and
// break/default lines between the visible statements are not shown here.
//
// FIX: the "Convert to little endian" block must byte-swap the *value*
// being stored, not the address. Swapping `addr` (after the alignment
// check already validated it) redirected every LE-ASI store to a garbage
// address and left the stored value un-swapped. The load path's mirror
// block ("Convert from little endian") operates on the loaded value.
1927 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1930 dump_asi("write", addr, asi, size, val);
// ASIs < 0x80 are privileged; 0x30..0x7f additionally need hyperprivileged
// mode when the CPU model has the hypervisor feature.
1932 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1933 || ((env->def->features & CPU_FEATURE_HYPV)
1934 && asi >= 0x30 && asi < 0x80
1935 && !(env->hpstate & HS_PRIV)))
1936 raise_exception(TT_PRIV_ACT);
1938 helper_check_align(addr, size - 1);
1939 /* Convert to little endian */
1941 case 0x0c: // Nucleus Little Endian (LE)
1942 case 0x18: // As if user primary LE
1943 case 0x19: // As if user secondary LE
1944 case 0x1c: // Bypass LE
1945 case 0x1d: // Bypass, non-cacheable LE
1946 case 0x88: // Primary LE
1947 case 0x89: // Secondary LE
1950 val = bswap16(val);
1953 val = bswap32(val);
1956 val = bswap64(val);
// Normal address-space stores: pick hypervisor/kernel/user access routine
// from the current privilege state and the ASI's "as if user" property.
1966 case 0x10: // As if user primary
1967 case 0x18: // As if user primary LE
1968 case 0x80: // Primary
1969 case 0x88: // Primary LE
1970 case 0xe2: // UA2007 Primary block init
1971 case 0xe3: // UA2007 Secondary block init
1972 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1973 if ((env->def->features & CPU_FEATURE_HYPV)
1974 && env->hpstate & HS_PRIV) {
1977 stb_hypv(addr, val);
1980 stw_hypv(addr, val);
1983 stl_hypv(addr, val);
1987 stq_hypv(addr, val);
1993 stb_kernel(addr, val);
1996 stw_kernel(addr, val);
1999 stl_kernel(addr, val);
2003 stq_kernel(addr, val);
2010 stb_user(addr, val);
2013 stw_user(addr, val);
2016 stl_user(addr, val);
2020 stq_user(addr, val);
// Bypass ASIs store straight to physical memory, no MMU translation.
2025 case 0x14: // Bypass
2026 case 0x15: // Bypass, non-cacheable
2027 case 0x1c: // Bypass LE
2028 case 0x1d: // Bypass, non-cacheable LE
2032 stb_phys(addr, val);
2035 stw_phys(addr, val);
2038 stl_phys(addr, val);
2042 stq_phys(addr, val);
2047 case 0x24: // Nucleus quad LDD 128 bit atomic
2048 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
2049 // Only ldda allowed
2050 raise_exception(TT_ILL_INSN);
2052 case 0x04: // Nucleus
2053 case 0x0c: // Nucleus Little Endian (LE)
2054 case 0x11: // As if user secondary
2055 case 0x19: // As if user secondary LE
2056 case 0x4a: // UPA config
2057 case 0x81: // Secondary
2058 case 0x89: // Secondary LE
// LSU control: only the D/I MMU enable bits are writable.
2066 env->lsu = val & (DMMU_E | IMMU_E);
2067 // Mappings generated during D/I MMU disabled mode are
2068 // invalid in normal mode
2069 if (oldreg != env->lsu) {
2070 DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
2079 case 0x50: // I-MMU regs
2081 int reg = (addr >> 3) & 0xf;
2084 oldreg = env->immuregs[reg];
2089 case 1: // Not in I-MMU
2096 val = 0; // Clear SFSR
2098 case 5: // TSB access
2099 case 6: // Tag access
2103 env->immuregs[reg] = val;
2104 if (oldreg != env->immuregs[reg]) {
2105 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
2106 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
// I-MMU data-in: auto-place the TTE — prefer an invalid slot, else an
// unlocked one. Tag comes from the Tag Access register (immuregs[6]).
2113 case 0x54: // I-MMU data in
2117 // Try finding an invalid entry
2118 for (i = 0; i < 64; i++) {
2119 if ((env->itlb_tte[i] & 0x8000000000000000ULL) == 0) {
2120 env->itlb_tag[i] = env->immuregs[6];
2121 env->itlb_tte[i] = val;
2125 // Try finding an unlocked entry
2126 for (i = 0; i < 64; i++) {
2127 if ((env->itlb_tte[i] & 0x40) == 0) {
2128 env->itlb_tag[i] = env->immuregs[6];
2129 env->itlb_tte[i] = val;
2136 case 0x55: // I-MMU data access
2140 unsigned int i = (addr >> 3) & 0x3f;
2142 env->itlb_tag[i] = env->immuregs[6];
2143 env->itlb_tte[i] = val;
// I-MMU demap: invalidate valid entries whose tag matches `val` under a
// page-size-dependent mask (size field is TTE bits 62:61).
2146 case 0x57: // I-MMU demap
2150 for (i = 0; i < 64; i++) {
2151 if ((env->itlb_tte[i] & 0x8000000000000000ULL) != 0) {
2152 target_ulong mask = 0xffffffffffffe000ULL;
2154 mask <<= 3 * ((env->itlb_tte[i] >> 61) & 3);
2155 if ((val & mask) == (env->itlb_tag[i] & mask)) {
2156 env->itlb_tag[i] = 0;
2157 env->itlb_tte[i] = 0;
2164 case 0x58: // D-MMU regs
2166 int reg = (addr >> 3) & 0xf;
2169 oldreg = env->dmmuregs[reg];
2175 if ((val & 1) == 0) {
2176 val = 0; // Clear SFSR, Fault address
2177 env->dmmuregs[4] = 0;
2179 env->dmmuregs[reg] = val;
2181 case 1: // Primary context
2182 case 2: // Secondary context
2183 case 5: // TSB access
2184 case 6: // Tag access
2185 case 7: // Virtual Watchpoint
2186 case 8: // Physical Watchpoint
2190 env->dmmuregs[reg] = val;
2191 if (oldreg != env->dmmuregs[reg]) {
2192 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
2193 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
// D-MMU data-in / data-access / demap: mirror the I-MMU logic above.
2200 case 0x5c: // D-MMU data in
2204 // Try finding an invalid entry
2205 for (i = 0; i < 64; i++) {
2206 if ((env->dtlb_tte[i] & 0x8000000000000000ULL) == 0) {
2207 env->dtlb_tag[i] = env->dmmuregs[6];
2208 env->dtlb_tte[i] = val;
2212 // Try finding an unlocked entry
2213 for (i = 0; i < 64; i++) {
2214 if ((env->dtlb_tte[i] & 0x40) == 0) {
2215 env->dtlb_tag[i] = env->dmmuregs[6];
2216 env->dtlb_tte[i] = val;
2223 case 0x5d: // D-MMU data access
2225 unsigned int i = (addr >> 3) & 0x3f;
2227 env->dtlb_tag[i] = env->dmmuregs[6];
2228 env->dtlb_tte[i] = val;
2231 case 0x5f: // D-MMU demap
2235 for (i = 0; i < 64; i++) {
2236 if ((env->dtlb_tte[i] & 0x8000000000000000ULL) != 0) {
2237 target_ulong mask = 0xffffffffffffe000ULL;
2239 mask <<= 3 * ((env->dtlb_tte[i] >> 61) & 3);
2240 if ((val & mask) == (env->dtlb_tag[i] & mask)) {
2241 env->dtlb_tag[i] = 0;
2242 env->dtlb_tte[i] = 0;
2249 case 0x49: // Interrupt data receive
// Cache-maintenance ASIs: accepted but not modelled (no-op).
2252 case 0x46: // D-cache data
2253 case 0x47: // D-cache tag access
2254 case 0x4b: // E-cache error enable
2255 case 0x4c: // E-cache asynchronous fault status
2256 case 0x4d: // E-cache asynchronous fault address
2257 case 0x4e: // E-cache tag data
2258 case 0x66: // I-cache instruction access
2259 case 0x67: // I-cache tag access
2260 case 0x6e: // I-cache predecode
2261 case 0x6f: // I-cache LRU etc.
2262 case 0x76: // E-cache tag
2263 case 0x7e: // E-cache tag
// Read-only ASIs: a write is an unassigned access.
2265 case 0x51: // I-MMU 8k TSB pointer, RO
2266 case 0x52: // I-MMU 64k TSB pointer, RO
2267 case 0x56: // I-MMU tag read, RO
2268 case 0x59: // D-MMU 8k TSB pointer, RO
2269 case 0x5a: // D-MMU 64k TSB pointer, RO
2270 case 0x5b: // D-MMU data pointer, RO
2271 case 0x5e: // D-MMU tag read, RO
2272 case 0x48: // Interrupt dispatch, RO
2273 case 0x7f: // Incoming interrupt vector, RO
2274 case 0x82: // Primary no-fault, RO
2275 case 0x83: // Secondary no-fault, RO
2276 case 0x8a: // Primary no-fault LE, RO
2277 case 0x8b: // Secondary no-fault LE, RO
2279 do_unassigned_access(addr, 1, 0, 1, size);
2283 #endif /* CONFIG_USER_ONLY */
// helper_ldda_asi: 128-bit (ldda) load from address space `asi` into the
// register pair starting at `rd` (gregs for rd<8, else window regs).
// ASIs 0x24/0x2c take the atomic quad path; others do two 32-bit loads.
// NOTE(review): elided excerpt — switch/brace/break lines are not shown.
2285 void helper_ldda_asi(target_ulong addr, int asi, int rd)
// Same privilege rule as helper_ld_asi/helper_st_asi.
2287 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2288 || ((env->def->features & CPU_FEATURE_HYPV)
2289 && asi >= 0x30 && asi < 0x80
2290 && !(env->hpstate & HS_PRIV)))
2291 raise_exception(TT_PRIV_ACT);
2294 case 0x24: // Nucleus quad LDD 128 bit atomic
2295 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
2296 helper_check_align(addr, 0xf);
// rd == 0: %g0 stays zero, only the odd half is written.
2298 env->gregs[1] = ldq_kernel(addr + 8);
2300 bswap64s(&env->gregs[1]);
2301 } else if (rd < 8) {
2302 env->gregs[rd] = ldq_kernel(addr);
2303 env->gregs[rd + 1] = ldq_kernel(addr + 8);
// LE variant (0x2c): byte-swap both halves.
2305 bswap64s(&env->gregs[rd]);
2306 bswap64s(&env->gregs[rd + 1]);
2309 env->regwptr[rd] = ldq_kernel(addr);
2310 env->regwptr[rd + 1] = ldq_kernel(addr + 8);
2312 bswap64s(&env->regwptr[rd]);
2313 bswap64s(&env->regwptr[rd + 1]);
// Non-atomic ASIs: pair of 32-bit loads via helper_ld_asi.
2318 helper_check_align(addr, 0x3);
2320 env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
2322 env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
2323 env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
2325 env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
2326 env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
// helper_ldf_asi: FP load from address space `asi` into FP reg `rd`.
// Block-load ASIs (0xf0..0xf9) move 64 bytes into 16 consecutive FP regs;
// other ASIs do a single load of `size` bytes via helper_ld_asi.
// NOTE(review): elided excerpt — switch/brace lines are not shown.
2332 void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
2337 helper_check_align(addr, 3);
2339 case 0xf0: // Block load primary
2340 case 0xf1: // Block load secondary
2341 case 0xf8: // Block load primary LE
2342 case 0xf9: // Block load secondary LE
// Block loads are only valid for 8-byte element size / even rd
// (the guarding condition is elided here).
2344 raise_exception(TT_ILL_INSN);
2347 helper_check_align(addr, 0x3f);
// asi & 0x8f strips the "block" bits, keeping the base primary/secondary
// (LE) ASI for the per-word loads.
2348 for (i = 0; i < 16; i++) {
2349 *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
2359 val = helper_ld_asi(addr, asi, size, 0);
2363 *((uint32_t *)&env->fpr[rd]) = val;
2366 *((int64_t *)&DT0) = val;
// helper_stf_asi: FP store of reg `rd` to address space `asi`.
// Block-store ASIs (0xe0/0xe1, 0xf0..0xf9) write 64 bytes from 16
// consecutive FP regs; others store `size` bytes via helper_st_asi.
// NOTE(review): elided excerpt — switch/brace lines are not shown.
2374 void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
2377 target_ulong val = 0;
2379 helper_check_align(addr, 3);
2381 case 0xe0: // UA2007 Block commit store primary (cache flush)
2382 case 0xe1: // UA2007 Block commit store secondary (cache flush)
2383 case 0xf0: // Block store primary
2384 case 0xf1: // Block store secondary
2385 case 0xf8: // Block store primary LE
2386 case 0xf9: // Block store secondary LE
// Invalid element size / rd for a block store (condition elided).
2388 raise_exception(TT_ILL_INSN);
2391 helper_check_align(addr, 0x3f);
// asi & 0x8f maps the block ASI back to its base ASI for per-word stores.
2392 for (i = 0; i < 16; i++) {
2393 val = *(uint32_t *)&env->fpr[rd++];
2394 helper_st_asi(addr, val, asi & 0x8f, 4);
2406 val = *((uint32_t *)&env->fpr[rd]);
2409 val = *((int64_t *)&DT0);
2415 helper_st_asi(addr, val, asi, size);
// helper_cas_asi: 32-bit compare-and-swap in address space `asi`.
// Loads the old word, and stores val1 when it equals val2 (the comparison
// line is elided here); returns the old word.
2418 target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
2419 target_ulong val2, uint32_t asi)
// Only the low 32 bits participate in the comparison.
2423 val2 &= 0xffffffffUL;
2424 ret = helper_ld_asi(addr, asi, 4, 0);
2425 ret &= 0xffffffffUL;
2427 helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
// helper_casx_asi: 64-bit compare-and-swap in address space `asi`.
// Same shape as helper_cas_asi but full 64-bit width (comparison elided).
2431 target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
2432 target_ulong val2, uint32_t asi)
2436 ret = helper_ld_asi(addr, asi, 8, 0);
2438 helper_st_asi(addr, val1, asi, 8);
2441 #endif /* TARGET_SPARC64 */
2443 #ifndef TARGET_SPARC64
// helper_rett (sparc32): return from trap. Illegal if traps are already
// enabled; restores the window (underflow-checked) and previous S bit.
2444 void helper_rett(void)
2448 if (env->psret == 1)
2449 raise_exception(TT_ILL_INSN);
2452 cwp = cpu_cwp_inc(env, env->cwp + 1) ;
2453 if (env->wim & (1 << cwp)) {
2454 raise_exception(TT_WIN_UNF);
// Restore the previous supervisor bit saved at trap entry.
2457 env->psrs = env->psrps;
// helper_udiv (sparc32): unsigned 64/32 divide of Y:a by b.
// Traps on divide-by-zero; overflow handling (result > 32 bits) follows.
2461 target_ulong helper_udiv(target_ulong a, target_ulong b)
// Build the 64-bit dividend from the Y register (high) and a (low).
2466 x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
2470 raise_exception(TT_DIV_ZERO);
// Quotient overflow: result saturates (elided) and sets the V flag.
2474 if (x0 > 0xffffffff) {
// helper_sdiv (sparc32): signed 64/32 divide of Y:a by b.
// Traps on divide-by-zero; saturates to INT32_MIN/INT32_MAX on overflow.
2483 target_ulong helper_sdiv(target_ulong a, target_ulong b)
2488 x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
2492 raise_exception(TT_DIV_ZERO);
// Quotient doesn't fit in 32 signed bits: saturate toward the sign.
2496 if ((int32_t) x0 != x0) {
2498 return x0 < 0? 0x80000000: 0x7fffffff;
// helper_stdf: store double FP reg (DT0) to `addr`, routed by `mem_idx`
// (user/kernel/hypervisor) on softmmu; raw store in user-only builds.
2505 void helper_stdf(target_ulong addr, int mem_idx)
2507 helper_check_align(addr, 7);
2508 #if !defined(CONFIG_USER_ONLY)
2511 stfq_user(addr, DT0);
2514 stfq_kernel(addr, DT0);
2516 #ifdef TARGET_SPARC64
2518 stfq_hypv(addr, DT0);
// CONFIG_USER_ONLY path: apply the 32-bit address mask, then raw store.
2525 address_mask(env, &addr);
2526 stfq_raw(addr, DT0);
// helper_lddf: load double FP value into DT0 from `addr`, routed by
// `mem_idx` on softmmu; raw load in user-only builds.
2530 void helper_lddf(target_ulong addr, int mem_idx)
2532 helper_check_align(addr, 7);
2533 #if !defined(CONFIG_USER_ONLY)
2536 DT0 = ldfq_user(addr);
2539 DT0 = ldfq_kernel(addr);
2541 #ifdef TARGET_SPARC64
2543 DT0 = ldfq_hypv(addr);
// CONFIG_USER_ONLY path.
2550 address_mask(env, &addr);
2551 DT0 = ldfq_raw(addr);
// helper_ldqf: quad (128-bit) FP load, emulated as two 64-bit loads into
// a union `u` (upper at addr, lower at addr+8), routed by `mem_idx`.
2555 void helper_ldqf(target_ulong addr, int mem_idx)
2557 // XXX add 128 bit load
// Only 8-byte alignment is enforced, not 16 (see XXX above).
2560 helper_check_align(addr, 7);
2561 #if !defined(CONFIG_USER_ONLY)
2564 u.ll.upper = ldq_user(addr);
2565 u.ll.lower = ldq_user(addr + 8);
2569 u.ll.upper = ldq_kernel(addr);
2570 u.ll.lower = ldq_kernel(addr + 8);
2573 #ifdef TARGET_SPARC64
2575 u.ll.upper = ldq_hypv(addr);
2576 u.ll.lower = ldq_hypv(addr + 8);
// CONFIG_USER_ONLY path: mask the base and re-mask addr+8 so the second
// half also stays within the 32-bit space.
2584 address_mask(env, &addr);
2585 u.ll.upper = ldq_raw(addr);
2586 u.ll.lower = ldq_raw((addr + 8) & 0xffffffffULL);
// helper_stqf: quad (128-bit) FP store, emulated as two 64-bit stores
// from union `u` (upper then lower), routed by `mem_idx`.
2591 void helper_stqf(target_ulong addr, int mem_idx)
2593 // XXX add 128 bit store
2596 helper_check_align(addr, 7);
2597 #if !defined(CONFIG_USER_ONLY)
2601 stq_user(addr, u.ll.upper);
2602 stq_user(addr + 8, u.ll.lower);
2606 stq_kernel(addr, u.ll.upper);
2607 stq_kernel(addr + 8, u.ll.lower);
2609 #ifdef TARGET_SPARC64
2612 stq_hypv(addr, u.ll.upper);
2613 stq_hypv(addr + 8, u.ll.lower);
// CONFIG_USER_ONLY path, mirroring helper_ldqf's address masking.
2621 address_mask(env, &addr);
2622 stq_raw(addr, u.ll.upper);
2623 stq_raw((addr + 8) & 0xffffffffULL, u.ll.lower);
// set_fsr: propagate the FSR rounding-direction field into softfloat's
// rounding mode for subsequent FP operations.
2627 static inline void set_fsr(void)
2631 switch (env->fsr & FSR_RD_MASK) {
2632 case FSR_RD_NEAREST:
2633 rnd_mode = float_round_nearest_even;
2637 rnd_mode = float_round_to_zero;
2640 rnd_mode = float_round_up;
2643 rnd_mode = float_round_down;
2646 set_float_rounding_mode(rnd_mode, &env->fp_status);
// helper_ldfsr: load the (32-bit) FSR, keeping the bits that LDFSR may
// not modify; helper_ldxfsr is the 64-bit V9 variant.
2649 void helper_ldfsr(uint32_t new_fsr)
2651 env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
2655 #ifdef TARGET_SPARC64
2656 void helper_ldxfsr(uint64_t new_fsr)
2658 env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
// helper_debug: raise a debug exception (breakpoint) to the main loop.
2663 void helper_debug(void)
2665 env->exception_index = EXCP_DEBUG;
2669 #ifndef TARGET_SPARC64
2670 /* XXX: use another pointer for %iN registers to avoid slow wrapping
// helper_save (sparc32): decrement CWP for a SAVE; trap on overflow if
// the target window is invalid per WIM.
2672 void helper_save(void)
2676 cwp = cpu_cwp_dec(env, env->cwp - 1);
2677 if (env->wim & (1 << cwp)) {
2678 raise_exception(TT_WIN_OVF);
// helper_restore (sparc32): increment CWP for a RESTORE; trap on
// underflow if the target window is invalid per WIM.
2683 void helper_restore(void)
2687 cwp = cpu_cwp_inc(env, env->cwp + 1);
2688 if (env->wim & (1 << cwp)) {
2689 raise_exception(TT_WIN_UNF);
// helper_wrpsr: write PSR; a CWP value >= nwindows is illegal.
2694 void helper_wrpsr(target_ulong new_psr)
2696 if ((new_psr & PSR_CWP) >= env->nwindows)
2697 raise_exception(TT_ILL_INSN);
2699 PUT_PSR(env, new_psr);
// helper_rdpsr: read the composed PSR value.
2702 target_ulong helper_rdpsr(void)
2704 return GET_PSR(env);
2708 /* XXX: use another pointer for %iN registers to avoid slow wrapping
// helper_save (sparc64): SAVE with V9 window bookkeeping — spill trap
// when no windows can be saved, clean-window trap when none are clean.
// The trap type encodes "other" windows and WSTATE per the V9 spec.
2710 void helper_save(void)
2714 cwp = cpu_cwp_dec(env, env->cwp - 1);
2715 if (env->cansave == 0) {
2716 raise_exception(TT_SPILL | (env->otherwin != 0 ?
2717 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2718 ((env->wstate & 0x7) << 2)));
2720 if (env->cleanwin - env->canrestore == 0) {
2721 // XXX Clean windows without trap
2722 raise_exception(TT_CLRWIN);
// helper_restore (sparc64): RESTORE; fill trap when no window can be
// restored, with the same WSTATE-derived trap-type encoding.
2731 void helper_restore(void)
2735 cwp = cpu_cwp_inc(env, env->cwp + 1);
2736 if (env->canrestore == 0) {
2737 raise_exception(TT_FILL | (env->otherwin != 0 ?
2738 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2739 ((env->wstate & 0x7) << 2)));
// helper_flushw: FLUSHW traps (spill) until all active windows besides
// the current one have been flushed (cansave == nwindows - 2).
2747 void helper_flushw(void)
2749 if (env->cansave != env->nwindows - 2) {
2750 raise_exception(TT_SPILL | (env->otherwin != 0 ?
2751 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2752 ((env->wstate & 0x7) << 2)));
// helper_saved / helper_restored: adjust V9 window-management counters
// after a spill/fill handler completes (increments/decrements elided).
2756 void helper_saved(void)
2759 if (env->otherwin == 0)
2765 void helper_restored(void)
2768 if (env->cleanwin < env->nwindows - 1)
2770 if (env->otherwin == 0)
// CCR (condition-code register) accessors.
2776 target_ulong helper_rdccr(void)
2778 return GET_CCR(env);
2781 void helper_wrccr(target_ulong new_ccr)
2783 PUT_CCR(env, new_ccr);
2786 // CWP handling is reversed in V9, but we still use the V8 register
// CWP accessors translating between V9 semantics and the V8 storage.
2788 target_ulong helper_rdcwp(void)
2790 return GET_CWP64(env);
2793 void helper_wrcwp(target_ulong new_cwp)
2795 PUT_CWP64(env, new_cwp);
2798 // This function uses non-native bit order
// GET_FIELD: extract bits [FROM..TO] with bit 0 as the MSB (big-endian
// bit numbering, as used by the instruction encoding).
2799 #define GET_FIELD(X, FROM, TO) \
2800 ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))
2802 // This function uses the order in the manuals, i.e. bit 0 is 2^0
2803 #define GET_FIELD_SP(X, FROM, TO) \
2804 GET_FIELD(X, 63 - (TO), 63 - (FROM))
// helper_array8: VIS ARRAY8 address computation — reorders blocked-byte
// pixel address fields per the UltraSPARC VIS specification.
2806 target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
2808 return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
2809 (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
2810 (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
2811 (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
2812 (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
2813 (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
2814 (((pixel_addr >> 55) & 1) << 4) |
2815 (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
2816 GET_FIELD_SP(pixel_addr, 11, 12);
// helper_alignaddr: compute addr+offset and record the low 3 bits in the
// GSR alignment field (GSR clearing line elided).
2819 target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
2823 tmp = addr + offset;
2825 env->gsr |= tmp & 7ULL;
// helper_popc: population count (POPC instruction).
2829 target_ulong helper_popc(target_ulong val)
2831 return ctpop64(val);
// get_gregset: select the global-register bank (normal/alternate/MMU/
// interrupt) for the given PSTATE AG/MG/IG bits (body elided here).
2834 static inline uint64_t *get_gregset(uint64_t pstate)
// change_pstate: switch PSTATE, swapping the active %g register bank if
// the AG/MG/IG selection (bits in mask 0xc01) changed.
2849 static inline void change_pstate(uint64_t new_pstate)
2851 uint64_t pstate_regs, new_pstate_regs;
2852 uint64_t *src, *dst;
// 0xc01 masks the global-bank selection bits of PSTATE.
2854 pstate_regs = env->pstate & 0xc01;
2855 new_pstate_regs = new_pstate & 0xc01;
2856 if (new_pstate_regs != pstate_regs) {
2857 // Switch global register bank
2858 src = get_gregset(new_pstate_regs);
2859 dst = get_gregset(pstate_regs);
// Save the live globals into the old bank, then load the new bank.
2860 memcpy32(dst, env->gregs);
2861 memcpy32(env->gregs, src);
2863 env->pstate = new_pstate;
// helper_wrpstate: write PSTATE (masked to defined bits) unless the CPU
// uses global-level (GL) semantics instead of AG/MG/IG banks.
2866 void helper_wrpstate(target_ulong new_state)
2868 if (!(env->def->features & CPU_FEATURE_GL))
2869 change_pstate(new_state & 0xf3f);
// helper_done: DONE instruction — return past the trapped instruction
// (TNPC/TNPC+4) and restore CCR/ASI/PSTATE/CWP from TSTATE, then pop TL.
2872 void helper_done(void)
2874 env->pc = env->tsptr->tpc;
2875 env->npc = env->tsptr->tnpc + 4;
2876 PUT_CCR(env, env->tsptr->tstate >> 32);
2877 env->asi = (env->tsptr->tstate >> 24) & 0xff;
2878 change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
2879 PUT_CWP64(env, env->tsptr->tstate & 0xff);
2881 env->tsptr = &env->ts[env->tl & MAXTL_MASK];
// helper_retry: RETRY instruction — re-execute the trapped instruction
// (TPC/TNPC), restoring the same state as DONE.
2884 void helper_retry(void)
2886 env->pc = env->tsptr->tpc;
2887 env->npc = env->tsptr->tnpc;
2888 PUT_CCR(env, env->tsptr->tstate >> 32);
2889 env->asi = (env->tsptr->tstate >> 24) & 0xff;
2890 change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
2891 PUT_CWP64(env, env->tsptr->tstate & 0xff);
2893 env->tsptr = &env->ts[env->tl & MAXTL_MASK];
// SOFTINT register accessors (set/clear/overwrite pending soft irqs).
2896 void helper_set_softint(uint64_t value)
2898 env->softint |= (uint32_t)value;
2901 void helper_clear_softint(uint64_t value)
2903 env->softint &= (uint32_t)~value;
2906 void helper_write_softint(uint64_t value)
2908 env->softint = (uint32_t)value;
// helper_flush: FLUSH instruction — invalidate translated code covering
// the 8-byte doubleword at addr (self-modifying code support).
2912 void helper_flush(target_ulong addr)
2915 tb_invalidate_page_range(addr, addr + 8);
2918 #ifdef TARGET_SPARC64
// Human-readable trap names for V9 trap types < 0x80, used only by the
// CPU_LOG_INT logging path in do_interrupt().
2920 static const char * const excp_names[0x80] = {
2921 [TT_TFAULT] = "Instruction Access Fault",
2922 [TT_TMISS] = "Instruction Access MMU Miss",
2923 [TT_CODE_ACCESS] = "Instruction Access Error",
2924 [TT_ILL_INSN] = "Illegal Instruction",
2925 [TT_PRIV_INSN] = "Privileged Instruction",
2926 [TT_NFPU_INSN] = "FPU Disabled",
2927 [TT_FP_EXCP] = "FPU Exception",
2928 [TT_TOVF] = "Tag Overflow",
2929 [TT_CLRWIN] = "Clean Windows",
2930 [TT_DIV_ZERO] = "Division By Zero",
2931 [TT_DFAULT] = "Data Access Fault",
2932 [TT_DMISS] = "Data Access MMU Miss",
2933 [TT_DATA_ACCESS] = "Data Access Error",
2934 [TT_DPROT] = "Data Protection Error",
2935 [TT_UNALIGNED] = "Unaligned Memory Access",
2936 [TT_PRIV_ACT] = "Privileged Action",
2937 [TT_EXTINT | 0x1] = "External Interrupt 1",
2938 [TT_EXTINT | 0x2] = "External Interrupt 2",
2939 [TT_EXTINT | 0x3] = "External Interrupt 3",
2940 [TT_EXTINT | 0x4] = "External Interrupt 4",
2941 [TT_EXTINT | 0x5] = "External Interrupt 5",
2942 [TT_EXTINT | 0x6] = "External Interrupt 6",
2943 [TT_EXTINT | 0x7] = "External Interrupt 7",
2944 [TT_EXTINT | 0x8] = "External Interrupt 8",
2945 [TT_EXTINT | 0x9] = "External Interrupt 9",
2946 [TT_EXTINT | 0xa] = "External Interrupt 10",
2947 [TT_EXTINT | 0xb] = "External Interrupt 11",
2948 [TT_EXTINT | 0xc] = "External Interrupt 12",
2949 [TT_EXTINT | 0xd] = "External Interrupt 13",
2950 [TT_EXTINT | 0xe] = "External Interrupt 14",
2951 [TT_EXTINT | 0xf] = "External Interrupt 15",
// do_interrupt (sparc64): deliver the pending trap. Logs it, checks for
// error state (TL overflow), saves PC/NPC/state into the trap stack,
// switches global bank and window per trap class, then vectors via TBR.
// NOTE(review): elided excerpt — braces/else arms between the visible
// statements are not shown here.
2955 void do_interrupt(CPUState *env)
2957 int intno = env->exception_index;
2960 if (qemu_loglevel_mask(CPU_LOG_INT)) {
// Pick a printable trap name by range (spill/fill/trap-instruction
// vectors are computed, others come from excp_names[]).
2964 if (intno < 0 || intno >= 0x180)
2966 else if (intno >= 0x100)
2967 name = "Trap Instruction";
2968 else if (intno >= 0xc0)
2969 name = "Window Fill";
2970 else if (intno >= 0x80)
2971 name = "Window Spill";
2973 name = excp_names[intno];
2978 qemu_log("%6d: %s (v=%04x) pc=%016" PRIx64 " npc=%016" PRIx64
2979 " SP=%016" PRIx64 "\n",
2982 env->npc, env->regwptr[6]);
2983 log_cpu_state(env, 0);
// Dump the 16 instruction bytes at PC (debug-only path).
2990 ptr = (uint8_t *)env->pc;
2991 for(i = 0; i < 16; i++) {
2992 qemu_log(" %02x", ldub(ptr + i));
3000 #if !defined(CONFIG_USER_ONLY)
// Trap while already at MAXTL is an unrecoverable error state.
3001 if (env->tl >= env->maxtl) {
3002 cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
3003 " Error state", env->exception_index, env->tl, env->maxtl);
3007 if (env->tl < env->maxtl - 1) {
// At MAXTL-1 the CPU enters RED state.
3010 env->pstate |= PS_RED;
3011 if (env->tl < env->maxtl)
// Save the pre-trap state into the new trap-stack entry.
3014 env->tsptr = &env->ts[env->tl & MAXTL_MASK];
3015 env->tsptr->tstate = ((uint64_t)GET_CCR(env) << 32) |
3016 ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
3018 env->tsptr->tpc = env->pc;
3019 env->tsptr->tnpc = env->npc;
3020 env->tsptr->tt = intno;
// Select the global bank for the handler: IG for interrupts, MG for MMU
// miss traps, AG otherwise (branch conditions elided).
3021 if (!(env->def->features & CPU_FEATURE_GL)) {
3024 change_pstate(PS_PEF | PS_PRIV | PS_IG);
3031 change_pstate(PS_PEF | PS_PRIV | PS_MG);
3034 change_pstate(PS_PEF | PS_PRIV | PS_AG);
// Window adjustment for clean-window / spill / fill traps.
3038 if (intno == TT_CLRWIN)
3039 cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1));
3040 else if ((intno & 0x1c0) == TT_SPILL)
3041 cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2));
3042 else if ((intno & 0x1c0) == TT_FILL)
3043 cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
// Vector: TBR low bits = TL>1 flag | trap type << 5.
3044 env->tbr &= ~0x7fffULL;
3045 env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
3047 env->npc = env->pc + 4;
3048 env->exception_index = 0;
// Human-readable trap names for sparc32 trap types < 0x80, used only by
// the CPU_LOG_INT logging path in do_interrupt().
// FIX: removed the duplicate [TT_TOVF] designated initializer — the table
// initialized TT_TOVF twice with the same string; the second entry
// silently overrode the first and triggered -Winitializer-overrides.
3052 static const char * const excp_names[0x80] = {
3053 [TT_TFAULT] = "Instruction Access Fault",
3054 [TT_ILL_INSN] = "Illegal Instruction",
3055 [TT_PRIV_INSN] = "Privileged Instruction",
3056 [TT_NFPU_INSN] = "FPU Disabled",
3057 [TT_WIN_OVF] = "Window Overflow",
3058 [TT_WIN_UNF] = "Window Underflow",
3059 [TT_UNALIGNED] = "Unaligned Memory Access",
3060 [TT_FP_EXCP] = "FPU Exception",
3061 [TT_DFAULT] = "Data Access Fault",
3062 [TT_TOVF] = "Tag Overflow",
3063 [TT_EXTINT | 0x1] = "External Interrupt 1",
3064 [TT_EXTINT | 0x2] = "External Interrupt 2",
3065 [TT_EXTINT | 0x3] = "External Interrupt 3",
3066 [TT_EXTINT | 0x4] = "External Interrupt 4",
3067 [TT_EXTINT | 0x5] = "External Interrupt 5",
3068 [TT_EXTINT | 0x6] = "External Interrupt 6",
3069 [TT_EXTINT | 0x7] = "External Interrupt 7",
3070 [TT_EXTINT | 0x8] = "External Interrupt 8",
3071 [TT_EXTINT | 0x9] = "External Interrupt 9",
3072 [TT_EXTINT | 0xa] = "External Interrupt 10",
3073 [TT_EXTINT | 0xb] = "External Interrupt 11",
3074 [TT_EXTINT | 0xc] = "External Interrupt 12",
3075 [TT_EXTINT | 0xd] = "External Interrupt 13",
3076 [TT_EXTINT | 0xe] = "External Interrupt 14",
3077 [TT_EXTINT | 0xf] = "External Interrupt 15",
3079 [TT_CODE_ACCESS] = "Instruction Access Error",
3080 [TT_DATA_ACCESS] = "Data Access Error",
3081 [TT_DIV_ZERO] = "Division By Zero",
3082 [TT_NCP_INSN] = "Coprocessor Disabled",
// do_interrupt (sparc32): deliver the pending trap — log it, abort if
// traps are disabled (error state), open a new window, save PC/NPC into
// %l1/%l2, enter supervisor mode, and vector through TBR.
// NOTE(review): elided excerpt — braces/else arms are not shown here.
3086 void do_interrupt(CPUState *env)
3088 int cwp, intno = env->exception_index;
3091 if (qemu_loglevel_mask(CPU_LOG_INT)) {
3095 if (intno < 0 || intno >= 0x100)
3097 else if (intno >= 0x80)
3098 name = "Trap Instruction";
3100 name = excp_names[intno];
3105 qemu_log("%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
3108 env->npc, env->regwptr[6]);
3109 log_cpu_state(env, 0);
// Dump the 16 instruction bytes at PC (debug-only path).
3116 ptr = (uint8_t *)env->pc;
3117 for(i = 0; i < 16; i++) {
3118 qemu_log(" %02x", ldub(ptr + i));
3126 #if !defined(CONFIG_USER_ONLY)
// Trap with PSR.ET clear is an unrecoverable error state.
3127 if (env->psret == 0) {
3128 cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
3129 env->exception_index);
// Open a new register window and save the return state in %l1/%l2.
3134 cwp = cpu_cwp_dec(env, env->cwp - 1);
3135 cpu_set_cwp(env, cwp);
3136 env->regwptr[9] = env->pc;
3137 env->regwptr[10] = env->npc;
3138 env->psrps = env->psrs;
// Vector through the trap base register: tt field at bits 11:4.
3140 env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
3142 env->npc = env->pc + 4;
3143 env->exception_index = 0;
3147 #if !defined(CONFIG_USER_ONLY)
3149 static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
3152 #define MMUSUFFIX _mmu
3153 #define ALIGNED_ONLY
3156 #include "softmmu_template.h"
3159 #include "softmmu_template.h"
3162 #include "softmmu_template.h"
3165 #include "softmmu_template.h"
3167 /* XXX: make it generic ? */
// cpu_restore_state2: given a host return address inside generated code,
// find the TB containing it and restore the guest CPU state (PC etc.) to
// the instruction that faulted, so an exception can be raised precisely.
3168 static void cpu_restore_state2(void *retaddr)
3170 TranslationBlock *tb;
3174 /* now we have a real cpu fault */
3175 pc = (unsigned long)retaddr;
3176 tb = tb_find_pc(pc);
3178 /* the PC is inside the translated code. It means that we have
3179 a virtual CPU fault */
3180 cpu_restore_state(tb, env, pc, (void *)(long)env->cond);
// do_unaligned_access: softmmu callback for a misaligned guest access —
// restore precise guest state, then raise the unaligned-access trap.
3185 static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
3188 #ifdef DEBUG_UNALIGNED
3189 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
3190 "\n", addr, env->pc);
3192 cpu_restore_state2(retaddr);
3193 raise_exception(TT_UNALIGNED);
3196 /* try to fill the TLB and return an exception if error. If retaddr is
3197 NULL, it means that the function was called in C code (i.e. not
3198 from generated code or from helper.c) */
3199 /* XXX: fix it to restore all registers */
// tlb_fill: softmmu TLB-miss entry point. Asks the SPARC MMU model to
// resolve the fault; on failure, restores guest state (when called from
// generated code) and raises the MMU exception (raise path elided).
3200 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3203 CPUState *saved_env;
3205 /* XXX: hack to restore env in all cases, even if not called from
3208 env = cpu_single_env;
3210 ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3212 cpu_restore_state2(retaddr);
3220 #ifndef TARGET_SPARC64
// do_unassigned_access (sparc32): record the fault in the SPARC reference
// MMU fault-status (mmuregs[3]) and fault-address (mmuregs[4]) registers,
// then raise an access-error trap if the MMU is enabled and not in
// no-fault mode. NOTE(review): elided excerpt — the #else and branch
// lines between the visible statements are not shown.
3221 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
3222 int is_asi, int size)
3224 CPUState *saved_env;
3226 /* XXX: hack to restore env in all cases, even if not called from
3229 env = cpu_single_env;
3230 #ifdef DEBUG_UNASSIGNED
3232 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
3233 " asi 0x%02x from " TARGET_FMT_lx "\n",
3234 is_exec ? "exec" : is_write ? "write" : "read", size,
3235 size == 1 ? "" : "s", addr, is_asi, env->pc);
3237 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
3238 " from " TARGET_FMT_lx "\n",
3239 is_exec ? "exec" : is_write ? "write" : "read", size,
3240 size == 1 ? "" : "s", addr, env->pc);
// A second fault before the status register was read sets overflow.
3242 if (env->mmuregs[3]) /* Fault status register */
3243 env->mmuregs[3] = 1; /* overflow (not read before another fault) */
3245 env->mmuregs[3] |= 1 << 16;
// Encode access type bits (supervisor/exec/write) in the FSR.
3247 env->mmuregs[3] |= 1 << 5;
3249 env->mmuregs[3] |= 1 << 6;
3251 env->mmuregs[3] |= 1 << 7;
3252 env->mmuregs[3] |= (5 << 2) | 2;
3253 env->mmuregs[4] = addr; /* Fault address register */
// Trap only when the MMU is enabled and no-fault mode is off.
3254 if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
3256 raise_exception(TT_CODE_ACCESS);
3258 raise_exception(TT_DATA_ACCESS);
// do_unassigned_access (sparc64): no MMU fault registers to update —
// just raise the appropriate access-error trap (code vs data).
3263 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
3264 int is_asi, int size)
3266 #ifdef DEBUG_UNASSIGNED
3267 CPUState *saved_env;
3269 /* XXX: hack to restore env in all cases, even if not called from
3272 env = cpu_single_env;
3273 printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
3274 "\n", addr, env->pc);
3278 raise_exception(TT_CODE_ACCESS);
3280 raise_exception(TT_DATA_ACCESS);
3284 #ifdef TARGET_SPARC64
// Thin wrappers around the system-mode tick timer; no-ops (or zero) in
// user-only builds where the timer does not exist.
3285 void helper_tick_set_count(void *opaque, uint64_t count)
3287 #if !defined(CONFIG_USER_ONLY)
3288 cpu_tick_set_count(opaque, count);
3292 uint64_t helper_tick_get_count(void *opaque)
3294 #if !defined(CONFIG_USER_ONLY)
3295 return cpu_tick_get_count(opaque);
3301 void helper_tick_set_limit(void *opaque, uint64_t limit)
3303 #if !defined(CONFIG_USER_ONLY)
3304 cpu_tick_set_limit(opaque, limit);