8 static uint32_t cpu_arm_find_by_name(const char *name);
/* Turn on a single CPU feature flag: sets bit 'feature' in env->features.
   Queried elsewhere via arm_feature().  */
10 static inline void set_feature(CPUARMState *env, int feature)
12 env->features |= 1u << feature;
/* Apply per-model reset state for CPU 'id': record the CPUID, set the
   feature bits the model supports, and load the reset values of the
   cp15 cache-type and control registers.  The hex constants are the
   documented reset values for each core (per the respective TRMs —
   not derivable from this file alone).  Unknown ids abort qemu.  */
15 static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
17 env->cp15.c0_cpuid = id;
19 case ARM_CPUID_ARM926:
20 set_feature(env, ARM_FEATURE_VFP);
21 env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
22 env->cp15.c0_cachetype = 0x1dd20d2;
23 env->cp15.c1_sys = 0x00090078;
25 case ARM_CPUID_ARM946:
/* ARM946 is an MPU (protection-unit) core, not a full MMU core.  */
26 set_feature(env, ARM_FEATURE_MPU);
27 env->cp15.c0_cachetype = 0x0f004006;
28 env->cp15.c1_sys = 0x00000078;
30 case ARM_CPUID_ARM1026:
31 set_feature(env, ARM_FEATURE_VFP);
32 set_feature(env, ARM_FEATURE_AUXCR);
33 env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
34 env->cp15.c0_cachetype = 0x1dd20d2;
35 env->cp15.c1_sys = 0x00090078;
37 case ARM_CPUID_TI915T:
38 case ARM_CPUID_TI925T:
39 set_feature(env, ARM_FEATURE_OMAPCP);
/* Both TI915T and TI925T report the TI925T id out of reset; the
   OS_TYPE bit written via cp15 c15 later selects between them.  */
40 env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring. */
41 env->cp15.c0_cachetype = 0x5109149;
42 env->cp15.c1_sys = 0x00000070;
43 env->cp15.c15_i_max = 0x000;
44 env->cp15.c15_i_min = 0xff0;
46 case ARM_CPUID_PXA250:
47 case ARM_CPUID_PXA255:
48 case ARM_CPUID_PXA260:
49 case ARM_CPUID_PXA261:
50 case ARM_CPUID_PXA262:
/* PXA25x/26x: XScale core without iwMMXt.  */
51 set_feature(env, ARM_FEATURE_XSCALE);
52 /* JTAG_ID is ((id << 28) | 0x09265013) */
53 env->cp15.c0_cachetype = 0xd172172;
54 env->cp15.c1_sys = 0x00000078;
56 case ARM_CPUID_PXA270_A0:
57 case ARM_CPUID_PXA270_A1:
58 case ARM_CPUID_PXA270_B0:
59 case ARM_CPUID_PXA270_B1:
60 case ARM_CPUID_PXA270_C0:
61 case ARM_CPUID_PXA270_C5:
/* PXA270 steppings: XScale core with the iwMMXt SIMD extension.  */
62 set_feature(env, ARM_FEATURE_XSCALE);
63 /* JTAG_ID is ((id << 28) | 0x09265013) */
64 set_feature(env, ARM_FEATURE_IWMMXT);
/* wCID coprocessor ID register; low byte is the ASCII core revision.  */
65 env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
66 env->cp15.c0_cachetype = 0xd172172;
67 env->cp15.c1_sys = 0x00000078;
70 cpu_abort(env, "Bad CPU ID: %x\n", id);
/* Reset the CPU to its architectural power-on state.  The CPUID is
   saved across the wipe so the model-specific reset can be re-applied;
   only the state up to 'breakpoints' is cleared, so fields at and after
   it in CPUARMState survive reset.  */
75 void cpu_reset(CPUARMState *env)
78 id = env->cp15.c0_cpuid;
79 memset(env, 0, offsetof(CPUARMState, breakpoints));
81 cpu_reset_model_id(env, id);
82 #if defined (CONFIG_USER_ONLY)
/* User-mode emulation: start in USR mode with VFP enabled
   (FPEXC bit 30 is the enable bit).  */
83 env->uncached_cpsr = ARM_CPU_MODE_USR;
84 env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
86 /* SVC mode with interrupts disabled. */
87 env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
/* VFP starts disabled on a real reset.  */
88 env->vfp.xregs[ARM_VFP_FPEXC] = 0;
/* Allocate and initialise a CPU state for the named model.
   Looks up 'cpu_model' in arm_cpu_names (cpu_arm_find_by_name returns 0
   when not found — presumably handled by an elided error path; verify),
   then allocates a zeroed CPUARMState and records the CPUID so the
   subsequent reset can select the model.  */
94 CPUARMState *cpu_arm_init(const char *cpu_model)
99 id = cpu_arm_find_by_name(cpu_model);
102 env = qemu_mallocz(sizeof(CPUARMState));
106 env->cp15.c0_cpuid = id;
/* Mapping from user-visible "-cpu" names to CPUID values.  Note the
   bare "pxa270" alias in addition to the stepping-specific entries.
   Iterated until a NULL name, so the (elided) terminator entry matters.  */
116 static const struct arm_cpu_t arm_cpu_names[] = {
117 { ARM_CPUID_ARM926, "arm926"},
118 { ARM_CPUID_ARM946, "arm946"},
119 { ARM_CPUID_ARM1026, "arm1026"},
120 { ARM_CPUID_TI925T, "ti925t" },
121 { ARM_CPUID_PXA250, "pxa250" },
122 { ARM_CPUID_PXA255, "pxa255" },
123 { ARM_CPUID_PXA260, "pxa260" },
124 { ARM_CPUID_PXA261, "pxa261" },
125 { ARM_CPUID_PXA262, "pxa262" },
126 { ARM_CPUID_PXA270, "pxa270" },
127 { ARM_CPUID_PXA270_A0, "pxa270-a0" },
128 { ARM_CPUID_PXA270_A1, "pxa270-a1" },
129 { ARM_CPUID_PXA270_B0, "pxa270-b0" },
130 { ARM_CPUID_PXA270_B1, "pxa270-b1" },
131 { ARM_CPUID_PXA270_C0, "pxa270-c0" },
132 { ARM_CPUID_PXA270_C5, "pxa270-c5" },
/* Print the list of supported CPU model names to 'f' using the caller's
   fprintf-style callback (used by "-cpu ?" / help output).  */
136 void arm_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
140 (*cpu_fprintf)(f, "Available CPUs:\n");
141 for (i = 0; arm_cpu_names[i].name; i++) {
142 (*cpu_fprintf)(f, " %s\n", arm_cpu_names[i].name);
/* Look up a CPU model name in arm_cpu_names and return its CPUID.
   Returns 0 if the name is not found.  */
146 /* return 0 if not found */
147 static uint32_t cpu_arm_find_by_name(const char *name)
153 for (i = 0; arm_cpu_names[i].name; i++) {
154 if (strcmp(name, arm_cpu_names[i].name) == 0) {
155 id = arm_cpu_names[i].id;
162 void cpu_arm_close(CPUARMState *env)
167 #if defined(CONFIG_USER_ONLY)
/* User-mode emulation stub: exceptions are delivered to the host process,
   so just clear the pending exception index.  */
169 void do_interrupt (CPUState *env)
171 env->exception_index = -1;
/* User-mode emulation MMU fault handler: there is no guest MMU, so every
   fault is reported back as a prefetch or data abort with the faulting
   address stored in the corresponding cp15 fault-address register.  */
174 int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
175 int mmu_idx, int is_softmmu)
/* Instruction-fetch fault path.  */
178 env->exception_index = EXCP_PREFETCH_ABORT;
179 env->cp15.c6_insn = address;
/* Data-access fault path.  */
181 env->exception_index = EXCP_DATA_ABORT;
182 env->cp15.c6_data = address;
187 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
/* User-mode stub: coprocessor writes are not supported; abort with the
   coprocessor number and offending instruction word.  */
192 /* These should probably raise undefined insn exceptions. */
193 void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
/* Bits 11:8 of the insn encode the coprocessor number.  */
195 int op1 = (insn >> 8) & 0xf;
196 cpu_abort(env, "cp%i insn %08x\n", op1, insn);
/* User-mode stub: coprocessor reads are not supported; abort.  */
200 uint32_t helper_get_cp(CPUState *env, uint32_t insn)
202 int op1 = (insn >> 8) & 0xf;
203 cpu_abort(env, "cp%i insn %08x\n", op1, insn);
/* User-mode stub: cp15 is a privileged coprocessor, unavailable here.  */
207 void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
209 cpu_abort(env, "cp15 insn %08x\n", insn);
/* User-mode stub: cp15 reads are privileged, unavailable here.  */
212 uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
214 cpu_abort(env, "cp15 insn %08x\n", insn);
/* User-mode emulation only ever runs in USR mode; any attempt to switch
   to a privileged mode is a fatal emulation error.  */
218 void switch_mode(CPUState *env, int mode)
220 if (mode != ARM_CPU_MODE_USR)
221 cpu_abort(env, "Tried to switch out of user mode\n");
226 extern int semihosting_enabled;
228 /* Map CPU modes onto saved register banks. */
/* Returns the index into env->banked_r13/banked_r14/banked_spsr for the
   given CPSR mode.  USR and SYS share one bank; each privileged mode has
   its own (return values for each case are elided in this view).
   Unknown modes are fatal.  */
229 static inline int bank_number (int mode)
232 case ARM_CPU_MODE_USR:
233 case ARM_CPU_MODE_SYS:
235 case ARM_CPU_MODE_SVC:
237 case ARM_CPU_MODE_ABT:
239 case ARM_CPU_MODE_UND:
241 case ARM_CPU_MODE_IRQ:
243 case ARM_CPU_MODE_FIQ:
/* cpu_single_env is used because this helper has no env parameter.  */
246 cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
/* Switch the CPU to a new processor mode: swap the live r13/r14/spsr
   with the banked copies for the old and new modes.  FIQ additionally
   banks r8-r12, handled by the memcpy pairs below.  No-op if the mode
   is unchanged.  Does not modify CPSR itself — the caller does that.  */
250 void switch_mode(CPUState *env, int mode)
255 old_mode = env->uncached_cpsr & CPSR_M;
256 if (mode == old_mode)
/* Leaving FIQ: stash r8-r12 into the FIQ bank, restore the USR copies.  */
259 if (old_mode == ARM_CPU_MODE_FIQ) {
260 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
261 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
/* Entering FIQ: stash the USR r8-r12, load the FIQ bank.  */
262 } else if (mode == ARM_CPU_MODE_FIQ) {
263 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
264 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
/* Save the outgoing mode's sp/lr/spsr ...  */
267 i = bank_number(old_mode);
268 env->banked_r13[i] = env->regs[13];
269 env->banked_r14[i] = env->regs[14];
270 env->banked_spsr[i] = env->spsr;
/* ... and load the incoming mode's.  */
272 i = bank_number(mode);
273 env->regs[13] = env->banked_r13[i];
274 env->regs[14] = env->banked_r14[i];
275 env->spsr = env->banked_spsr[i];
278 /* Handle a CPU exception. */
/* Deliver the pending exception in env->exception_index to the guest:
   choose the target mode, CPSR interrupt mask and vector, save the old
   CPSR into the new mode's SPSR, set LR to the return address and jump
   to the vector.  */
279 void do_interrupt(CPUARMState *env)
286 /* TODO: Vectored interrupt controller. */
287 switch (env->exception_index) {
/* Undefined instruction: taken in UND mode.  */
289 new_mode = ARM_CPU_MODE_UND;
/* SWI/SVC: first give semihosting a chance to intercept the call.  */
298 if (semihosting_enabled) {
299 /* Check for semihosting interrupt. */
/* Fetch the SWI immediate from the instruction just executed:
   8-bit immediate for Thumb, 24-bit for ARM.  */
301 mask = lduw_code(env->regs[15] - 2) & 0xff;
303 mask = ldl_code(env->regs[15] - 4) & 0xffffff;
305 /* Only intercept calls from privileged modes, to provide some
306 semblance of security. */
/* 0x123456 is the ARM-state semihosting SWI number, 0xab the
   Thumb-state one.  */
307 if (((mask == 0x123456 && !env->thumb)
308 || (mask == 0xab && env->thumb))
309 && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
310 env->regs[0] = do_arm_semihosting(env);
/* Otherwise a real SWI: taken in SVC mode.  */
314 new_mode = ARM_CPU_MODE_SVC;
317 /* The PC already points to the next instructon. */
320 case EXCP_PREFETCH_ABORT:
322 new_mode = ARM_CPU_MODE_ABT;
324 mask = CPSR_A | CPSR_I;
327 case EXCP_DATA_ABORT:
328 new_mode = ARM_CPU_MODE_ABT;
330 mask = CPSR_A | CPSR_I;
/* IRQ: masks IRQ and imprecise aborts, leaves FIQ enabled.  */
334 new_mode = ARM_CPU_MODE_IRQ;
336 /* Disable IRQ and imprecise data aborts. */
337 mask = CPSR_A | CPSR_I;
341 new_mode = ARM_CPU_MODE_FIQ;
343 /* Disable FIQ, IRQ and imprecise data aborts. */
344 mask = CPSR_A | CPSR_I | CPSR_F;
348 cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
349 return; /* Never happens. Keep compiler happy. */
/* Control register V bit (13): relocate vectors to 0xffff0000.  */
352 if (env->cp15.c1_sys & (1 << 13)) {
355 switch_mode (env, new_mode);
/* SPSR of the new mode gets the pre-exception CPSR.  */
356 env->spsr = cpsr_read(env);
357 /* Switch to the new mode, and switch to Arm mode. */
358 /* ??? Thumb interrupt handlers not implemented. */
359 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
360 env->uncached_cpsr |= mask;
/* 'offset' is the mode-specific LR adjustment (set in the elided case
   bodies above).  */
362 env->regs[14] = env->regs[15] + offset;
363 env->regs[15] = addr;
/* Force an exit from the current TB so execution restarts at the
   vector with the new mode.  */
364 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
367 /* Check section/page access permissions.
368 Returns the page protection flags, or zero if the access is not
/* Translates the 2-bit AP field (plus the S/R bits of the cp15 control
   register, c1_sys bits 9:8, for AP == 0) into PAGE_READ/PAGE_WRITE.
   NOTE(review): access_type == 1 appears to denote a write access
   (it is the case that yields no permission below) — confirm against
   the (elided) callers.  Domain "manager" accesses bypass the check
   entirely and get full access.  */
370 static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
/* Manager domain: permissions are not checked at all.  */
374 return PAGE_READ | PAGE_WRITE;
378 if (access_type == 1)
/* AP == 0: access controlled by the System/ROM bits.  */
380 switch ((env->cp15.c1_sys >> 8) & 3) {
/* S=1: read-only for privileged code, no user access.  */
382 return is_user ? 0 : PAGE_READ;
/* AP == 1: privileged read/write, no user access.  */
389 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
/* AP == 2: user reads allowed, user writes fault.  */
392 return (access_type == 1) ? 0 : PAGE_READ;
/* AP == 3: full access for everyone.  */
394 return PAGE_READ | PAGE_WRITE;
396 return PAGE_READ | PAGE_WRITE;
/* Translate a virtual address to a physical address and protection bits.
   access_type: 2 = instruction fetch (see the c5_insn/c5_data split
   below); 0/1 = data access.  Returns 0 on success with *phys_ptr and
   *prot filled in, otherwise an ARM fault-status code with the domain
   in bits 7:4.  Handles three configurations: MMU/MPU off (flat map),
   MPU cores (region registers), and the ARMv5 two-level page table.  */
402 static int get_phys_addr(CPUState *env, uint32_t address, int access_type,
403 int is_user, uint32_t *phys_ptr, int *prot)
413 /* Fast Context Switch Extension. */
/* FCSE remaps the low 32MB through the c13 FCSE PID register.  */
414 if (address < 0x02000000)
415 address += env->cp15.c13_fcse;
/* Control register M bit (bit 0) gates all translation.  */
417 if ((env->cp15.c1_sys & 1) == 0) {
418 /* MMU/MPU disabled. */
420 *prot = PAGE_READ | PAGE_WRITE;
421 } else if (arm_feature(env, ARM_FEATURE_MPU)) {
/* MPU: scan the 8 region registers, highest number wins.  */
427 for (n = 7; n >= 0; n--) {
428 base = env->cp15.c6_region[n];
/* Region size is encoded in base bits 5:1 as a power of two.  */
431 mask = 1 << ((base >> 1) & 0x1f);
432 /* Keep this shift separate from the above to avoid an
433 (undefined) << 32. */
434 mask = (mask << 1) - 1;
435 if (((base ^ address) & ~mask) == 0)
/* Pick the per-region permission nibble from the insn or data
   access-permission register depending on the access type.  */
441 if (access_type == 2) {
442 mask = env->cp15.c5_insn;
444 mask = env->cp15.c5_data;
446 mask = (mask >> (n * 4)) & 0xf;
453 *prot = PAGE_READ | PAGE_WRITE;
461 *prot = PAGE_READ | PAGE_WRITE;
472 /* Bad permission. */
476 /* Pagetable walk. */
477 /* Lookup l1 descriptor. */
/* TTBR (c2_base) is 16KB aligned; index by VA bits 31:20.  */
478 table = (env->cp15.c2_base & 0xffffc000) | ((address >> 18) & 0x3ffc);
479 desc = ldl_phys(table);
/* Domain field of the descriptor selects a 2-bit DACR entry.  */
481 domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
483 /* Secton translation fault. */
/* Domain 0 = no access, 2 = reserved: domain fault.  */
487 if (domain == 0 || domain == 2) {
489 code = 9; /* Section domain fault. */
491 code = 11; /* Page domain fault. */
/* 1MB section: physical address straight from the L1 descriptor.  */
496 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
497 ap = (desc >> 10) & 3;
500 /* Lookup l2 entry. */
502 /* Coarse pagetable. */
503 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
505 /* Fine pagetable. */
506 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
508 desc = ldl_phys(table);
510 case 0: /* Page translation fault. */
513 case 1: /* 64k page. */
514 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
/* Select the subpage AP field from VA bits 15:14.  */
515 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
517 case 2: /* 4k page. */
518 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
519 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
521 case 3: /* 1k page. */
/* XScale cores treat "tiny" descriptors as 4k ("extended small")
   pages rather than 1k pages.  */
523 if (arm_feature(env, ARM_FEATURE_XSCALE))
524 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
526 /* Page translation fault. */
531 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
532 ap = (desc >> 4) & 3;
535 /* Never happens, but compiler isn't smart enough to tell. */
540 *prot = check_ap(env, ap, domain, access_type, is_user);
542 /* Access permission fault. */
545 *phys_ptr = phys_addr;
/* FSR value: fault code with the domain in bits 7:4.  */
549 return code | (domain << 4);
/* Softmmu TLB-miss handler: translate 'address' and either install a TLB
   entry or record the fault in the cp15 FSR/FAR registers and raise the
   appropriate abort exception.  */
552 int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
553 int access_type, int mmu_idx, int is_softmmu)
559 is_user = mmu_idx == MMU_USER_IDX;
560 ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
562 /* Map a single [sub]page. */
/* 1KB is the smallest translation granule (tiny pages), so map at
   1KB alignment to stay correct for every page size.  */
563 phys_addr &= ~(uint32_t)0x3ff;
564 address &= ~(uint32_t)0x3ff;
565 return tlb_set_page (env, address, phys_addr, prot, mmu_idx,
/* Translation failed: ret is the FSR value from get_phys_addr.
   access_type == 2 means instruction fetch.  */
569 if (access_type == 2) {
570 env->cp15.c5_insn = ret;
571 env->cp15.c6_insn = address;
572 env->exception_index = EXCP_PREFETCH_ABORT;
574 env->cp15.c5_data = ret;
575 env->cp15.c6_data = address;
576 env->exception_index = EXCP_DATA_ABORT;
/* Debugger address translation: walk the tables as a privileged read
   (access_type 0, is_user 0) without touching fault state or the TLB.  */
581 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
587 ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);
/* MCR to a generic coprocessor: decode the register/opcode fields from
   the instruction word and dispatch to the write hook registered via
   cpu_arm_set_cp_io (silently ignored if no hook is installed).  */
595 void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
597 int cp_num = (insn >> 8) & 0xf;
598 int cp_info = (insn >> 5) & 7;
599 int src = (insn >> 16) & 0xf;
600 int operand = insn & 0xf;
602 if (env->cp[cp_num].cp_write)
603 env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
604 cp_info, src, operand, val);
/* MRC from a generic coprocessor: dispatch to the registered read hook.
   Falls through (elided) when no hook is installed.  */
607 uint32_t helper_get_cp(CPUState *env, uint32_t insn)
609 int cp_num = (insn >> 8) & 0xf;
610 int cp_info = (insn >> 5) & 7;
611 int dest = (insn >> 16) & 0xf;
612 int operand = insn & 0xf;
614 if (env->cp[cp_num].cp_read)
615 return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
616 cp_info, dest, operand);
620 /* Return basic MPU access permission bits. */
/* Internal storage keeps MPU access permissions in the extended
   (4-bits-per-region) format; this compresses them back to the basic
   2-bits-per-region layout for guest reads.  'mask' is widened by the
   elided loop body as 'i' advances.  Inverse of extended_mpu_ap_bits.  */
621 static uint32_t simple_mpu_ap_bits(uint32_t val)
628 for (i = 0; i < 16; i += 2) {
629 ret |= (val >> i) & mask;
635 /* Pad basic MPU access permission bits to extended format. */
/* Expand a basic (2-bits-per-region) MPU permission value into the
   extended (4-bits-per-region) layout used for internal storage.
   Inverse of simple_mpu_ap_bits.  */
636 static uint32_t extended_mpu_ap_bits(uint32_t val)
643 for (i = 0; i < 16; i += 2) {
644 ret |= (val & mask) << i;
/* MCR to cp15 (system control coprocessor): decode CRn from insn bits
   19:16 and emulate the write.  Unimplemented registers abort (see the
   note at the bottom).  Several writes require TLB or TB flushes because
   qemu caches translations the registers affect.  */
650 void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
655 op2 = (insn >> 5) & 7;
657 switch ((insn >> 16) & 0xf) {
658 case 0: /* ID codes. */
/* ID registers are read-only on most cores; XScale/OMAP have
   (elided) special-case handling here.  */
659 if (arm_feature(env, ARM_FEATURE_XSCALE))
661 if (arm_feature(env, ARM_FEATURE_OMAPCP))
664 case 1: /* System configuration. */
665 if (arm_feature(env, ARM_FEATURE_OMAPCP))
/* Control register: on XScale only crm == 0 writes reach c1_sys.  */
669 if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
670 env->cp15.c1_sys = val;
671 /* ??? Lots of these bits are not implemented. */
672 /* This may enable/disable the MMU, so do a TLB flush. */
/* Auxiliary control register (XScale variant).  */
676 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
677 env->cp15.c1_xscaleauxcr = val;
/* Coprocessor access register.  */
682 if (arm_feature(env, ARM_FEATURE_XSCALE))
684 env->cp15.c1_coproc = val;
685 /* ??? Is this safe when called from within a TB? */
692 case 2: /* MMU Page table control / MPU cache control. */
693 if (arm_feature(env, ARM_FEATURE_MPU)) {
694 env->cp15.c2_data = val;
699 env->cp15.c2_insn = val;
/* MMU cores: translation table base.  */
705 env->cp15.c2_base = val;
708 case 3: /* MMU Domain access control / MPU write buffer control. */
710 tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
712 case 4: /* Reserved. */
714 case 5: /* MMU Fault status / MPU access permission. */
715 if (arm_feature(env, ARM_FEATURE_OMAPCP))
/* MPU cores store the basic-format AP registers internally in
   extended format, so expand on write.  */
719 if (arm_feature(env, ARM_FEATURE_MPU))
720 val = extended_mpu_ap_bits(val);
721 env->cp15.c5_data = val;
724 if (arm_feature(env, ARM_FEATURE_MPU))
725 val = extended_mpu_ap_bits(val);
726 env->cp15.c5_insn = val;
/* Extended-format AP registers exist only on MPU cores.  */
729 if (!arm_feature(env, ARM_FEATURE_MPU))
731 env->cp15.c5_data = val;
734 if (!arm_feature(env, ARM_FEATURE_MPU))
736 env->cp15.c5_insn = val;
742 case 6: /* MMU Fault address / MPU base/size. */
/* MPU cores: crm selects one of the 8 region registers.  */
743 if (arm_feature(env, ARM_FEATURE_MPU)) {
746 env->cp15.c6_region[crm] = val;
748 if (arm_feature(env, ARM_FEATURE_OMAPCP))
752 env->cp15.c6_data = val;
755 env->cp15.c6_insn = val;
762 case 7: /* Cache control. */
/* Resetting I_max/I_min here mirrors the TI925T reset values;
   caches themselves are not modelled.  */
763 env->cp15.c15_i_max = 0x000;
764 env->cp15.c15_i_min = 0xff0;
765 /* No cache, so nothing to do. */
767 case 8: /* MMU TLB control. */
769 case 0: /* Invalidate all. */
772 case 1: /* Invalidate single TLB entry. */
774 /* ??? This is wrong for large pages and sections. */
775 /* As an ugly hack to make linux work we always flush a 4K
/* Flush all four 1KB subpages covering the 4K page (qemu maps at
   1KB granularity — see cpu_arm_handle_mmu_fault).  */
778 tlb_flush_page(env, val);
779 tlb_flush_page(env, val + 0x400);
780 tlb_flush_page(env, val + 0x800);
781 tlb_flush_page(env, val + 0xc00);
/* case 9: cache/TCM lockdown.  */
791 if (arm_feature(env, ARM_FEATURE_OMAPCP))
794 case 0: /* Cache lockdown. */
797 env->cp15.c9_data = val;
800 env->cp15.c9_insn = val;
806 case 1: /* TCM memory region registers. */
807 /* Not implemented. */
813 case 10: /* MMU TLB lockdown. */
814 /* ??? TLB lockdown not implemented. */
816 case 12: /* Reserved. */
818 case 13: /* Process ID. */
821 /* Unlike real hardware the qemu TLB uses virtual addresses,
822 not modified virtual addresses, so this causes a TLB flush.
824 if (env->cp15.c13_fcse != val)
826 env->cp15.c13_fcse = val;
829 /* This changes the ASID, so do a TLB flush. */
830 if (env->cp15.c13_context != val
831 && !arm_feature(env, ARM_FEATURE_MPU))
833 env->cp15.c13_context = val;
839 case 14: /* Reserved. */
841 case 15: /* Implementation specific. */
/* XScale CPAR: controls guest access to cp0..cp13.  */
842 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
843 if (op2 == 0 && crm == 1) {
844 if (env->cp15.c15_cpar != (val & 0x3fff)) {
845 /* Changes cp0 to cp13 behavior, so needs a TB flush. */
847 env->cp15.c15_cpar = val & 0x3fff;
853 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
857 case 1: /* Set TI925T configuration. */
858 env->cp15.c15_ticonfig = val & 0xe7;
/* OS_TYPE bit flips the reported CPUID between TI915T/TI925T.  */
859 env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
860 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
862 case 2: /* Set I_max. */
863 env->cp15.c15_i_max = val;
865 case 3: /* Set I_min. */
866 env->cp15.c15_i_min = val;
868 case 4: /* Set thread-ID. */
869 env->cp15.c15_threadid = val & 0xffff;
871 case 8: /* Wait-for-interrupt (deprecated). */
872 cpu_interrupt(env, CPU_INTERRUPT_HALT);
882 /* ??? For debugging only. Should raise illegal instruction exception. */
883 cpu_abort(env, "Unimplemented cp15 register write\n");
/* MRC from cp15: decode CRn from insn bits 19:16 and return the register
   value.  Unimplemented registers abort (see the note at the bottom).  */
886 uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
891 op2 = (insn >> 5) & 7;
893 switch ((insn >> 16) & 0xf) {
894 case 0: /* ID codes. */
896 default: /* Device ID. */
897 return env->cp15.c0_cpuid;
898 case 1: /* Cache Type. */
899 return env->cp15.c0_cachetype;
900 case 2: /* TCM status. */
901 if (arm_feature(env, ARM_FEATURE_XSCALE))
905 case 1: /* System configuration. */
906 if (arm_feature(env, ARM_FEATURE_OMAPCP))
909 case 0: /* Control register. */
910 return env->cp15.c1_sys;
911 case 1: /* Auxiliary control register. */
912 if (arm_feature(env, ARM_FEATURE_AUXCR))
914 if (arm_feature(env, ARM_FEATURE_XSCALE))
915 return env->cp15.c1_xscaleauxcr;
917 case 2: /* Coprocessor access register. */
918 if (arm_feature(env, ARM_FEATURE_XSCALE))
920 return env->cp15.c1_coproc;
924 case 2: /* MMU Page table control / MPU cache control. */
925 if (arm_feature(env, ARM_FEATURE_MPU)) {
928 return env->cp15.c2_data;
931 return env->cp15.c2_insn;
/* MMU cores: translation table base.  */
937 return env->cp15.c2_base;
939 case 3: /* MMU Domain access control / MPU write buffer control. */
941 case 4: /* Reserved. */
943 case 5: /* MMU Fault status / MPU access permission. */
944 if (arm_feature(env, ARM_FEATURE_OMAPCP))
/* op2 == 0: data FSR / data AP register.  MPU cores store AP in
   extended format internally, so compress on read.  */
948 if (arm_feature(env, ARM_FEATURE_MPU))
949 return simple_mpu_ap_bits(env->cp15.c5_data);
950 return env->cp15.c5_data;
/* op2 == 1: instruction FSR / instruction AP register.
   NOTE(review): this MPU arm reads c5_data, identical to the op2==0
   arm above, while the non-MPU path below returns c5_insn — this
   looks like a copy/paste bug and should almost certainly be
   simple_mpu_ap_bits(env->cp15.c5_insn).  Verify before changing.  */
952 if (arm_feature(env, ARM_FEATURE_MPU))
953 return simple_mpu_ap_bits(env->cp15.c5_data);
954 return env->cp15.c5_insn;
/* Extended-format AP reads are MPU-only.  */
956 if (!arm_feature(env, ARM_FEATURE_MPU))
958 return env->cp15.c5_data;
960 if (!arm_feature(env, ARM_FEATURE_MPU))
962 return env->cp15.c5_insn;
966 case 6: /* MMU Fault address / MPU base/size. */
967 if (arm_feature(env, ARM_FEATURE_MPU)) {
972 return env->cp15.c6_region[n];
974 if (arm_feature(env, ARM_FEATURE_OMAPCP))
978 return env->cp15.c6_data;
980 /* Arm9 doesn't have an IFAR, but implementing it anyway
981 shouldn't do any harm. */
982 return env->cp15.c6_insn;
987 case 7: /* Cache control. */
988 /* ??? This is for test, clean and invaidate operations that set the
989 Z flag. We can't represent N = Z = 1, so it also clears
990 the N flag. Oh well. */
993 case 8: /* MMU TLB control. */
995 case 9: /* Cache lockdown. */
996 if (arm_feature(env, ARM_FEATURE_OMAPCP))
1000 return env->cp15.c9_data;
1002 return env->cp15.c9_insn;
1006 case 10: /* MMU TLB lockdown. */
1007 /* ??? TLB lockdown not implemented. */
1009 case 11: /* TCM DMA control. */
1010 case 12: /* Reserved. */
1012 case 13: /* Process ID. */
1015 return env->cp15.c13_fcse;
1017 return env->cp15.c13_context;
1021 case 14: /* Reserved. */
1023 case 15: /* Implementation specific. */
/* XScale CPAR read.  */
1024 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1025 if (op2 == 0 && crm == 1)
1026 return env->cp15.c15_cpar;
1030 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1034 case 1: /* Read TI925T configuration. */
1035 return env->cp15.c15_ticonfig;
1036 case 2: /* Read I_max. */
1037 return env->cp15.c15_i_max;
1038 case 3: /* Read I_min. */
1039 return env->cp15.c15_i_min;
1040 case 4: /* Read thread-ID. */
1041 return env->cp15.c15_threadid;
1042 case 8: /* TI925T_status */
1050 /* ??? For debugging only. Should raise illegal instruction exception. */
1051 cpu_abort(env, "Unimplemented cp15 register read\n");
/* Register board-specific read/write hooks for coprocessor 'cpnum'
   (0..14 — cp15 is the system control coprocessor and cannot be
   overridden).  The hooks are invoked from helper_set_cp/helper_get_cp
   with 'opaque' passed through.  */
1055 void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
1056 ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
1059 if (cpnum < 0 || cpnum > 14) {
1060 cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
1064 env->cp[cpnum].cp_read = cp_read;
1065 env->cp[cpnum].cp_write = cp_write;
1066 env->cp[cpnum].opaque = opaque;