/* Reset env to its power-on state.
 * User-mode emulation: start in USR mode with FPEXC bit 30 set
 * (presumably FPEXC.EN, so VFP is usable without privileged setup).
 * System emulation: start in SVC mode with A/I/F masked and VFP off.
 * NOTE(review): braces and the #else/#endif lines are elided in this view. */
8 void cpu_reset(CPUARMState *env)
10 #if defined (CONFIG_USER_ONLY)
11 env->uncached_cpsr = ARM_CPU_MODE_USR;
12 env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
14 /* SVC mode with interrupts disabled. */
15 env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
/* VFP disabled until the guest OS enables it via FPEXC. */
16 env->vfp.xregs[ARM_VFP_FPEXC] = 0;
/* Allocate and return a new, zero-initialised ARM CPU state.
 * NOTE(review): allocation-failure handling, if any, is in lines elided
 * from this view — confirm against the full file. */
21 CPUARMState *cpu_arm_init(void)
25 env = qemu_mallocz(sizeof(CPUARMState));
34 static inline void set_feature(CPUARMState *env, int feature)
36 env->features |= 1u << feature;
/* Name -> CPUID table used by cpu_arm_set_model().  The lookup loop
 * there iterates until it hits an entry whose name is NULL, so the
 * (elided) final entry presumably has a NULL name as terminator. */
44 static const struct arm_cpu_t arm_cpu_names[] = {
45 { ARM_CPUID_ARM926, "arm926"},
46 { ARM_CPUID_ARM1026, "arm1026"},
/* Configure env for the CPU model named `name`: look the name up in
 * arm_cpu_names[], abort on an unknown name, record the CPUID, then
 * set per-model feature bits and the VFP FPSID identification value.
 * Aborts if the ID has no matching switch case. */
50 void cpu_arm_set_model(CPUARMState *env, const char *name)
/* Linear search; loop stops at an entry with a NULL name. */
57 for (i = 0; arm_cpu_names[i].name; i++) {
58 if (strcmp(name, arm_cpu_names[i].name) == 0) {
59 id = arm_cpu_names[i].id;
64 cpu_abort(env, "Unknown CPU '%s'", name);
68 env->cp15.c0_cpuid = id;
70 case ARM_CPUID_ARM926:
71 set_feature(env, ARM_FEATURE_VFP);
/* VFP subarchitecture ID value for this model. */
72 env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
74 case ARM_CPUID_ARM1026:
75 set_feature(env, ARM_FEATURE_VFP);
/* ARM1026 additionally has an auxiliary control register. */
76 set_feature(env, ARM_FEATURE_AUXCR);
77 env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
80 cpu_abort(env, "Bad CPU ID: %x\n", id);
/* Release a CPU state obtained from cpu_arm_init().
 * NOTE(review): the body is elided in this view — presumably frees env. */
85 void cpu_arm_close(CPUARMState *env)
90 #if defined(CONFIG_USER_ONLY)
/* User-mode build: exceptions are handled by the host/usermode layer,
 * so delivery here just clears the pending exception. */
92 void do_interrupt (CPUState *env)
94 env->exception_index = -1;
/* User-mode fault handler: no MMU is modelled, so record the faulting
 * address in CP15 c6 and raise a prefetch or data abort.  The branch
 * selecting between the two (presumably on `rw`) is elided here. */
97 int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
98 int is_user, int is_softmmu)
101 env->exception_index = EXCP_PREFETCH_ABORT;
102 env->cp15.c6_insn = address;
104 env->exception_index = EXCP_DATA_ABORT;
105 env->cp15.c6_data = address;
/* User-mode debug translation: with no MMU the physical address is
 * presumably the virtual address itself (return statement elided). */
110 target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
115 /* These should probably raise undefined insn exceptions. */
/* CP15 is a privileged coprocessor; a user-mode guest reaching this
 * helper is treated as fatal rather than raising a guest exception. */
116 void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
118 cpu_abort(env, "cp15 insn %08x\n", insn);
/* Same as helper_set_cp15 above: CP15 reads are fatal in user mode
 * (should probably raise an undefined-instruction exception instead). */
121 uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
123 cpu_abort(env, "cp15 insn %08x\n", insn);
/* User-mode emulation runs everything in USR mode; attempting to switch
 * to any other processor mode indicates an emulator bug. */
127 void switch_mode(CPUState *env, int mode)
129 if (mode != ARM_CPU_MODE_USR)
130 cpu_abort(env, "Tried to switch out of user mode\n");
135 extern int semihosting_enabled;
137 /* Map CPU modes onto saved register banks. */
/* Returns the index into banked_r13/banked_r14/banked_spsr for `mode`.
 * USR and SYS fall through to the same bank.  The per-case return
 * values are elided in this view.  Unknown modes are fatal; there is
 * no env parameter here, hence the global cpu_single_env below. */
138 static inline int bank_number (int mode)
141 case ARM_CPU_MODE_USR:
142 case ARM_CPU_MODE_SYS:
144 case ARM_CPU_MODE_SVC:
146 case ARM_CPU_MODE_ABT:
148 case ARM_CPU_MODE_UND:
150 case ARM_CPU_MODE_IRQ:
152 case ARM_CPU_MODE_FIQ:
155 cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
/* Switch the CPU between processor modes: spill r13/r14/SPSR of the
 * outgoing mode into its register bank and load the incoming mode's
 * bank.  FIQ additionally banks r8-r12, handled by the memcpy pairs. */
159 void switch_mode(CPUState *env, int mode)
164 old_mode = env->uncached_cpsr & CPSR_M;
/* Nothing to do when staying in the same mode. */
165 if (mode == old_mode)
/* r8-r12 (5 registers) are banked only for FIQ: swap them when
 * entering or leaving FIQ mode. */
168 if (old_mode == ARM_CPU_MODE_FIQ) {
169 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
170 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
171 } else if (mode == ARM_CPU_MODE_FIQ) {
172 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
173 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
/* Save the outgoing mode's banked r13/r14/SPSR... */
176 i = bank_number(old_mode);
177 env->banked_r13[i] = env->regs[13];
178 env->banked_r14[i] = env->regs[14];
179 env->banked_spsr[i] = env->spsr;
/* ...and restore the incoming mode's. */
181 i = bank_number(mode);
182 env->regs[13] = env->banked_r13[i];
183 env->regs[14] = env->banked_r14[i];
184 env->spsr = env->banked_spsr[i];
187 /* Handle a CPU exception. */
/* Dispatches on env->exception_index: picks the target processor mode,
 * the CPSR interrupt-mask bits to set, and (in elided lines) the vector
 * address and return-address offset, then performs the mode switch and
 * jumps to the vector.  Several case labels, `addr`/`offset` assignments
 * and `break`s are elided in this view. */
188 void do_interrupt(CPUARMState *env)
195 /* TODO: Vectored interrupt controller. */
196 switch (env->exception_index) {
/* (EXCP_UDEF case, label elided:) undefined instruction -> UND mode. */
198 new_mode = ARM_CPU_MODE_UND;
/* (EXCP_SWI case:) first check whether this SWI is a semihosting call
 * that should be serviced by the host instead of the guest OS. */
207 if (semihosting_enabled) {
208 /* Check for semihosting interrupt. */
/* Fetch the immediate of the SWI instruction just executed: 8-bit
 * for Thumb (2-byte insn), 24-bit for ARM (4-byte insn). */
210 mask = lduw_code(env->regs[15] - 2) & 0xff;
212 mask = ldl_code(env->regs[15] - 4) & 0xffffff;
214 /* Only intercept calls from privileged modes, to provide some
215 semblance of security. */
/* 0x123456 is the ARM-state semihosting SWI number, 0xab the Thumb one. */
216 if (((mask == 0x123456 && !env->thumb)
217 || (mask == 0xab && env->thumb))
218 && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
/* Semihosting result is returned to the guest in r0. */
219 env->regs[0] = do_arm_semihosting(env);
/* Not semihosting: deliver a normal SWI to the guest in SVC mode. */
223 new_mode = ARM_CPU_MODE_SVC;
226 /* The PC already points to the next instruction. */
229 case EXCP_PREFETCH_ABORT:
231 new_mode = ARM_CPU_MODE_ABT;
233 mask = CPSR_A | CPSR_I;
236 case EXCP_DATA_ABORT:
237 new_mode = ARM_CPU_MODE_ABT;
239 mask = CPSR_A | CPSR_I;
/* (EXCP_IRQ case, label elided.) */
243 new_mode = ARM_CPU_MODE_IRQ;
245 /* Disable IRQ and imprecise data aborts. */
246 mask = CPSR_A | CPSR_I;
/* (EXCP_FIQ case, label elided.) */
250 new_mode = ARM_CPU_MODE_FIQ;
252 /* Disable FIQ, IRQ and imprecise data aborts. */
253 mask = CPSR_A | CPSR_I | CPSR_F;
257 cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
258 return; /* Never happens. Keep compiler happy. */
/* Control register bit 13: high exception vectors (presumably relocates
 * the vector base to 0xffff0000 in the elided branch body). */
261 if (env->cp15.c1_sys & (1 << 13)) {
/* Save old CPSR into the new mode's SPSR, then enter the new mode. */
264 switch_mode (env, new_mode);
265 env->spsr = cpsr_read(env);
266 /* Switch to the new mode, and switch to Arm mode. */
267 /* ??? Thumb interrupt handlers not implemented. */
268 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
269 env->uncached_cpsr |= mask;
/* Banked LR gets the return address; PC jumps to the vector. */
271 env->regs[14] = env->regs[15] + offset;
272 env->regs[15] = addr;
/* Force an exit from the current translation block so execution
 * resumes at the vector address. */
273 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
276 /* Check section/page access permissions.
277 Returns the page protection flags, or zero if the access is not
/* ap: AP bits from the descriptor; domain: domain number;
 * access_type == 1 appears to denote a write (see the checks below);
 * is_user: non-zero for an unprivileged access.  The case labels and
 * several early branches are elided in this view. */
279 static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
/* (Elided condition above:) full access granted. */
283 return PAGE_READ | PAGE_WRITE;
/* (ap == 0 case:) writes always fault; reads depend on the S/R bits
 * of the control register (c1_sys bits 8-9). */
287 if (access_type == 1)
289 switch ((env->cp15.c1_sys >> 8) & 3) {
291 return is_user ? 0 : PAGE_READ;
/* (ap == 1:) privileged read/write only. */
298 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
/* (ap == 2, user access:) read-only for user. */
301 return (access_type == 1) ? 0 : PAGE_READ;
303 return PAGE_READ | PAGE_WRITE;
/* (ap == 3:) full access for everyone. */
305 return PAGE_READ | PAGE_WRITE;
/* Translate a virtual address by walking the guest page tables.
 * On success stores the physical address and protection flags through
 * phys_ptr/prot and returns 0 (presumably); on failure returns the ARM
 * fault-status code with the domain number in bits 4-7 (see the final
 * return).  Case labels, `break`s and `code` assignments are elided. */
311 static int get_phys_addr(CPUState *env, uint32_t address, int access_type,
312 int is_user, uint32_t *phys_ptr, int *prot)
322 /* Fast Context Switch Extension. */
/* FCSE remaps the low 32MB of the address space via CP15 c13. */
323 if (address < 0x02000000)
324 address += env->cp15.c13_fcse
/* Control register bit 0 = MMU enable; if disabled, map flat. */
326 if ((env->cp15.c1_sys & 1) == 0) {
329 *prot = PAGE_READ | PAGE_WRITE;
331 /* Pagetable walk. */
332 /* Lookup l1 descriptor. */
/* Translation table base (c2) | table index from address bits 31-20. */
333 table = (env->cp15.c2 & 0xffffc000) | ((address >> 18) & 0x3ffc);
334 desc = ldl_phys(table);
/* Domain access value: 2 bits per domain out of CP15 c3. */
336 domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
338 /* Section translation fault. */
/* Domain 0 = no access, 2 = reserved: both fault here.  (1 = client
 * falls through to the permission check; 3 = manager skips it.) */
342 if (domain == 0 || domain == 2) {
344 code = 9; /* Section domain fault. */
346 code = 11; /* Page domain fault. */
/* 1MB section: physical base from descriptor, offset from address. */
351 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
352 ap = (desc >> 10) & 3;
355 /* Lookup l2 entry. */
356 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
357 desc = ldl_phys(table);
359 case 0: /* Page translation fault. */
362 case 1: /* 64k page. */
363 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
/* Pick the AP field for the 16K subpage containing the address. */
364 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
366 case 2: /* 4k page. */
367 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
/* NOTE(review): same subpage-selection shift as the 64K case; for a
 * 4K page the subpage index would normally come from address bits
 * 10-11 — confirm against the full file / architecture manual. */
368 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
370 case 3: /* 1k page. */
372 /* Page translation fault. */
376 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
377 ap = (desc >> 4) & 3;
380 /* Never happens, but compiler isn't smart enough to tell. */
385 *prot = check_ap(env, ap, domain, access_type, is_user);
387 /* Access permission fault. */
390 *phys_ptr = phys_addr;
/* Fault status register format: FS code in low bits, domain in 4-7. */
394 return code | (domain << 4);
/* System-emulation MMU fault handler: translate `address` and on
 * success install a TLB entry; on failure record the fault status and
 * address in CP15 c5/c6 and raise the appropriate abort exception. */
397 int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
398 int access_type, int is_user, int is_softmmu)
404 ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
406 /* Map a single [sub]page. */
/* 1K granularity: the smallest mapping unit is a 1K subpage. */
407 phys_addr &= ~(uint32_t)0x3ff;
408 address &= ~(uint32_t)0x3ff;
409 return tlb_set_page (env, address, phys_addr, prot, is_user,
/* access_type == 2 appears to denote an instruction fetch. */
413 if (access_type == 2) {
414 env->cp15.c5_insn = ret;
415 env->cp15.c6_insn = address;
416 env->exception_index = EXCP_PREFETCH_ABORT;
418 env->cp15.c5_data = ret;
419 env->cp15.c6_data = address;
420 env->exception_index = EXCP_DATA_ABORT;
/* Debug translation: walk the page tables as a privileged read
 * (access_type 0, is_user 0) and return the physical address
 * (return statements elided in this view). */
425 target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
431 ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);
/* Write `val` to a CP15 register.  The target register is decoded from
 * the instruction encoding: CRn in bits 16-19 (outer switch), opc2 in
 * bits 5-7, plus (elided) CRm decoding.  Many case bodies, `break`s
 * and a `bad_reg`-style error path are elided in this view. */
439 void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
443 op2 = (insn >> 5) & 7;
444 switch ((insn >> 16) & 0xf) {
445 case 0: /* ID codes. */
447 case 1: /* System configuration. */
450 env->cp15.c1_sys = val;
451 /* ??? Lots of these bits are not implemented. */
452 /* This may enable/disable the MMU, so do a TLB flush. */
456 env->cp15.c1_coproc = val;
457 /* ??? Is this safe when called from within a TB? */
463 case 2: /* MMU Page table control. */
466 case 3: /* MMU Domain access control. */
469 case 4: /* Reserved. */
471 case 5: /* MMU Fault status. */
474 env->cp15.c5_data = val;
477 env->cp15.c5_insn = val;
483 case 6: /* MMU Fault address. */
486 env->cp15.c6_data = val;
489 env->cp15.c6_insn = val;
495 case 7: /* Cache control. */
496 /* No cache, so nothing to do. */
498 case 8: /* MMU TLB control. */
500 case 0: /* Invalidate all. */
503 case 1: /* Invalidate single TLB entry. */
505 /* ??? This is wrong for large pages and sections. */
506 /* As an ugly hack to make linux work we always flush a 4K
/* Flush all four 1K subpages covering the 4K page around val. */
509 tlb_flush_page(env, val);
510 tlb_flush_page(env, val + 0x400);
511 tlb_flush_page(env, val + 0x800);
512 tlb_flush_page(env, val + 0xc00);
521 case 9: /* Cache lockdown. */
524 env->cp15.c9_data = val;
527 env->cp15.c9_insn = val;
533 case 10: /* MMU TLB lockdown. */
534 /* ??? TLB lockdown not implemented. */
536 case 11: /* TCM DMA control. */
537 case 12: /* Reserved. */
539 case 13: /* Process ID. */
542 /* Unlike real hardware the qemu TLB uses virtual addresses,
543 not modified virtual addresses, so this causes a TLB flush.
/* Only flush when the FCSE PID actually changes. */
545 if (env->cp15.c13_fcse != val)
547 env->cp15.c13_fcse = val;
550 /* This changes the ASID, so do a TLB flush. */
551 if (env->cp15.c13_context != val)
553 env->cp15.c13_context = val;
559 case 14: /* Reserved. */
561 case 15: /* Implementation specific. */
562 /* ??? Internal registers not implemented. */
567 /* ??? For debugging only. Should raise illegal instruction exception. */
/* NOTE(review): this is the WRITE helper but the message says
 * "register read" — looks like a copy-paste from helper_get_cp15;
 * should say "write". */
568 cpu_abort(env, "Unimplemented cp15 register read\n");
/* Read and return a CP15 register value.  Decoding mirrors
 * helper_set_cp15: CRn from instruction bits 16-19 (outer switch),
 * opc2 from bits 5-7, plus (elided) CRm decoding.  Several case
 * bodies, `break`s and return statements are elided in this view. */
571 uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
575 op2 = (insn >> 5) & 7;
576 switch ((insn >> 16) & 0xf) {
577 case 0: /* ID codes. */
579 default: /* Device ID. */
580 return env->cp15.c0_cpuid;
581 case 1: /* Cache Type. */
583 case 2: /* TCM status. */
586 case 1: /* System configuration. */
588 case 0: /* Control register. */
589 return env->cp15.c1_sys;
590 case 1: /* Auxiliary control register. */
/* Only models with ARM_FEATURE_AUXCR (e.g. ARM1026) have this. */
591 if (arm_feature(env, ARM_FEATURE_AUXCR))
594 case 2: /* Coprocessor access register. */
595 return env->cp15.c1_coproc;
599 case 2: /* MMU Page table control. */
601 case 3: /* MMU Domain access control. */
603 case 4: /* Reserved. */
605 case 5: /* MMU Fault status. */
608 return env->cp15.c5_data;
610 return env->cp15.c5_insn;
614 case 6: /* MMU Fault address. */
617 return env->cp15.c6_data;
619 /* Arm9 doesn't have an IFAR, but implementing it anyway shouldn't
621 return env->cp15.c6_insn;
625 case 7: /* Cache control. */
626 /* ??? This is for test, clean and invalidate operations that set the
627 Z flag. We can't represent N = Z = 1, so it also clears
628 the N flag. Oh well. */
631 case 8: /* MMU TLB control. */
633 case 9: /* Cache lockdown. */
636 return env->cp15.c9_data;
638 return env->cp15.c9_insn;
642 case 10: /* MMU TLB lockdown. */
643 /* ??? TLB lockdown not implemented. */
645 case 11: /* TCM DMA control. */
646 case 12: /* Reserved. */
648 case 13: /* Process ID. */
651 return env->cp15.c13_fcse;
653 return env->cp15.c13_context;
657 case 14: /* Reserved. */
659 case 15: /* Implementation specific. */
660 /* ??? Internal registers not implemented. */
664 /* ??? For debugging only. Should raise illegal instruction exception. */
665 cpu_abort(env, "Unimplemented cp15 register read\n");