2 * MicroBlaze helper routines.
4 * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
28 #include "host-utils.h"
33 #if defined(CONFIG_USER_ONLY)
/*
 * User-mode (CONFIG_USER_ONLY) interrupt entry: no real exception
 * delivery happens here. The pending exception index is cleared and the
 * current PC is mirrored into r14 (the register this file also uses as
 * the interrupt return-address register in the system-mode path below).
 * NOTE(review): this excerpt is missing lines (braces and possibly other
 * statements); body shown is partial.
 */
35 void do_interrupt (CPUState *env)
37 env->exception_index = -1;
38 env->regs[14] = env->sregs[SR_PC];
/*
 * User-mode MMU fault handler: there is no MMU to consult in a
 * CONFIG_USER_ONLY build, so a sentinel exception index (0xaa) is set
 * and the CPU state is dumped to stderr for diagnosis.
 * NOTE(review): the return statement is not visible in this excerpt —
 * presumably it reports the fault to the caller; confirm against the
 * full source.
 */
41 int cpu_mb_handle_mmu_fault(CPUState * env, target_ulong address, int rw,
42 int mmu_idx, int is_softmmu)
44 env->exception_index = 0xaa;
45 cpu_dump_state(env, stderr, fprintf, 0);
/*
 * User-mode debug address translation. Body not visible in this excerpt;
 * with no MMU in a user-only build this is presumably an identity
 * mapping of addr — TODO confirm against the full source.
 */
49 target_phys_addr_t cpu_get_phys_page_debug(CPUState * env, target_ulong addr)
54 #else /* !CONFIG_USER_ONLY */
/*
 * System-mode MMU fault handler.
 *
 * Determines whether an MMU is configured (PVR0_USE_MMU; for cores that
 * expose the full PVR set, PVR11_USE_MMU must also be present), then:
 *  - if the MMU is available AND virtual mode is enabled (MSR_VM),
 *    looks the address up via mmu_translate(). A hit installs the
 *    page-aligned virtual->physical mapping into the softmmu TLB with
 *    the looked-up protection bits; a miss records the faulting address
 *    in SR_EAR, encodes the fault cause in SR_ESR (the 16..19 constants
 *    distinguish instruction fetch, rw == 2, from data access, with the
 *    store bit at position 10 when rw == 1) and raises EXCP_MMU,
 *    aborting if an EXCP_MMU is already pending (recursive fault);
 *  - otherwise identity-maps the page-aligned address.
 *
 * NOTE(review): this excerpt is missing interior lines (braces,
 * if/else structure, the final return and the code establishing
 * `prot` for the identity-map path), so the comments above describe
 * only what the visible statements demonstrate.
 */
56 int cpu_mb_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
57 int mmu_idx, int is_softmmu)
60 unsigned int mmu_available;
65 if (env->pvr.regs[0] & PVR0_USE_MMU) {
67 if ((env->pvr.regs[0] & PVR0_PVR_FULL_MASK)
68 && (env->pvr.regs[11] & PVR11_USE_MMU) != PVR11_USE_MMU) {
73 /* Translate if the MMU is available and enabled. */
74 if (mmu_available && (env->sregs[SR_MSR] & MSR_VM)) {
75 target_ulong vaddr, paddr;
76 struct microblaze_mmu_lookup lu;
78 hit = mmu_translate(&env->mmu, &lu, address, rw, mmu_idx);
/* Page-align the virtual address, then derive the physical page from
   the lookup result (offset within the matched region preserved). */
80 vaddr = address & TARGET_PAGE_MASK;
81 paddr = lu.paddr + vaddr - lu.vaddr;
83 DMMU(qemu_log("MMU map mmu=%d v=%x p=%x prot=%x\n",
84 mmu_idx, vaddr, paddr, lu.prot));
85 r = tlb_set_page(env, vaddr,
86 paddr, lu.prot, mmu_idx, is_softmmu);
/* Miss path: latch the faulting address for the guest's handler. */
88 env->sregs[SR_EAR] = address;
89 DMMU(qemu_log("mmu=%d miss addr=%x\n", mmu_idx, vaddr));
/* Fault-cause encodings in SR_ESR; bit 10 flags a store access. */
93 env->sregs[SR_ESR] = rw == 2 ? 17 : 16;
94 env->sregs[SR_ESR] |= (rw == 1) << 10;
97 env->sregs[SR_ESR] = rw == 2 ? 19 : 18;
98 env->sregs[SR_ESR] |= (rw == 1) << 10;
/* A second MMU fault while one is being delivered is unrecoverable. */
105 if (env->exception_index == EXCP_MMU) {
106 cpu_abort(env, "recursive faults\n");
110 env->exception_index = EXCP_MMU;
113 /* MMU disabled or not available. */
114 address &= TARGET_PAGE_MASK;
116 r = tlb_set_page(env, address, address, prot, mmu_idx, is_softmmu);
/*
 * System-mode exception/interrupt delivery, switched on
 * env->exception_index.
 *
 * Visible cases (the excerpt is missing the case labels themselves and
 * several interior lines, so the grouping below is inferred from the
 * visible statements — confirm against the full source):
 *  - hardware exception: return PC saved in r17; a fault in a branch
 *    delay slot sets ESR bit 12 and parks the branch target in SR_BTR
 *    so the branch is re-executed; MMU is disabled by shifting the
 *    current VM/UM bits into the VMS/UMS "save" positions; MSR_EIP is
 *    set and control vectors to 0x20.
 *  - interrupt: asserted to only occur with MSR_IE set and no
 *    exception/break in progress; return PC saved in r14; vectors to
 *    0x10.
 *  - break: return PC saved in r16; MSR_BIP set; hardware breaks
 *    vector to 0x18, otherwise PC comes from btarget.
 * All cases share the same MSR bookkeeping: stash VM/UM into VMS/UMS
 * and clear the live copies (i.e. enter the handler in real,
 * privileged mode).
 */
121 void do_interrupt(CPUState *env)
125 /* IMM flag cannot propagate accross a branch and into the dslot. */
126 assert(!((env->iflags & D_FLAG) && (env->iflags & IMM_FLAG)));
127 assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));
128 /* assert(env->sregs[SR_MSR] & (MSR_EE)); Only for HW exceptions. */
129 switch (env->exception_index) {
/* --- hardware exception delivery --- */
131 env->regs[17] = env->sregs[SR_PC];
133 /* Exception breaks branch + dslot sequence? */
134 if (env->iflags & D_FLAG) {
135 D(qemu_log("D_FLAG set at exception bimm=%d\n", env->bimm));
136 env->sregs[SR_ESR] |= 1 << 12 ;
137 env->sregs[SR_BTR] = env->btarget;
139 /* Reexecute the branch. */
141 /* was the branch immprefixed?. */
143 qemu_log_mask(CPU_LOG_INT,
144 "bimm exception at pc=%x iflags=%x\n",
145 env->sregs[SR_PC], env->iflags);
147 log_cpu_state_mask(CPU_LOG_INT, env, 0);
149 } else if (env->iflags & IMM_FLAG) {
150 D(qemu_log("IMM_FLAG set at exception\n"));
154 /* Disable the MMU. */
155 t = (env->sregs[SR_MSR] & (MSR_VM | MSR_UM)) << 1;
156 env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
157 env->sregs[SR_MSR] |= t;
158 /* Exception in progress. */
159 env->sregs[SR_MSR] |= MSR_EIP;
161 qemu_log_mask(CPU_LOG_INT,
162 "exception at pc=%x ear=%x iflags=%x\n",
163 env->sregs[SR_PC], env->sregs[SR_EAR], env->iflags);
164 log_cpu_state_mask(CPU_LOG_INT, env, 0);
165 env->iflags &= ~(IMM_FLAG | D_FLAG);
166 env->sregs[SR_PC] = 0x20;
/* --- interrupt delivery: must not arrive inside an exception/break
   handler, with interrupts masked, or inside a delay slot --- */
170 assert(!(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP)));
171 assert(env->sregs[SR_MSR] & MSR_IE);
172 assert(!(env->iflags & D_FLAG));
174 t = (env->sregs[SR_MSR] & (MSR_VM | MSR_UM)) << 1;
179 /* Useful instrumentation when debugging interrupt issues in either
180 the models or in sw. */
/* Debug-only: log extra detail when interrupting well-known Linux
   network-stack symbols. */
184 sym = lookup_symbol(env->sregs[SR_PC]);
186 && (!strcmp("netif_rx", sym)
187 || !strcmp("process_backlog", sym))) {
190 "interrupt at pc=%x msr=%x %x iflags=%x sym=%s\n",
191 env->sregs[SR_PC], env->sregs[SR_MSR], t, env->iflags,
194 log_cpu_state(env, 0);
198 qemu_log_mask(CPU_LOG_INT,
199 "interrupt at pc=%x msr=%x %x iflags=%x\n",
200 env->sregs[SR_PC], env->sregs[SR_MSR], t, env->iflags);
202 env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM \
204 env->sregs[SR_MSR] |= t;
206 env->regs[14] = env->sregs[SR_PC];
207 env->sregs[SR_PC] = 0x10;
208 //log_cpu_state_mask(CPU_LOG_INT, env, 0);
/* --- break delivery (hardware or software break) --- */
213 assert(!(env->iflags & IMM_FLAG));
214 assert(!(env->iflags & D_FLAG));
215 t = (env->sregs[SR_MSR] & (MSR_VM | MSR_UM)) << 1;
216 qemu_log_mask(CPU_LOG_INT,
217 "break at pc=%x msr=%x %x iflags=%x\n",
218 env->sregs[SR_PC], env->sregs[SR_MSR], t, env->iflags);
219 log_cpu_state_mask(CPU_LOG_INT, env, 0);
220 env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
221 env->sregs[SR_MSR] |= t;
222 env->sregs[SR_MSR] |= MSR_BIP;
223 if (env->exception_index == EXCP_HW_BREAK) {
224 env->regs[16] = env->sregs[SR_PC];
225 env->sregs[SR_MSR] |= MSR_BIP;
226 env->sregs[SR_PC] = 0x18;
228 env->sregs[SR_PC] = env->btarget;
/* Unknown exception index: fatal. */
231 cpu_abort(env, "unhandled exception type=%d\n",
232 env->exception_index);
/*
 * Debugger address translation (system mode). When virtual mode is on
 * (MSR_VM), consults the MMU model read-only (rw == 0); a hit yields
 * the physical page plus the in-page offset of addr, a miss yields 0
 * (the "???" placeholder — no fault is raised for debug accesses).
 * When the MMU is off, returns the page-aligned address unchanged.
 * NOTE(review): the function's tail (return statement, closing brace)
 * lies past the end of this excerpt.
 */
237 target_phys_addr_t cpu_get_phys_page_debug(CPUState * env, target_ulong addr)
239 target_ulong vaddr, paddr = 0;
240 struct microblaze_mmu_lookup lu;
243 if (env->sregs[SR_MSR] & MSR_VM) {
244 hit = mmu_translate(&env->mmu, &lu, addr, 0, 0);
246 vaddr = addr & TARGET_PAGE_MASK;
247 paddr = lu.paddr + vaddr - lu.vaddr;
249 paddr = 0; /* ???. */
251 paddr = addr & TARGET_PAGE_MASK;