 * virtual page mapping and translated block handling
 * Copyright (c) 2003 Fabrice Bellard
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#define WIN32_LEAN_AND_MEAN
#include <sys/types.h>
#include "qemu-common.h"
#if defined(CONFIG_USER_ONLY)
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK
//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#define SMC_BITMAP_USE_THRESHOLD 10
#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;
/* current CPU in the current thread. It is only valid inside
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
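/* Added note (not in the original source): a virtual page index is split
   across the two table levels as l1_map[index >> L2_BITS][index & (L2_SIZE - 1)].
   For example, with TARGET_PAGE_BITS = 12 and L2_BITS = 10, the 32-bit
   address 0x12345678 has page index 0x12345, L1 slot 0x48 and L2 slot
   0x345. */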
static void io_mem_init(void);
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
char *logfilename = "/tmp/qemu.log";
static int log_append = 0;
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
static void map_exec(void *addr, long size)
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
static void map_exec(void *addr, long size)
    unsigned long start, end, page_size;
    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);
    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);
    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
    SYSTEM_INFO system_info;
    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
    qemu_real_host_page_size = getpagesize();
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
        long long startaddr, endaddr;
        f = fopen("/proc/self/maps", "r");
                n = fscanf(f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
static inline PageDesc *page_find_alloc(target_ulong index)
    lp = &l1_map[index >> L2_BITS];
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));
static inline PageDesc *page_find(target_ulong index)
    p = l1_map[index >> L2_BITS];
    return p + (index & (L2_SIZE - 1));
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
        /* allocate if not found */
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
        /* allocate if not found */
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
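/* Added note: for physical address spaces wider than 32 bits the lookup
   above consumes the page index in three slices:
       top:    (index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1)
       middle: (index >> L2_BITS) & (L1_SIZE - 1)
       leaf:   index & (L2_SIZE - 1)
   Freshly allocated leaf tables have every phys_offset preset to
   IO_MEM_UNASSIGNED, so unmapped physical pages fall through to the
   unassigned I/O handlers. */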
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
    return phys_page_find_alloc(index, 0);
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used */
#define USE_STATIC_CODE_GEN_BUFFER
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
void code_gen_alloc(unsigned long tb_size)
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
    code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
void cpu_exec_init_all(unsigned long tb_size)
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
void cpu_exec_init(CPUState *env)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
    for(i = 0; i < L1_SIZE; i++) {
            for(j = 0; j < L2_SIZE; j++) {
                invalidate_page_bitmap(p);
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
#ifdef DEBUG_TB_CHECK
static void tb_invalidate_check(target_ulong address)
    TranslationBlock *tb;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
void tb_jmp_check(TranslationBlock *tb)
    TranslationBlock *tb1;
    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb1 = tb1->jmp_next[n1];
    /* check end of list */
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
    TranslationBlock *tb1;
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;
    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;
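/* Added note on the encoding used above: jmp_first and jmp_next[] are
   tagged pointers. The low two bits of a list entry select which outgoing
   jump slot (0 or 1) of the pointed-to TB links back here, and a self
   pointer tagged with 2 (tb | 2) terminates the circular list. A minimal
   sketch of decoding one entry:

       TranslationBlock *tagged = tb->jmp_first;
       int n1 = (long)tagged & 3;               // slot index, or 2 == end
       TranslationBlock *tb1 =
           (TranslationBlock *)((long)tagged & ~3);   // real pointer
*/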
/* reset the jump entry 'n' of a TB so that it is not chained to
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;
    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));
    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    tb_invalidated_flag = 1;
    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);
    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
    tb_phys_invalidate_count++;
static inline void set_bits(uint8_t *tab, int start, int len)
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
        mask = ~(0xff << (end & 7));
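/* Added worked example: set_bits(tab, 10, 4) must set bits 10..13.
   start & 7 == 2 gives a first mask of 0xff << 2 == 0xfc; since start and
   end == 14 fall in the same byte ((start & ~7) == (end & ~7) == 8), the
   mask is clipped with ~(0xff << (end & 7)) == 0x3f, leaving 0x3c to be
   OR'ed into tab[1]. */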
static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
    TranslationBlock *tb;
    target_ulong phys_pc, phys_page2, virt_page2;
    phys_pc = get_phys_addr_code(env, pc);
        /* flush must be done */
        /* cannot fail at this point */
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    tb_link_phys(tb, phys_pc, phys_page2);
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#error unsupported CPU
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
        cpu_resume_from_signal(env, NULL);
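/* Added usage note: the typical caller is the notdirty write path further
   below, which for a 'len'-byte store at ram address 'addr' ends up doing
   roughly

       tb_invalidate_phys_page_range(addr, addr + len, 1);

   with is_cpu_write_access = 1 so that a TB which modified itself is
   exited and regenerated before execution resumes. */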
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
        tb_invalidate_phys_page_range(start, start + len, 1);
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    current_tb_modified = 0;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
        current_tb = tb_find_pc(pc);
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#error unsupported CPU
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
        cpu_resume_from_signal(env, puc);
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
    TranslationBlock *last_first_tb;
    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);
#if defined(TARGET_HAS_SMC) || 1
#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {
            p2 = page_find(addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
#endif /* TARGET_HAS_SMC */
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;
    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
    tb = &tbs[nb_tbs++];
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
    TranslationBlock **ptb;
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
        tb->page_addr[1] = -1;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
#ifdef DEBUG_TB_CHECK
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    int m_min, m_max, m;
    TranslationBlock *tb;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
static void tb_reset_jump_recursive(TranslationBlock *tb);
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
    TranslationBlock *tb1, *tb_next, **ptb;
    tb1 = tb->jmp_next[n];
        /* find head of list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure that tb jumps to tb1 */
        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
            ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;
        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);
        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
static void tb_reset_jump_recursive(TranslationBlock *tb)
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
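/* Added note: cpu_interrupt() relies on this pair of helpers. For each of
   the two jump slots, tb_reset_jump_recursive2() walks the circular
   jmp_first list of the jump target to unlink the entry pointing back to
   'tb', patches the generated code so the jump falls through, and then
   recurses into the target, so a whole chain of linked TBs is broken. */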
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    target_phys_addr_t addr;
    ram_addr_t ram_addr;
    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */
/* Remove a watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    env->nb_watchpoints = 0;
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
#if defined(TARGET_HAS_ICE)
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
    env->breakpoints[env->nb_breakpoints++] = pc;
    breakpoint_invalidate(env, pc);
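/* Added usage sketch (error handling elided; a return value of 0 means
   success in this API):

       if (cpu_breakpoint_insert(env, pc) == 0) {
           // run: the CPU loop returns EXCP_DEBUG when pc is reached
           cpu_breakpoint_remove(env, pc);
       }

   breakpoint_invalidate() discards any TB already translated at 'pc' so
   the next execution retranslates it with the breakpoint check inside. */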
/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    env->nb_breakpoints = 0;
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
#if defined(TARGET_HAS_ICE)
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
    breakpoint_invalidate(env, pc);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
/* enable or disable low level logging */
void cpu_set_log(int log_flags)
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
            perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        setvbuf(logfile, NULL, _IOLBF, 0);
    if (!loglevel && logfile) {
void cpu_set_log_filename(const char *filename)
    logfilename = strdup(filename);
    cpu_set_log(loglevel);
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
void cpu_reset_interrupt(CPUState *env, int mask)
    env->interrupt_request &= ~mask;
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
static int cmp1(const char *s1, int n, const char *s2)
    if (strlen(s2) != n)
    return memcmp(s1, s2, n) == 0;
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
        p1 = strchr(p, ',');
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
void cpu_abort(CPUState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
        cpu_dump_state(env, logfile, fprintf, 0);
CPUState *cpu_copy(CPUState *env)
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
#if !defined(CONFIG_USER_ONLY)
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
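/* Added note: tb_jmp_cache is a direct-mapped, virtually indexed cache of
   TBs. tb_jmp_cache_hash_page() maps all entries of one guest page onto a
   contiguous run of TB_JMP_PAGE_SIZE slots, so flushing a page costs two
   memsets: one for the page itself and one for the preceding page, whose
   TBs may cross the boundary into the flushed page. */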
/* NOTE: if flush_global is true, also flush global entries (not
void tlb_flush(CPUState *env, int flush_global)
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
void tlb_flush_page(CPUState *env, target_ulong addr)
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;
    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
    tlb_flush_jmp_cache(env, addr);
#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
/* update the TLBs so that writes to code in the virtual page 'addr'
static void tlb_protect_code(ram_addr_t ram_addr)
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
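/* Added note: phys_ram_dirty holds one byte of dirty flags per target
   page. cpu_physical_memory_reset_dirty() below clears flags and rewrites
   matching TLB write entries from IO_MEM_RAM to IO_MEM_NOTDIRTY, so the
   next store to such a page is forced through the slow
   notdirty_mem_write* handlers, which mark the page dirty again. */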
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    unsigned long length, start1;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    length = end - start;
    len = length >> TARGET_PAGE_BITS;
    /* XXX: should not depend on cpu context */
    if (env->kqemu_enabled) {
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
        for(i = 0; i < L1_SIZE; i++) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                    addr += TARGET_PAGE_SIZE;
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
    ram_addr_t ram_addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
    target_ulong address;
    target_phys_addr_t addend;
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#if !defined(CONFIG_SOFTMMU)
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            /* standard memory */
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines. */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long)phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
                te->addr_write = address;
            te->addr_write = -1;
#if !defined(CONFIG_SOFTMMU)
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
            if (!(env->hflags & HF_SOFTMMU_MASK))
            if (vaddr >= MMAP_AREA_END) {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
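/* Added note on the TLB entry encoding built above: addr_read/addr_write/
   addr_code hold the target page address OR'ed with I/O flags in the low
   bits, and 'addend' translates a guest address to host memory, so a RAM
   fast-path load is conceptually (a sketch, not the generated code):

       if ((addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)) == te->addr_read)
           val = ldl_p((void *)(unsigned long)(addr + te->addend));

   Any nonzero low bits force the slow path (I/O, notdirty, watchpoint). */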
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
#if !defined(CONFIG_SOFTMMU)
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
    addr &= TARGET_PAGE_MASK;
    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
    if (!(vp->prot & PAGE_WRITE))
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
void tlb_flush(CPUState *env, int flush_global)
void tlb_flush_page(CPUState *env, target_ulong addr)
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
/* dump memory mappings */
void page_dump(FILE *f)
    unsigned long start, end;
    int i, j, prot, prot1;
    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    for(i = 0; i <= L1_SIZE; i++) {
        for(j = 0; j < L2_SIZE; j++) {
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
int page_get_flags(target_ulong address)
    p = page_find(address >> TARGET_PAGE_BITS);
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
    spin_unlock(&tb_lock);
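/* Added usage sketch: the user-mode mmap emulation calls this after a
   successful host mmap, roughly

       page_set_flags(start, start + len, prot | PAGE_VALID);

   PAGE_WRITE_ORG remembers the requested write permission while pages
   containing translated code are kept read-only. */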
int page_check_range(target_ulong start, target_ulong len, int flags)
    end = TARGET_PAGE_ALIGN(start + len); /* must do this before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;
        /* we've wrapped around */
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!(p->flags & PAGE_VALID))
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    unsigned int page_index, prot, pindex;
    target_ulong host_start, host_end, addr;
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    host_end = host_start + qemu_host_page_size;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
#endif /* defined(CONFIG_USER_ONLY) */
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
static void *subpage_init(target_phys_addr_t base, ram_addr_t *phys,
                          ram_addr_t orig_memory);
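/* Added worked example for the CHECK_SUBPAGE macro below: registering a
   0x80-byte region at physical address 0x1000040 (4 KB target pages)
   performs one loop iteration at addr = 0x1000040 with start_addr2 = 0x40
   and end_addr2 = 0xbf, sets need_subpage, and thus routes only bytes
   0x40..0xbf of the page at 0x1000000 to the new handlers. */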
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
        if (addr > start_addr) \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
            if (start_addr2 > 0) \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
            end_addr2 = TARGET_PAGE_SIZE - 1; \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1) \
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t phys_offset)
    target_phys_addr_t addr, end_addr;
    ram_addr_t orig_size = size;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
void qemu_ram_free(ram_addr_t addr)
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
    do_unassigned_access(addr, 0, 0, 0);
    do_unassigned_access(addr, 0, 0, 0);
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
    do_unassigned_access(addr, 1, 0, 0);
    do_unassigned_access(addr, 1, 0, 0);
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
    unsigned long ram_addr;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    stb_p((uint8_t *)(long)addr, val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
    unsigned long ram_addr;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    stw_p((uint8_t *)(long)addr, val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
    unsigned long ram_addr;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    stl_p((uint8_t *)(long)addr, val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
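/* Added note: the three notdirty handlers above are the write half of the
   self-modifying-code machinery. A store landing here first invalidates
   any TBs overlapping the written bytes, performs the real store into host
   RAM, and sets all dirty flags except CODE_DIRTY_FLAG; once no code is
   left on the page (dirty_flags == 0xff) the TLB entry is flipped back to
   plain RAM so later stores take the fast path. */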
2343 static CPUReadMemoryFunc *error_mem_read[3] = {
2344 NULL, /* never used */
2345 NULL, /* never used */
2346 NULL, /* never used */
2349 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2350 notdirty_mem_writeb,
2351 notdirty_mem_writew,
2352 notdirty_mem_writel,
2355 #if defined(CONFIG_SOFTMMU)
2356 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2357 so these check for a hit then pass through to the normal out-of-line
2359 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2361 return ldub_phys(addr);
2364 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2366 return lduw_phys(addr);
2369 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2371 return ldl_phys(addr);
2374 /* Generate a debug exception if a watchpoint has been hit.
2375 Returns the real physical address of the access. addr will be a host
2376 address in case of a RAM location. */
2377 static target_ulong check_watchpoint(target_phys_addr_t addr)
2379 CPUState *env = cpu_single_env;
2381 target_ulong retaddr;
2385 for (i = 0; i < env->nb_watchpoints; i++) {
2386 watch = env->watchpoint[i].vaddr;
2387 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2388 retaddr = addr - env->watchpoint[i].addend;
2389 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2390 cpu_single_env->watchpoint_hit = i + 1;
2391 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
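
/* Illustrative sketch (not part of the original file): a debugger front end
   arms the machinery above by adding a watchpoint to the CPU state; the TLB
   entry for the page is then redirected to io_mem_watch so every access
   funnels through check_watchpoint(). cpu_watchpoint_insert() is the API
   the gdb stub uses in this era of the code. */
#if 0
static void example_watch_guest_page(CPUState *env, target_ulong vaddr)
{
    /* watch accesses to vaddr; a hit raises CPU_INTERRUPT_DEBUG */
    cpu_watchpoint_insert(env, vaddr);
}
#endif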
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). -1 is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
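
/* Illustrative sketch (not part of the original file) of the registration
   API documented above: a device supplies per-width handler tables and gets
   back a token usable with cpu_register_physical_memory(). The my_dev_*
   names and the 0x10000000 base address are made up for the example. */
#if 0
static uint32_t my_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0; /* device register read */
}

static void my_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* device register write */
}

static CPUReadMemoryFunc *my_dev_read[3] = {
    NULL, NULL, my_dev_readl,   /* 32 bit access only */
};
static CPUWriteMemoryFunc *my_dev_write[3] = {
    NULL, NULL, my_dev_writel,
};

static void my_dev_map(void *opaque)
{
    int io;
    /* io_index 0: allocate a fresh io zone */
    io = cpu_register_io_memory(0, my_dev_read, my_dev_write, opaque);
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, io);
}
#endif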
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
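
/* Illustrative sketch (not part of the original file): callers normally go
   through the cpu_physical_memory_read()/write() wrappers, which expand to
   cpu_physical_memory_rw() with is_write 0 or 1. The buffer and addresses
   here are arbitrary. */
#if 0
static void example_copy_guest_ram(void)
{
    uint8_t buf[64];

    cpu_physical_memory_read(0x1000, buf, sizeof(buf));   /* is_write = 0 */
    cpu_physical_memory_write(0x2000, buf, sizeof(buf));  /* is_write = 1 */
}
#endif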
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
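
/* Illustrative sketch (not part of the original file): board init code uses
   this to copy a firmware image into a region registered as IO_MEM_ROM,
   which plain cpu_physical_memory_write() would skip. The bios_* names and
   the address are hypothetical. */
#if 0
static void example_load_bios(const uint8_t *bios_data, int bios_size)
{
    /* bypasses the read-only mapping on purpose */
    cpu_physical_memory_write_rom(0xfffe0000, bios_data, bios_size);
}
#endif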
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
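
/* Illustrative sketch (not part of the original file): an MMU helper that
   sets the accessed bit of a guest PTE uses the _notdirty variant so its
   own bookkeeping store does not disturb the dirty-bit tracking described
   above. The helper name and the 0x20 flag value are hypothetical. */
#if 0
static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical PTE_ACCESSED */);
}
#endif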
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
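
/* Illustrative sketch (not part of the original file): this is the entry
   point the gdb stub uses to service memory accesses against guest virtual
   addresses, translating page by page via cpu_get_phys_page_debug(). */
#if 0
static int example_gdb_read(CPUState *env, target_ulong vaddr,
                            uint8_t *buf, int len)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif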
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
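
/* Illustrative sketch (not part of the original file): the monitor's
   "info jit" command drives this dump; any fprintf-compatible callback
   works, e.g. dumping to stderr. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif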
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif