2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
40 #if defined(CONFIG_USER_ONLY)
44 //#define DEBUG_TB_INVALIDATE
47 //#define DEBUG_UNASSIGNED
49 /* make various TB consistency checks */
50 //#define DEBUG_TB_CHECK
51 //#define DEBUG_TLB_CHECK
53 //#define DEBUG_IOPORT
54 //#define DEBUG_SUBPAGE
56 #if !defined(CONFIG_USER_ONLY)
57 /* TB consistency checks only implemented for usermode emulation. */
61 #define SMC_BITMAP_USE_THRESHOLD 10
63 #define MMAP_AREA_START 0x00000000
64 #define MMAP_AREA_END 0xa8000000
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 36
80 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81 #define TARGET_PHYS_ADDR_SPACE_BITS 32
84 TranslationBlock *tbs;
85 int code_gen_max_blocks;
86 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
88 /* any access to the tbs or the page table must use this lock */
89 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
91 uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
92 uint8_t *code_gen_buffer;
93 unsigned long code_gen_buffer_size;
94 /* threshold to flush the translated code buffer */
95 unsigned long code_gen_buffer_max_size;
96 uint8_t *code_gen_ptr;
98 ram_addr_t phys_ram_size;
100 uint8_t *phys_ram_base;
101 uint8_t *phys_ram_dirty;
102 static ram_addr_t phys_ram_alloc_offset = 0;
105 /* current CPU in the current thread. It is only valid inside cpu_exec() */
107 CPUState *cpu_single_env;
109 typedef struct PageDesc {
110 /* list of TBs intersecting this ram page */
111 TranslationBlock *first_tb;
112 /* in order to optimize self-modifying code, we count the number
113 of lookups we do to a given page to use a bitmap */
114 unsigned int code_write_count;
115 uint8_t *code_bitmap;
116 #if defined(CONFIG_USER_ONLY)
121 typedef struct PhysPageDesc {
122 /* offset in host memory of the page + io_index in the low 12 bits */
123 ram_addr_t phys_offset;
127 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
128 /* XXX: this is a temporary hack for alpha target.
129 * In the future, this is to be replaced by a multi-level table
130 * to actually be able to handle the complete 64-bit address space. */
132 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
134 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
137 #define L1_SIZE (1 << L1_BITS)
138 #define L2_SIZE (1 << L2_BITS)
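/* Illustrative sketch (not part of the original source): with the usual
   TARGET_PAGE_BITS = 12 and L2_BITS = 10 on a 32-bit target, a virtual
   address decomposes for the two-level table as:

       index   = addr >> TARGET_PAGE_BITS;    // virtual page number
       l1_slot = index >> L2_BITS;            // top L1_BITS bits
       l2_slot = index & (L2_SIZE - 1);       // next L2_BITS bits
*/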
140 static void io_mem_init(void);
142 unsigned long qemu_real_host_page_size;
143 unsigned long qemu_host_page_bits;
144 unsigned long qemu_host_page_size;
145 unsigned long qemu_host_page_mask;
147 /* XXX: for system emulation, it could just be an array */
148 static PageDesc *l1_map[L1_SIZE];
149 PhysPageDesc **l1_phys_map;
151 /* io memory support */
152 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
153 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
154 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
155 static int io_mem_nb;
156 #if defined(CONFIG_SOFTMMU)
157 static int io_mem_watch;
161 char *logfilename = "/tmp/qemu.log";
164 static int log_append = 0;
167 static int tlb_flush_count;
168 static int tb_flush_count;
169 static int tb_phys_invalidate_count;
171 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
172 typedef struct subpage_t {
173 target_phys_addr_t base;
174 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
175 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
176 void *opaque[TARGET_PAGE_SIZE][2][4];
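/* Worked example (illustrative, not in the original): with 4 KiB target
   pages, SUBPAGE_IDX(0x12345) == 0x345, i.e. the byte offset within the
   page; subpage_t keeps per-offset handler tables so that a single page
   can be split between several I/O regions. */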
180 static void map_exec(void *addr, long size)
183 VirtualProtect(addr, size,
184 PAGE_EXECUTE_READWRITE, &old_protect);
188 static void map_exec(void *addr, long size)
190 unsigned long start, end;
192 start = (unsigned long)addr;
193 start &= ~(qemu_real_host_page_size - 1);
195 end = (unsigned long)addr + size;
196 end += qemu_real_host_page_size - 1;
197 end &= ~(qemu_real_host_page_size - 1);
199 mprotect((void *)start, end - start,
200 PROT_READ | PROT_WRITE | PROT_EXEC);
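/* Worked example (illustrative, not in the original): with a 4 KiB host
   page, map_exec((void *)0x1234567, 0x100) rounds the range outward to
   [0x1234000, 0x1235000) before calling mprotect(), since protections
   can only change on whole host pages. */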
204 static void page_init(void)
206 /* NOTE: we can always suppose that qemu_host_page_size >= TARGET_PAGE_SIZE */
210 SYSTEM_INFO system_info;
213 GetSystemInfo(&system_info);
214 qemu_real_host_page_size = system_info.dwPageSize;
217 qemu_real_host_page_size = getpagesize();
219 if (qemu_host_page_size == 0)
220 qemu_host_page_size = qemu_real_host_page_size;
221 if (qemu_host_page_size < TARGET_PAGE_SIZE)
222 qemu_host_page_size = TARGET_PAGE_SIZE;
223 qemu_host_page_bits = 0;
224 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
225 qemu_host_page_bits++;
226 qemu_host_page_mask = ~(qemu_host_page_size - 1);
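    /* Illustrative values (not in the original): with a 4096-byte host
       page and no override, qemu_host_page_size == 4096,
       qemu_host_page_bits == 12 and qemu_host_page_mask == ~0xfff. */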
227 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
228 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
230 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
232 long long startaddr, endaddr;
236 f = fopen("/proc/self/maps", "r");
239 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
241 startaddr = MIN(startaddr,
242 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
243 endaddr = MIN(endaddr,
244 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
245 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
246 TARGET_PAGE_ALIGN(endaddr),
256 static inline PageDesc *page_find_alloc(target_ulong index)
260 lp = &l1_map[index >> L2_BITS];
263 /* allocate if not found */
264 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
265 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
268 return p + (index & (L2_SIZE - 1));
271 static inline PageDesc *page_find(target_ulong index)
275 p = l1_map[index >> L2_BITS];
278 return p + (index & (L2_SIZE - 1));
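#if 0
/* Usage sketch (illustrative, not part of the original): how callers
   index the page table by target page number. */
static int example_page_has_code(target_ulong addr)
{
    PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);
    return p != NULL && p->first_tb != NULL;
}
#endif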
281 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
286 p = (void **)l1_phys_map;
287 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
289 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
290 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
292 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
295 /* allocate if not found */
298 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
299 memset(p, 0, sizeof(void *) * L1_SIZE);
303 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
307 /* allocate if not found */
310 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
312 for (i = 0; i < L2_SIZE; i++)
313 pd[i].phys_offset = IO_MEM_UNASSIGNED;
315 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
318 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
320 return phys_page_find_alloc(index, 0);
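#if 0
/* Usage sketch (illustrative): the lookup pattern used by the TLB code
   later in this file; unmapped pages read as IO_MEM_UNASSIGNED. */
static ram_addr_t example_phys_offset(target_phys_addr_t paddr)
{
    PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    return p ? p->phys_offset : IO_MEM_UNASSIGNED;
}
#endif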
323 #if !defined(CONFIG_USER_ONLY)
324 static void tlb_protect_code(ram_addr_t ram_addr);
325 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
329 void code_gen_alloc(unsigned long tb_size)
331 code_gen_buffer_size = tb_size;
332 if (code_gen_buffer_size == 0) {
333 /* XXX: needs adjustments */
334 code_gen_buffer_size = (int)(phys_ram_size / 4);
336 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
337 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
338 /* The code gen buffer location may have constraints depending on
339 the host cpu and OS */
340 #if defined(__linux__)
343 flags = MAP_PRIVATE | MAP_ANONYMOUS;
344 #if defined(__x86_64__)
346 /* Cannot map more than that */
347 if (code_gen_buffer_size > (800 * 1024 * 1024))
348 code_gen_buffer_size = (800 * 1024 * 1024);
350 code_gen_buffer = mmap(NULL, code_gen_buffer_size,
351 PROT_WRITE | PROT_READ | PROT_EXEC,
353 if (code_gen_buffer == MAP_FAILED) {
354 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
359 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
360 if (!code_gen_buffer) {
361 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
364 map_exec(code_gen_buffer, code_gen_buffer_size);
366 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
367 code_gen_buffer_max_size = code_gen_buffer_size -
368 code_gen_max_block_size();
369 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
370 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
373 /* Must be called before using the QEMU cpus. 'tb_size' is the size
374 (in bytes) allocated to the translation buffer. Zero means default size. */
376 void cpu_exec_init_all(unsigned long tb_size)
380 code_gen_alloc(tb_size);
381 code_gen_ptr = code_gen_buffer;
385 void cpu_exec_init(CPUState *env)
390 env->next_cpu = NULL;
393 while (*penv != NULL) {
394 penv = (CPUState **)&(*penv)->next_cpu;
397 env->cpu_index = cpu_index;
398 env->nb_watchpoints = 0;
402 static inline void invalidate_page_bitmap(PageDesc *p)
404 if (p->code_bitmap) {
405 qemu_free(p->code_bitmap);
406 p->code_bitmap = NULL;
408 p->code_write_count = 0;
411 /* set to NULL all the 'first_tb' fields in all PageDescs */
412 static void page_flush_tb(void)
417 for(i = 0; i < L1_SIZE; i++) {
420 for(j = 0; j < L2_SIZE; j++) {
422 invalidate_page_bitmap(p);
429 /* flush all the translation blocks */
430 /* XXX: tb_flush is currently not thread safe */
431 void tb_flush(CPUState *env1)
434 #if defined(DEBUG_FLUSH)
435 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
436 (unsigned long)(code_gen_ptr - code_gen_buffer),
438 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
440 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
441 cpu_abort(env1, "Internal error: code buffer overflow\n");
445 for(env = first_cpu; env != NULL; env = env->next_cpu) {
446 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
449 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
452 code_gen_ptr = code_gen_buffer;
453 /* XXX: flush processor icache at this point if cache flush is expensive */
458 #ifdef DEBUG_TB_CHECK
460 static void tb_invalidate_check(target_ulong address)
462 TranslationBlock *tb;
464 address &= TARGET_PAGE_MASK;
465 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
466 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
467 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
468 address >= tb->pc + tb->size)) {
469 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
470 address, (long)tb->pc, tb->size);
476 /* verify that all the pages have correct rights for code */
477 static void tb_page_check(void)
479 TranslationBlock *tb;
480 int i, flags1, flags2;
482 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
483 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
484 flags1 = page_get_flags(tb->pc);
485 flags2 = page_get_flags(tb->pc + tb->size - 1);
486 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
487 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
488 (long)tb->pc, tb->size, flags1, flags2);
494 void tb_jmp_check(TranslationBlock *tb)
496 TranslationBlock *tb1;
499 /* suppress any remaining jumps to this TB */
503 tb1 = (TranslationBlock *)((long)tb1 & ~3);
506 tb1 = tb1->jmp_next[n1];
508 /* check end of list */
510 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
516 /* invalidate one TB */
517 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
520 TranslationBlock *tb1;
524 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
527 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
531 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
533 TranslationBlock *tb1;
539 tb1 = (TranslationBlock *)((long)tb1 & ~3);
541 *ptb = tb1->page_next[n1];
544 ptb = &tb1->page_next[n1];
548 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
550 TranslationBlock *tb1, **ptb;
553 ptb = &tb->jmp_next[n];
556 /* find tb(n) in circular list */
560 tb1 = (TranslationBlock *)((long)tb1 & ~3);
561 if (n1 == n && tb1 == tb)
564 ptb = &tb1->jmp_first;
566 ptb = &tb1->jmp_next[n1];
569 /* now we can remove tb(n) from the list */
570 *ptb = tb->jmp_next[n];
572 tb->jmp_next[n] = NULL;
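/* Illustrative note (not in the original): the jump lists tag pointers
   with the edge number in the two low bits, e.g.

       ptr = (TranslationBlock *)((long)tb | n); // n is 0 or 1; 2 marks the list head
       n1  = (long)ptr & 3;                      // recover the edge number
       tb1 = (TranslationBlock *)((long)ptr & ~3); // recover the TB pointer

   which is why the traversals above and below mask with ~3. */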
576 /* reset the jump entry 'n' of a TB so that it is not chained to another TB */
578 static inline void tb_reset_jump(TranslationBlock *tb, int n)
580 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
583 static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
588 target_phys_addr_t phys_pc;
589 TranslationBlock *tb1, *tb2;
591 /* remove the TB from the hash list */
592 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
593 h = tb_phys_hash_func(phys_pc);
594 tb_remove(&tb_phys_hash[h], tb,
595 offsetof(TranslationBlock, phys_hash_next));
597 /* remove the TB from the page list */
598 if (tb->page_addr[0] != page_addr) {
599 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
600 tb_page_remove(&p->first_tb, tb);
601 invalidate_page_bitmap(p);
603 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
604 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
605 tb_page_remove(&p->first_tb, tb);
606 invalidate_page_bitmap(p);
609 tb_invalidated_flag = 1;
611 /* remove the TB from the hash list */
612 h = tb_jmp_cache_hash_func(tb->pc);
613 for(env = first_cpu; env != NULL; env = env->next_cpu) {
614 if (env->tb_jmp_cache[h] == tb)
615 env->tb_jmp_cache[h] = NULL;
618 /* remove this TB from the two jump lists */
619 tb_jmp_remove(tb, 0);
620 tb_jmp_remove(tb, 1);
622 /* suppress any remaining jumps to this TB */
628 tb1 = (TranslationBlock *)((long)tb1 & ~3);
629 tb2 = tb1->jmp_next[n1];
630 tb_reset_jump(tb1, n1);
631 tb1->jmp_next[n1] = NULL;
634 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
636 tb_phys_invalidate_count++;
639 static inline void set_bits(uint8_t *tab, int start, int len)
645 mask = 0xff << (start & 7);
646 if ((start & ~7) == (end & ~7)) {
648 mask &= ~(0xff << (end & 7));
653 start = (start + 8) & ~7;
655 while (start < end1) {
660 mask = ~(0xff << (end & 7));
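/* Worked example (illustrative, not in the original): bits are numbered
   LSB-first within each byte, so set_bits(tab, 6, 4) marks bits 6..9 by
   ORing tab[0] with 0xc0 (bits 6-7) and tab[1] with 0x03 (bits 8-9). */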
666 static void build_page_bitmap(PageDesc *p)
668 int n, tb_start, tb_end;
669 TranslationBlock *tb;
671 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
674 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
679 tb = (TranslationBlock *)((long)tb & ~3);
680 /* NOTE: this is subtle as a TB may span two physical pages */
682 /* NOTE: tb_end may be after the end of the page, but
683 it is not a problem */
684 tb_start = tb->pc & ~TARGET_PAGE_MASK;
685 tb_end = tb_start + tb->size;
686 if (tb_end > TARGET_PAGE_SIZE)
687 tb_end = TARGET_PAGE_SIZE;
690 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
692 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
693 tb = tb->page_next[n];
697 #ifdef TARGET_HAS_PRECISE_SMC
699 static void tb_gen_code(CPUState *env,
700 target_ulong pc, target_ulong cs_base, int flags,
703 TranslationBlock *tb;
705 target_ulong phys_pc, phys_page2, virt_page2;
708 phys_pc = get_phys_addr_code(env, pc);
711 /* flush must be done */
713 /* cannot fail at this point */
716 tc_ptr = code_gen_ptr;
718 tb->cs_base = cs_base;
721 cpu_gen_code(env, tb, &code_gen_size);
722 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
724 /* check next page if needed */
725 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
727 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
728 phys_page2 = get_phys_addr_code(env, virt_page2);
730 tb_link_phys(tb, phys_pc, phys_page2);
734 /* invalidate all TBs which intersect with the target physical page
735 starting in range [start, end). NOTE: start and end must refer to
736 the same physical page. 'is_cpu_write_access' should be true if called
737 from a real cpu write access: the virtual CPU will exit the current
738 TB if code is modified inside this TB. */
739 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
740 int is_cpu_write_access)
742 int n, current_tb_modified, current_tb_not_found, current_flags;
743 CPUState *env = cpu_single_env;
745 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
746 target_ulong tb_start, tb_end;
747 target_ulong current_pc, current_cs_base;
749 p = page_find(start >> TARGET_PAGE_BITS);
752 if (!p->code_bitmap &&
753 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
754 is_cpu_write_access) {
755 /* build code bitmap */
756 build_page_bitmap(p);
759 /* we remove all the TBs in the range [start, end) */
760 /* XXX: see if in some cases it could be faster to invalidate all the code */
761 current_tb_not_found = is_cpu_write_access;
762 current_tb_modified = 0;
763 current_tb = NULL; /* avoid warning */
764 current_pc = 0; /* avoid warning */
765 current_cs_base = 0; /* avoid warning */
766 current_flags = 0; /* avoid warning */
770 tb = (TranslationBlock *)((long)tb & ~3);
771 tb_next = tb->page_next[n];
772 /* NOTE: this is subtle as a TB may span two physical pages */
774 /* NOTE: tb_end may be after the end of the page, but
775 it is not a problem */
776 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
777 tb_end = tb_start + tb->size;
779 tb_start = tb->page_addr[1];
780 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
782 if (!(tb_end <= start || tb_start >= end)) {
783 #ifdef TARGET_HAS_PRECISE_SMC
784 if (current_tb_not_found) {
785 current_tb_not_found = 0;
787 if (env->mem_write_pc) {
788 /* now we have a real cpu fault */
789 current_tb = tb_find_pc(env->mem_write_pc);
792 if (current_tb == tb &&
793 !(current_tb->cflags & CF_SINGLE_INSN)) {
794 /* If we are modifying the current TB, we must stop
795 its execution. We could be more precise by checking
796 that the modification is after the current PC, but it
797 would require a specialized function to partially
798 restore the CPU state */
800 current_tb_modified = 1;
801 cpu_restore_state(current_tb, env,
802 env->mem_write_pc, NULL);
803 #if defined(TARGET_I386)
804 current_flags = env->hflags;
805 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
806 current_cs_base = (target_ulong)env->segs[R_CS].base;
807 current_pc = current_cs_base + env->eip;
809 #error unsupported CPU
812 #endif /* TARGET_HAS_PRECISE_SMC */
813 /* we need to do that to handle the case where a signal
814 occurs while doing tb_phys_invalidate() */
817 saved_tb = env->current_tb;
818 env->current_tb = NULL;
820 tb_phys_invalidate(tb, -1);
822 env->current_tb = saved_tb;
823 if (env->interrupt_request && env->current_tb)
824 cpu_interrupt(env, env->interrupt_request);
829 #if !defined(CONFIG_USER_ONLY)
830 /* if no code remaining, no need to continue to use slow writes */
832 invalidate_page_bitmap(p);
833 if (is_cpu_write_access) {
834 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
838 #ifdef TARGET_HAS_PRECISE_SMC
839 if (current_tb_modified) {
840 /* we generate a block containing just the instruction
841 modifying the memory. It will ensure that it cannot modify itself */
843 env->current_tb = NULL;
844 tb_gen_code(env, current_pc, current_cs_base, current_flags,
846 cpu_resume_from_signal(env, NULL);
851 /* len must be <= 8 and start must be a multiple of len */
852 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
859 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
860 cpu_single_env->mem_write_vaddr, len,
862 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
866 p = page_find(start >> TARGET_PAGE_BITS);
869 if (p->code_bitmap) {
870 offset = start & ~TARGET_PAGE_MASK;
871 b = p->code_bitmap[offset >> 3] >> (offset & 7);
872 if (b & ((1 << len) - 1))
876 tb_invalidate_phys_page_range(start, start + len, 1);
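/* Worked example (illustrative, not in the original): for a 1-byte write
   at page offset 0x123, offset >> 3 == 0x24 and offset & 7 == 3, so the
   code above tests bit 3 of code_bitmap[0x24]; a non-zero result means
   translated code overlaps the written bytes and the slow invalidation
   path must run. */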
880 #if !defined(CONFIG_SOFTMMU)
881 static void tb_invalidate_phys_page(target_phys_addr_t addr,
882 unsigned long pc, void *puc)
884 int n, current_flags, current_tb_modified;
885 target_ulong current_pc, current_cs_base;
887 TranslationBlock *tb, *current_tb;
888 #ifdef TARGET_HAS_PRECISE_SMC
889 CPUState *env = cpu_single_env;
892 addr &= TARGET_PAGE_MASK;
893 p = page_find(addr >> TARGET_PAGE_BITS);
897 current_tb_modified = 0;
899 current_pc = 0; /* avoid warning */
900 current_cs_base = 0; /* avoid warning */
901 current_flags = 0; /* avoid warning */
902 #ifdef TARGET_HAS_PRECISE_SMC
904 current_tb = tb_find_pc(pc);
909 tb = (TranslationBlock *)((long)tb & ~3);
910 #ifdef TARGET_HAS_PRECISE_SMC
911 if (current_tb == tb &&
912 !(current_tb->cflags & CF_SINGLE_INSN)) {
913 /* If we are modifying the current TB, we must stop
914 its execution. We could be more precise by checking
915 that the modification is after the current PC, but it
916 would require a specialized function to partially
917 restore the CPU state */
919 current_tb_modified = 1;
920 cpu_restore_state(current_tb, env, pc, puc);
921 #if defined(TARGET_I386)
922 current_flags = env->hflags;
923 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
924 current_cs_base = (target_ulong)env->segs[R_CS].base;
925 current_pc = current_cs_base + env->eip;
927 #error unsupported CPU
930 #endif /* TARGET_HAS_PRECISE_SMC */
931 tb_phys_invalidate(tb, addr);
932 tb = tb->page_next[n];
935 #ifdef TARGET_HAS_PRECISE_SMC
936 if (current_tb_modified) {
937 /* we generate a block containing just the instruction
938 modifying the memory. It will ensure that it cannot modify itself */
940 env->current_tb = NULL;
941 tb_gen_code(env, current_pc, current_cs_base, current_flags,
943 cpu_resume_from_signal(env, puc);
949 /* add the tb in the target page and protect it if necessary */
950 static inline void tb_alloc_page(TranslationBlock *tb,
951 unsigned int n, target_ulong page_addr)
954 TranslationBlock *last_first_tb;
956 tb->page_addr[n] = page_addr;
957 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
958 tb->page_next[n] = p->first_tb;
959 last_first_tb = p->first_tb;
960 p->first_tb = (TranslationBlock *)((long)tb | n);
961 invalidate_page_bitmap(p);
963 #if defined(TARGET_HAS_SMC) || 1
965 #if defined(CONFIG_USER_ONLY)
966 if (p->flags & PAGE_WRITE) {
971 /* force the host page as non writable (writes will have a
972 page fault + mprotect overhead) */
973 page_addr &= qemu_host_page_mask;
975 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
976 addr += TARGET_PAGE_SIZE) {
978 p2 = page_find (addr >> TARGET_PAGE_BITS);
982 p2->flags &= ~PAGE_WRITE;
983 page_get_flags(addr);
985 mprotect(g2h(page_addr), qemu_host_page_size,
986 (prot & PAGE_BITS) & ~PAGE_WRITE);
987 #ifdef DEBUG_TB_INVALIDATE
988 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
993 /* if some code is already present, then the pages are already
994 protected. So we handle the case where only the first TB is
995 allocated in a physical page */
996 if (!last_first_tb) {
997 tlb_protect_code(page_addr);
1001 #endif /* TARGET_HAS_SMC */
1004 /* Allocate a new translation block. Flush the translation buffer if
1005 too many translation blocks or too much generated code. */
1006 TranslationBlock *tb_alloc(target_ulong pc)
1008 TranslationBlock *tb;
1010 if (nb_tbs >= code_gen_max_blocks ||
1011 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1013 tb = &tbs[nb_tbs++];
1019 /* add a new TB and link it to the physical page tables. phys_page2 is
1020 (-1) to indicate that only one page contains the TB. */
1021 void tb_link_phys(TranslationBlock *tb,
1022 target_ulong phys_pc, target_ulong phys_page2)
1025 TranslationBlock **ptb;
1027 /* add in the physical hash table */
1028 h = tb_phys_hash_func(phys_pc);
1029 ptb = &tb_phys_hash[h];
1030 tb->phys_hash_next = *ptb;
1033 /* add in the page list */
1034 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1035 if (phys_page2 != -1)
1036 tb_alloc_page(tb, 1, phys_page2);
1038 tb->page_addr[1] = -1;
1040 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1041 tb->jmp_next[0] = NULL;
1042 tb->jmp_next[1] = NULL;
1044 /* init original jump addresses */
1045 if (tb->tb_next_offset[0] != 0xffff)
1046 tb_reset_jump(tb, 0);
1047 if (tb->tb_next_offset[1] != 0xffff)
1048 tb_reset_jump(tb, 1);
1050 #ifdef DEBUG_TB_CHECK
1055 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1056 tb[1].tc_ptr. Return NULL if not found */
1057 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1059 int m_min, m_max, m;
1061 TranslationBlock *tb;
1065 if (tc_ptr < (unsigned long)code_gen_buffer ||
1066 tc_ptr >= (unsigned long)code_gen_ptr)
1068 /* binary search (cf Knuth) */
1071 while (m_min <= m_max) {
1072 m = (m_min + m_max) >> 1;
1074 v = (unsigned long)tb->tc_ptr;
1077 else if (tc_ptr < v) {
1086 static void tb_reset_jump_recursive(TranslationBlock *tb);
1088 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1090 TranslationBlock *tb1, *tb_next, **ptb;
1093 tb1 = tb->jmp_next[n];
1095 /* find head of list */
1098 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1101 tb1 = tb1->jmp_next[n1];
1103 /* we are now sure that tb jumps to tb1 */
1106 /* remove tb from the jmp_first list */
1107 ptb = &tb_next->jmp_first;
1111 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1112 if (n1 == n && tb1 == tb)
1114 ptb = &tb1->jmp_next[n1];
1116 *ptb = tb->jmp_next[n];
1117 tb->jmp_next[n] = NULL;
1119 /* suppress the jump to next tb in generated code */
1120 tb_reset_jump(tb, n);
1122 /* remove jumps in the tb we could have jumped to */
1123 tb_reset_jump_recursive(tb_next);
1127 static void tb_reset_jump_recursive(TranslationBlock *tb)
1129 tb_reset_jump_recursive2(tb, 0);
1130 tb_reset_jump_recursive2(tb, 1);
1133 #if defined(TARGET_HAS_ICE)
1134 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1136 target_phys_addr_t addr;
1138 ram_addr_t ram_addr;
1141 addr = cpu_get_phys_page_debug(env, pc);
1142 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1144 pd = IO_MEM_UNASSIGNED;
1146 pd = p->phys_offset;
1148 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1149 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1153 /* Add a watchpoint. */
1154 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1158 for (i = 0; i < env->nb_watchpoints; i++) {
1159 if (addr == env->watchpoint[i].vaddr)
1162 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1165 i = env->nb_watchpoints++;
1166 env->watchpoint[i].vaddr = addr;
1167 tlb_flush_page(env, addr);
1168 /* FIXME: This flush is needed because of the hack to make memory ops
1169 terminate the TB. It can be removed once the proper IO trap and
1170 re-execute bits are in. */
1175 /* Remove a watchpoint. */
1176 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1180 for (i = 0; i < env->nb_watchpoints; i++) {
1181 if (addr == env->watchpoint[i].vaddr) {
1182 env->nb_watchpoints--;
1183 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1184 tlb_flush_page(env, addr);
1191 /* Remove all watchpoints. */
1192 void cpu_watchpoint_remove_all(CPUState *env) {
1195 for (i = 0; i < env->nb_watchpoints; i++) {
1196 tlb_flush_page(env, env->watchpoint[i].vaddr);
1198 env->nb_watchpoints = 0;
1201 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1202 breakpoint is reached */
1203 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1205 #if defined(TARGET_HAS_ICE)
1208 for(i = 0; i < env->nb_breakpoints; i++) {
1209 if (env->breakpoints[i] == pc)
1213 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1215 env->breakpoints[env->nb_breakpoints++] = pc;
1217 breakpoint_invalidate(env, pc);
1224 /* remove all breakpoints */
1225 void cpu_breakpoint_remove_all(CPUState *env) {
1226 #if defined(TARGET_HAS_ICE)
1228 for(i = 0; i < env->nb_breakpoints; i++) {
1229 breakpoint_invalidate(env, env->breakpoints[i]);
1231 env->nb_breakpoints = 0;
1235 /* remove a breakpoint */
1236 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1238 #if defined(TARGET_HAS_ICE)
1240 for(i = 0; i < env->nb_breakpoints; i++) {
1241 if (env->breakpoints[i] == pc)
1246 env->nb_breakpoints--;
1247 if (i < env->nb_breakpoints)
1248 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1250 breakpoint_invalidate(env, pc);
1257 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1258 CPU loop after each instruction */
1259 void cpu_single_step(CPUState *env, int enabled)
1261 #if defined(TARGET_HAS_ICE)
1262 if (env->singlestep_enabled != enabled) {
1263 env->singlestep_enabled = enabled;
1264 /* must flush all the translated code to avoid inconsistencies */
1265 /* XXX: only flush what is necessary */
1271 /* enable or disable low-level logging */
1272 void cpu_set_log(int log_flags)
1274 loglevel = log_flags;
1275 if (loglevel && !logfile) {
1276 logfile = fopen(logfilename, log_append ? "a" : "w");
1278 perror(logfilename);
1281 #if !defined(CONFIG_SOFTMMU)
1282 /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1284 static uint8_t logfile_buf[4096];
1285 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1288 setvbuf(logfile, NULL, _IOLBF, 0);
1292 if (!loglevel && logfile) {
1298 void cpu_set_log_filename(const char *filename)
1300 logfilename = strdup(filename);
1305 cpu_set_log(loglevel);
1308 /* mask must never be zero, except for A20 change call */
1309 void cpu_interrupt(CPUState *env, int mask)
1311 TranslationBlock *tb;
1312 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1314 env->interrupt_request |= mask;
1315 /* if the cpu is currently executing code, we must unlink it and
1316 all the potentially executing TBs */
1317 tb = env->current_tb;
1318 if (tb && !testandset(&interrupt_lock)) {
1319 env->current_tb = NULL;
1320 tb_reset_jump_recursive(tb);
1321 resetlock(&interrupt_lock);
1325 void cpu_reset_interrupt(CPUState *env, int mask)
1327 env->interrupt_request &= ~mask;
1330 CPULogItem cpu_log_items[] = {
1331 { CPU_LOG_TB_OUT_ASM, "out_asm",
1332 "show generated host assembly code for each compiled TB" },
1333 { CPU_LOG_TB_IN_ASM, "in_asm",
1334 "show target assembly code for each compiled TB" },
1335 { CPU_LOG_TB_OP, "op",
1336 "show micro ops for each compiled TB" },
1337 { CPU_LOG_TB_OP_OPT, "op_opt",
1340 "before eflags optimization and "
1342 "after liveness analysis" },
1343 { CPU_LOG_INT, "int",
1344 "show interrupts/exceptions in short format" },
1345 { CPU_LOG_EXEC, "exec",
1346 "show trace before each executed TB (lots of logs)" },
1347 { CPU_LOG_TB_CPU, "cpu",
1348 "show CPU state before block translation" },
1350 { CPU_LOG_PCALL, "pcall",
1351 "show protected mode far calls/returns/exceptions" },
1354 { CPU_LOG_IOPORT, "ioport",
1355 "show all i/o ports accesses" },
1360 static int cmp1(const char *s1, int n, const char *s2)
1362 if (strlen(s2) != n)
1364 return memcmp(s1, s2, n) == 0;
1367 /* takes a comma-separated list of log masks. Returns 0 on error. */
1368 int cpu_str_to_log_mask(const char *str)
1377 p1 = strchr(p, ',');
1380 if(cmp1(p,p1-p,"all")) {
1381 for(item = cpu_log_items; item->mask != 0; item++) {
1385 for(item = cpu_log_items; item->mask != 0; item++) {
1386 if (cmp1(p, p1 - p, item->name))
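#if 0
/* Usage sketch (illustrative; mirrors how the -d command line option is
   handled elsewhere in QEMU; the "in_asm,op" string is an arbitrary
   example): */
static void example_enable_logging(void)
{
    int mask = cpu_str_to_log_mask("in_asm,op");
    if (mask == 0) {
        /* unknown log item name */
        return;
    }
    cpu_set_log(mask);
}
#endif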
1400 void cpu_abort(CPUState *env, const char *fmt, ...)
1407 fprintf(stderr, "qemu: fatal: ");
1408 vfprintf(stderr, fmt, ap);
1409 fprintf(stderr, "\n");
1411 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1413 cpu_dump_state(env, stderr, fprintf, 0);
1416 fprintf(logfile, "qemu: fatal: ");
1417 vfprintf(logfile, fmt, ap2);
1418 fprintf(logfile, "\n");
1420 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1422 cpu_dump_state(env, logfile, fprintf, 0);
1432 CPUState *cpu_copy(CPUState *env)
1434 CPUState *new_env = cpu_init(env->cpu_model_str);
1435 /* preserve chaining and index */
1436 CPUState *next_cpu = new_env->next_cpu;
1437 int cpu_index = new_env->cpu_index;
1438 memcpy(new_env, env, sizeof(CPUState));
1439 new_env->next_cpu = next_cpu;
1440 new_env->cpu_index = cpu_index;
1444 #if !defined(CONFIG_USER_ONLY)
1446 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1450 /* Discard jump cache entries for any tb which might potentially
1451 overlap the flushed page. */
1452 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1453 memset (&env->tb_jmp_cache[i], 0,
1454 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1456 i = tb_jmp_cache_hash_page(addr);
1457 memset (&env->tb_jmp_cache[i], 0,
1458 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1461 /* NOTE: if flush_global is true, also flush global entries (not implemented yet) */
1463 void tlb_flush(CPUState *env, int flush_global)
1467 #if defined(DEBUG_TLB)
1468 printf("tlb_flush:\n");
1470 /* must reset current TB so that interrupts cannot modify the
1471 links while we are modifying them */
1472 env->current_tb = NULL;
1474 for(i = 0; i < CPU_TLB_SIZE; i++) {
1475 env->tlb_table[0][i].addr_read = -1;
1476 env->tlb_table[0][i].addr_write = -1;
1477 env->tlb_table[0][i].addr_code = -1;
1478 env->tlb_table[1][i].addr_read = -1;
1479 env->tlb_table[1][i].addr_write = -1;
1480 env->tlb_table[1][i].addr_code = -1;
1481 #if (NB_MMU_MODES >= 3)
1482 env->tlb_table[2][i].addr_read = -1;
1483 env->tlb_table[2][i].addr_write = -1;
1484 env->tlb_table[2][i].addr_code = -1;
1485 #if (NB_MMU_MODES == 4)
1486 env->tlb_table[3][i].addr_read = -1;
1487 env->tlb_table[3][i].addr_write = -1;
1488 env->tlb_table[3][i].addr_code = -1;
1493 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1495 #if !defined(CONFIG_SOFTMMU)
1496 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1499 if (env->kqemu_enabled) {
1500 kqemu_flush(env, flush_global);
1506 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1508 if (addr == (tlb_entry->addr_read &
1509 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1510 addr == (tlb_entry->addr_write &
1511 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1512 addr == (tlb_entry->addr_code &
1513 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1514 tlb_entry->addr_read = -1;
1515 tlb_entry->addr_write = -1;
1516 tlb_entry->addr_code = -1;
1520 void tlb_flush_page(CPUState *env, target_ulong addr)
1524 #if defined(DEBUG_TLB)
1525 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1527 /* must reset current TB so that interrupts cannot modify the
1528 links while we are modifying them */
1529 env->current_tb = NULL;
1531 addr &= TARGET_PAGE_MASK;
1532 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1533 tlb_flush_entry(&env->tlb_table[0][i], addr);
1534 tlb_flush_entry(&env->tlb_table[1][i], addr);
1535 #if (NB_MMU_MODES >= 3)
1536 tlb_flush_entry(&env->tlb_table[2][i], addr);
1537 #if (NB_MMU_MODES == 4)
1538 tlb_flush_entry(&env->tlb_table[3][i], addr);
1542 tlb_flush_jmp_cache(env, addr);
1544 #if !defined(CONFIG_SOFTMMU)
1545 if (addr < MMAP_AREA_END)
1546 munmap((void *)addr, TARGET_PAGE_SIZE);
1549 if (env->kqemu_enabled) {
1550 kqemu_flush_page(env, addr);
1555 /* update the TLBs so that writes to code in the virtual page 'addr' can be detected */
1557 static void tlb_protect_code(ram_addr_t ram_addr)
1559 cpu_physical_memory_reset_dirty(ram_addr,
1560 ram_addr + TARGET_PAGE_SIZE,
1564 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1565 tested for self-modifying code */
1566 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1569 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
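/* Illustrative summary (not in the original): phys_ram_dirty[] keeps one
   byte of dirty flags per RAM page. tlb_protect_code() above clears
   CODE_DIRTY_FLAG, so writes to the page trap into the notdirty handlers
   later in this file; the line above sets it again once self-modifying
   code checks are no longer needed, re-enabling the fast write path. */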
1572 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1573 unsigned long start, unsigned long length)
1576 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1577 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1578 if ((addr - start) < length) {
1579 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1584 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1588 unsigned long length, start1;
1592 start &= TARGET_PAGE_MASK;
1593 end = TARGET_PAGE_ALIGN(end);
1595 length = end - start;
1598 len = length >> TARGET_PAGE_BITS;
1600 /* XXX: should not depend on cpu context */
1602 if (env->kqemu_enabled) {
1605 for(i = 0; i < len; i++) {
1606 kqemu_set_notdirty(env, addr);
1607 addr += TARGET_PAGE_SIZE;
1611 mask = ~dirty_flags;
1612 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1613 for(i = 0; i < len; i++)
1616 /* we modify the TLB cache so that the dirty bit will be set again
1617 when accessing the range */
1618 start1 = start + (unsigned long)phys_ram_base;
1619 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1620 for(i = 0; i < CPU_TLB_SIZE; i++)
1621 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1622 for(i = 0; i < CPU_TLB_SIZE; i++)
1623 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1624 #if (NB_MMU_MODES >= 3)
1625 for(i = 0; i < CPU_TLB_SIZE; i++)
1626 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1627 #if (NB_MMU_MODES == 4)
1628 for(i = 0; i < CPU_TLB_SIZE; i++)
1629 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1634 #if !defined(CONFIG_SOFTMMU)
1635 /* XXX: this is expensive */
1641 for(i = 0; i < L1_SIZE; i++) {
1644 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1645 for(j = 0; j < L2_SIZE; j++) {
1646 if (p->valid_tag == virt_valid_tag &&
1647 p->phys_addr >= start && p->phys_addr < end &&
1648 (p->prot & PROT_WRITE)) {
1649 if (addr < MMAP_AREA_END) {
1650 mprotect((void *)addr, TARGET_PAGE_SIZE,
1651 p->prot & ~PROT_WRITE);
1654 addr += TARGET_PAGE_SIZE;
1663 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1665 ram_addr_t ram_addr;
1667 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1668 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1669 tlb_entry->addend - (unsigned long)phys_ram_base;
1670 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1671 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1676 /* update the TLB according to the current state of the dirty bits */
1677 void cpu_tlb_update_dirty(CPUState *env)
1680 for(i = 0; i < CPU_TLB_SIZE; i++)
1681 tlb_update_dirty(&env->tlb_table[0][i]);
1682 for(i = 0; i < CPU_TLB_SIZE; i++)
1683 tlb_update_dirty(&env->tlb_table[1][i]);
1684 #if (NB_MMU_MODES >= 3)
1685 for(i = 0; i < CPU_TLB_SIZE; i++)
1686 tlb_update_dirty(&env->tlb_table[2][i]);
1687 #if (NB_MMU_MODES == 4)
1688 for(i = 0; i < CPU_TLB_SIZE; i++)
1689 tlb_update_dirty(&env->tlb_table[3][i]);
1694 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1695 unsigned long start)
1698 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1699 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1700 if (addr == start) {
1701 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1706 /* update the TLB corresponding to virtual page vaddr and phys addr
1707 addr so that it is no longer dirty */
1708 static inline void tlb_set_dirty(CPUState *env,
1709 unsigned long addr, target_ulong vaddr)
1713 addr &= TARGET_PAGE_MASK;
1714 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1715 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1716 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1717 #if (NB_MMU_MODES >= 3)
1718 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1719 #if (NB_MMU_MODES == 4)
1720 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1725 /* add a new TLB entry. At most one entry for a given virtual address
1726 is permitted. Return 0 if OK or 2 if the page could not be mapped
1727 (can only happen in non SOFTMMU mode for I/O pages or pages
1728 conflicting with the host address space). */
1729 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1730 target_phys_addr_t paddr, int prot,
1731 int mmu_idx, int is_softmmu)
1736 target_ulong address;
1737 target_phys_addr_t addend;
1742 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1744 pd = IO_MEM_UNASSIGNED;
1746 pd = p->phys_offset;
1748 #if defined(DEBUG_TLB)
1749 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1750 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1754 #if !defined(CONFIG_SOFTMMU)
1758 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1759 /* IO memory case */
1760 address = vaddr | pd;
1763 /* standard memory */
1765 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1768 /* Make accesses to pages with watchpoints go via the
1769 watchpoint trap routines. */
1770 for (i = 0; i < env->nb_watchpoints; i++) {
1771 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1772 if (address & ~TARGET_PAGE_MASK) {
1773 env->watchpoint[i].addend = 0;
1774 address = vaddr | io_mem_watch;
1776 env->watchpoint[i].addend = pd - paddr +
1777 (unsigned long) phys_ram_base;
1778 /* TODO: Figure out how to make read watchpoints coexist with code. */
1780 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1785 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1787 te = &env->tlb_table[mmu_idx][index];
1788 te->addend = addend;
1789 if (prot & PAGE_READ) {
1790 te->addr_read = address;
1795 if (prot & PAGE_EXEC) {
1796 te->addr_code = address;
1800 if (prot & PAGE_WRITE) {
1801 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1802 (pd & IO_MEM_ROMD)) {
1803 /* write access calls the I/O callback */
1804 te->addr_write = vaddr |
1805 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1806 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1807 !cpu_physical_memory_is_dirty(pd)) {
1808 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1810 te->addr_write = address;
1813 te->addr_write = -1;
1816 #if !defined(CONFIG_SOFTMMU)
1818 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1819 /* IO access: no mapping is done as it will be handled by the soft MMU */
1821 if (!(env->hflags & HF_SOFTMMU_MASK))
1826 if (vaddr >= MMAP_AREA_END) {
1829 if (prot & PROT_WRITE) {
1830 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1831 #if defined(TARGET_HAS_SMC) || 1
1834 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1835 !cpu_physical_memory_is_dirty(pd))) {
1836 /* ROM: we do as if code was inside */
1837 /* if code is present, we only map as read only and save the original mapping */
1841 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1844 vp->valid_tag = virt_valid_tag;
1845 prot &= ~PAGE_WRITE;
1848 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1849 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1850 if (map_addr == MAP_FAILED) {
1851 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1861 /* called from signal handler: invalidate the code and unprotect the
1862 page. Return TRUE if the fault was successfully handled. */
1863 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1865 #if !defined(CONFIG_SOFTMMU)
1868 #if defined(DEBUG_TLB)
1869 printf("page_unprotect: addr=0x%08x\n", addr);
1871 addr &= TARGET_PAGE_MASK;
1873 /* if it is not mapped, no need to worry here */
1874 if (addr >= MMAP_AREA_END)
1876 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1879 /* NOTE: in this case, validate_tag is _not_ tested as it
1880 validates only the code TLB */
1881 if (vp->valid_tag != virt_valid_tag)
1883 if (!(vp->prot & PAGE_WRITE))
1885 #if defined(DEBUG_TLB)
1886 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1887 addr, vp->phys_addr, vp->prot);
1889 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1890 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1891 (unsigned long)addr, vp->prot);
1892 /* set the dirty bit */
1893 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1894 /* flush the code inside */
1895 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1904 void tlb_flush(CPUState *env, int flush_global)
1908 void tlb_flush_page(CPUState *env, target_ulong addr)
1912 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1913 target_phys_addr_t paddr, int prot,
1914 int mmu_idx, int is_softmmu)
1919 /* dump memory mappings */
1920 void page_dump(FILE *f)
1922 unsigned long start, end;
1923 int i, j, prot, prot1;
1926 fprintf(f, "%-8s %-8s %-8s %s\n",
1927 "start", "end", "size", "prot");
1931 for(i = 0; i <= L1_SIZE; i++) {
1936 for(j = 0;j < L2_SIZE; j++) {
1941 if (prot1 != prot) {
1942 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1944 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1945 start, end, end - start,
1946 prot & PAGE_READ ? 'r' : '-',
1947 prot & PAGE_WRITE ? 'w' : '-',
1948 prot & PAGE_EXEC ? 'x' : '-');
1962 int page_get_flags(target_ulong address)
1966 p = page_find(address >> TARGET_PAGE_BITS);
1972 /* modify the flags of a page and invalidate the code if
1973 necessary. The flag PAGE_WRITE_ORG is set automatically
1974 depending on PAGE_WRITE */
1975 void page_set_flags(target_ulong start, target_ulong end, int flags)
1980 start = start & TARGET_PAGE_MASK;
1981 end = TARGET_PAGE_ALIGN(end);
1982 if (flags & PAGE_WRITE)
1983 flags |= PAGE_WRITE_ORG;
1984 spin_lock(&tb_lock);
1985 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1986 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1987 /* if the write protection is set, then we invalidate the code inside */
1989 if (!(p->flags & PAGE_WRITE) &&
1990 (flags & PAGE_WRITE) &&
1992 tb_invalidate_phys_page(addr, 0, NULL);
1996 spin_unlock(&tb_lock);
1999 int page_check_range(target_ulong start, target_ulong len, int flags)
2005 end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2006 start = start & TARGET_PAGE_MASK;
2009 /* we've wrapped around */
2011 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2012 p = page_find(addr >> TARGET_PAGE_BITS);
2015 if( !(p->flags & PAGE_VALID) )
2018 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2020 if (flags & PAGE_WRITE) {
2021 if (!(p->flags & PAGE_WRITE_ORG))
2023 /* unprotect the page if it was put read-only because it
2024 contains translated code */
2025 if (!(p->flags & PAGE_WRITE)) {
2026 if (!page_unprotect(addr, 0, NULL))
2035 /* called from signal handler: invalidate the code and unprotect the
2036 page. Return TRUE if the fault was successfully handled. */
2037 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2039 unsigned int page_index, prot, pindex;
2041 target_ulong host_start, host_end, addr;
2043 host_start = address & qemu_host_page_mask;
2044 page_index = host_start >> TARGET_PAGE_BITS;
2045 p1 = page_find(page_index);
2048 host_end = host_start + qemu_host_page_size;
2051 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2055 /* if the page was really writable, then we change its
2056 protection back to writable */
2057 if (prot & PAGE_WRITE_ORG) {
2058 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2059 if (!(p1[pindex].flags & PAGE_WRITE)) {
2060 mprotect((void *)g2h(host_start), qemu_host_page_size,
2061 (prot & PAGE_BITS) | PAGE_WRITE);
2062 p1[pindex].flags |= PAGE_WRITE;
2063 /* and since the content will be modified, we must invalidate
2064 the corresponding translated code. */
2065 tb_invalidate_phys_page(address, pc, puc);
2066 #ifdef DEBUG_TB_CHECK
2067 tb_invalidate_check(address);
2075 static inline void tlb_set_dirty(CPUState *env,
2076 unsigned long addr, target_ulong vaddr)
2079 #endif /* defined(CONFIG_USER_ONLY) */
2081 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2083 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2084 ram_addr_t orig_memory);
2085 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2088 if (addr > start_addr) \
2091 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2092 if (start_addr2 > 0) \
2096 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2097 end_addr2 = TARGET_PAGE_SIZE - 1; \
2099 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2100 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2105 /* register physical memory. 'size' must be a multiple of the target
2106 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an I/O memory page */
2108 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2110 ram_addr_t phys_offset)
2112 target_phys_addr_t addr, end_addr;
2115 ram_addr_t orig_size = size;
2118 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2119 end_addr = start_addr + (target_phys_addr_t)size;
2120 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2121 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2122 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2123 ram_addr_t orig_memory = p->phys_offset;
2124 target_phys_addr_t start_addr2, end_addr2;
2125 int need_subpage = 0;
2127 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2129 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2130 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2131 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2132 &p->phys_offset, orig_memory);
2134 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2137 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2139 p->phys_offset = phys_offset;
2140 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2141 (phys_offset & IO_MEM_ROMD))
2142 phys_offset += TARGET_PAGE_SIZE;
2145 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2146 p->phys_offset = phys_offset;
2147 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2148 (phys_offset & IO_MEM_ROMD))
2149 phys_offset += TARGET_PAGE_SIZE;
2151 target_phys_addr_t start_addr2, end_addr2;
2152 int need_subpage = 0;
2154 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2155 end_addr2, need_subpage);
2157 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2158 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2159 &p->phys_offset, IO_MEM_UNASSIGNED);
2160 subpage_register(subpage, start_addr2, end_addr2,
2167 /* since each CPU stores ram addresses in its TLB cache, we must
2168 reset the modified entries */
2170 for(env = first_cpu; env != NULL; env = env->next_cpu) {
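#if 0
/* Usage sketch (illustrative; board code registers RAM and ROM roughly
   like this. 'ram_size', 'bios_size', the offsets returned by
   qemu_ram_alloc() and the 0xfffe0000 BIOS address are hypothetical
   example values, not from this file.) */
static void example_register_memory(ram_addr_t ram_size, ram_addr_t bios_size)
{
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
    ram_addr_t bios_offset = qemu_ram_alloc(bios_size);

    cpu_register_physical_memory(0x00000000, ram_size,
                                 ram_offset | IO_MEM_RAM);
    cpu_register_physical_memory(0xfffe0000, bios_size,
                                 bios_offset | IO_MEM_ROM);
}
#endif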
2175 /* XXX: temporary until new memory mapping API */
2176 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2180 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2182 return IO_MEM_UNASSIGNED;
2183 return p->phys_offset;
2186 /* XXX: better than nothing */
2187 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2190 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2191 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2192 (uint64_t)size, (uint64_t)phys_ram_size);
2195 addr = phys_ram_alloc_offset;
2196 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2200 void qemu_ram_free(ram_addr_t addr)
2204 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2206 #ifdef DEBUG_UNASSIGNED
2207 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2210 do_unassigned_access(addr, 0, 0, 0);
2212 do_unassigned_access(addr, 0, 0, 0);
2217 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2219 #ifdef DEBUG_UNASSIGNED
2220 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2223 do_unassigned_access(addr, 1, 0, 0);
2225 do_unassigned_access(addr, 1, 0, 0);
2229 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2230 unassigned_mem_readb,
2231 unassigned_mem_readb,
2232 unassigned_mem_readb,
2235 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2236 unassigned_mem_writeb,
2237 unassigned_mem_writeb,
2238 unassigned_mem_writeb,
2241 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2243 unsigned long ram_addr;
2245 ram_addr = addr - (unsigned long)phys_ram_base;
2246 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2247 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2248 #if !defined(CONFIG_USER_ONLY)
2249 tb_invalidate_phys_page_fast(ram_addr, 1);
2250 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2253 stb_p((uint8_t *)(long)addr, val);
2255 if (cpu_single_env->kqemu_enabled &&
2256 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2257 kqemu_modify_page(cpu_single_env, ram_addr);
2259 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2260 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2261 /* we remove the notdirty callback only if the code has been flushed */
2263 if (dirty_flags == 0xff)
2264 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2267 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2269 unsigned long ram_addr;
2271 ram_addr = addr - (unsigned long)phys_ram_base;
2272 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2273 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2274 #if !defined(CONFIG_USER_ONLY)
2275 tb_invalidate_phys_page_fast(ram_addr, 2);
2276 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2279 stw_p((uint8_t *)(long)addr, val);
2281 if (cpu_single_env->kqemu_enabled &&
2282 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2283 kqemu_modify_page(cpu_single_env, ram_addr);
2285 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2286 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2287 /* we remove the notdirty callback only if the code has been flushed */
2289 if (dirty_flags == 0xff)
2290 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2293 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2295 unsigned long ram_addr;
2297 ram_addr = addr - (unsigned long)phys_ram_base;
2298 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2299 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2300 #if !defined(CONFIG_USER_ONLY)
2301 tb_invalidate_phys_page_fast(ram_addr, 4);
2302 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2305 stl_p((uint8_t *)(long)addr, val);
2307 if (cpu_single_env->kqemu_enabled &&
2308 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2309 kqemu_modify_page(cpu_single_env, ram_addr);
2311 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2312 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2313 /* we remove the notdirty callback only if the code has been flushed */
2315 if (dirty_flags == 0xff)
2316 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2319 static CPUReadMemoryFunc *error_mem_read[3] = {
2320 NULL, /* never used */
2321 NULL, /* never used */
2322 NULL, /* never used */
2325 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2326 notdirty_mem_writeb,
2327 notdirty_mem_writew,
2328 notdirty_mem_writel,
2331 #if defined(CONFIG_SOFTMMU)
2332 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2333 so these check for a hit then pass through to the normal out-of-line phys routines. */
2335 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2337 return ldub_phys(addr);
2340 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2342 return lduw_phys(addr);
2345 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2347 return ldl_phys(addr);
2350 /* Generate a debug exception if a watchpoint has been hit.
2351 Returns the real physical address of the access. addr will be a host
2352 address in case of a RAM location. */
2353 static target_ulong check_watchpoint(target_phys_addr_t addr)
2355 CPUState *env = cpu_single_env;
2357 target_ulong retaddr;
2361 for (i = 0; i < env->nb_watchpoints; i++) {
2362 watch = env->watchpoint[i].vaddr;
2363 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2364 retaddr = addr - env->watchpoint[i].addend;
2365 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2366 cpu_single_env->watchpoint_hit = i + 1;
2367 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2375 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2378 addr = check_watchpoint(addr);
2379 stb_phys(addr, val);
2382 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2385 addr = check_watchpoint(addr);
2386 stw_phys(addr, val);
2389 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2392 addr = check_watchpoint(addr);
2393 stl_phys(addr, val);
2396 static CPUReadMemoryFunc *watch_mem_read[3] = {
2402 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2409 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2415 idx = SUBPAGE_IDX(addr - mmio->base);
2416 #if defined(DEBUG_SUBPAGE)
2417 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2418 mmio, len, addr, idx);
2420 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}
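
/* Worked example for the dispatch above (illustration only): SUBPAGE_IDX()
   reduces the access address to its offset within the page, so an access
   at mmio->base + 0x13 selects the handlers registered for offset 0x13,
   e.g. for a byte read (len 0):

       ret = (**mmio->mem_read[0x13][0])(mmio->opaque[0x13][0][0], addr);

   The middle index of opaque selects the read (0) versus write (1) table,
   matching the two dispatch functions above. */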

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb, &subpage_readw, &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb, &subpage_writew, &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
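
/* Example use of subpage_init (a sketch; "pd" and the range names are
   hypothetical): when a caller such as cpu_register_physical_memory needs
   to map a region that does not cover a whole target page, it converts
   that page into a subpage container, keeping the old handlers as the
   default, then overlays the new handlers on the partial range:

       subpage_t *sub = subpage_init(page_base, &pd, old_phys_offset);
       subpage_register(sub, region_start & ~TARGET_PAGE_MASK,
                        region_end & ~TARGET_PAGE_MASK, new_memory);
*/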

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
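
/* Example: registering a minimal device through the interface documented
   above.  A sketch, not part of the build; the my_dev_* names are
   hypothetical. */
#if 0
static uint32_t my_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0xdeadbeef;   /* a device register value */
}

static void my_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* react to a 32 bit guest store */
}

static CPUReadMemoryFunc *my_dev_read[3] = {
    NULL, NULL, my_dev_readl,    /* byte/word slots omitted -> subwidth */
};
static CPUWriteMemoryFunc *my_dev_write[3] = {
    NULL, NULL, my_dev_writel,
};

static void my_dev_map(target_phys_addr_t base)
{
    int io = cpu_register_io_memory(0, my_dev_read, my_dev_write, NULL);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif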

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
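
/* Usage note (sketch): callers normally go through the
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers, which
   expand to calls of the function above, e.g.:

       uint32_t v;
       cpu_physical_memory_read(pa, (uint8_t *)&v, 4);    (is_write = 0)
       cpu_physical_memory_write(pa, (uint8_t *)&v, 4);   (is_write = 1)
*/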

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: the device sees two 32 bit accesses */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
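
/* Worked example for the I/O path above (illustration only): a 64 bit
   read at address A becomes two 32 bit device reads at A and A + 4.  On a
   big-endian target the first read supplies bits 63..32; on a
   little-endian target it supplies bits 31..0.  So a device returning
   0x11223344 then 0x55667788 yields 0x1122334455667788 or
   0x5566778811223344 respectively. */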

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
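
/* Usage note (sketch): target MMU helpers that set accessed/dirty bits
   in guest page tables use this variant, roughly

       pte |= PG_DIRTY_MASK;
       stl_phys_notdirty(pte_addr, pte);

   so that rewriting the PTE does not itself mark the page dirty and
   defeat the tracking it implements.  "pte_addr" and "PG_DIRTY_MASK"
   stand in here for the target-specific names. */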

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
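
/* Example caller (sketch, not part of the build): the gdb stub reads
   guest virtual memory this way, translating page by page and failing
   cleanly on unmapped pages: */
#if 0
static int example_gdb_read(CPUState *env, target_ulong vaddr,
                            uint8_t *buf, int len)
{
    if (cpu_memory_rw_debug(env, vaddr, buf, len, 0) != 0)
        return -1;   /* some page in the range was not mapped */
    return len;
}
#endif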

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
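
/* Usage note (sketch): the monitor's "info jit" command is the usual
   caller, passing its own fprintf-like callback; from plain C an
   equivalent invocation would be

       dump_exec_info(stderr, fprintf);

   since fprintf already matches the expected callback signature. */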

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif