/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>

//#define DEBUG_TB_INVALIDATE

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;
#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
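
/* Example: with TARGET_PAGE_BITS = 12 and L2_BITS = 10 (so L1_BITS = 10),
   the virtual address 0x12345678 is looked up as
       page index = 0x12345678 >> TARGET_PAGE_BITS = 0x12345
       L1 index   = 0x12345 >> L2_BITS             = 0x48
       L2 index   = 0x12345 & (L2_SIZE - 1)        = 0x345
   i.e. l1_map[0x48] points to an array of L2_SIZE PageDesc entries and
   entry 0x345 of that array describes the page. */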
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
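
/* e.g. on a host with 4 KiB pages: qemu_host_page_bits = 12 and
   qemu_host_page_mask = ~(4096 - 1), so (addr & qemu_host_page_mask)
   rounds addr down to its host page boundary by clearing the low
   12 bits. */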

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif

    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
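
/* Note: a PhysPageDesc's phys_offset packs two things: the page-aligned
   offset into host RAM and, in its low bits, the io_index of the memory
   region (IO_MEM_RAM, IO_MEM_ROM, IO_MEM_NOTDIRTY or a device slot).
   Readers recover the handler with
       io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
   as done in cpu_physical_memory_rw() below. */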

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif /* DEBUG_TB_CHECK */

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
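
/* Note on the jump list encoding used above: the low 2 bits of the
   pointers stored in jmp_first/jmp_next are a tag. Tag n (0 or 1) means
   the list continues at tb1->jmp_next[n]; tag 2 marks the list owner
   (jmp_first) and ends the walk:
       n1  = (long)tb1 & 3;                          extract the tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   recover the pointer
   This relies on TranslationBlock pointers being at least 4-byte
   aligned. */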

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
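
/* e.g. set_bits(tab, 3, 7) marks bits 3..9: the first byte gets mask
   0xff << 3 = 0xf8 (bits 3-7), and the final byte gets
   ~(0xff << (10 & 7)) = 0x03 (bits 8-9). */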

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (loglevel) {
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_FP_CONVERT
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all I/O port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
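
/* e.g. cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, "all" enables every item, and any
   unknown name makes the whole call return 0. */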

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
    }
#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->address |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
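
/* Note: once installed, a TLB entry turns a hit into a host pointer with
   a single add, since addend is stored as (host page address - vaddr):
       host_addr = vaddr + env->tlb_read[is_user][index].addend;
   IO entries instead carry the io_index in the low bits of 'address'. */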

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
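
/* phys_ram_dirty keeps one byte of dirty flags per RAM page. A cleared
   CODE_DIRTY_FLAG means the page may contain translated code, so writes
   must go through the slow handlers above and invalidate TBs first; once
   the byte reaches 0xff the page is fully dirty and the notdirty callback
   can be dropped, as tested at the end of each writer. */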

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
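
/* Typical usage from a device model (illustrative sketch; my_readb etc.,
   the opaque pointer and the 0xfc000000/0x1000 region are placeholders,
   not names defined in this file):
       static CPUReadMemoryFunc *my_read[3]   = { my_readb, my_readw, my_readl };
       static CPUWriteMemoryFunc *my_write[3] = { my_writeb, my_writew, my_writel };
       int io = cpu_register_io_memory(0, my_read, my_write, opaque);
       cpu_register_physical_memory(0xfc000000, 0x1000, io);
   Accesses to the region are then dispatched through io_mem_read and
   io_mem_write with the registered opaque pointer. */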

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
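
/* The cpu_physical_memory_read()/cpu_physical_memory_write() helpers used
   by the ld*_phys/st*_phys functions below are thin wrappers around
   cpu_physical_memory_rw() with is_write set to 0 and 1 respectively. */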

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* XXX: optimize */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    uint64_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
    return tswap64(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif