/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10
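/* Once a page containing translated code has seen this many write
   accesses, a per-page bitmap of the bytes occupied by code is built
   (see build_page_bitmap below), so that later small writes which miss
   the bitmap can skip TB invalidation entirely. */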
#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
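/* A 32-bit target address is resolved through a two-level table: the
   top L1_BITS of the page number index l1_map[], the next L2_BITS
   index the PageDesc array hanging off that slot, and the low
   TARGET_PAGE_BITS are the offset within the page.  For example,
   assuming TARGET_PAGE_BITS = 12 and L2_BITS = 10, an address splits
   as 10/10/12 bits. */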
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
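    /* Example: with 4 KB host pages on a 32-bit host, qemu_host_page_bits
       ends up as 12 and qemu_host_page_mask as 0xfffff000. */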
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
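/* The physical page table mirrors l1_map but is indexed by physical
   page number.  When the physical address space is wider than 32 bits,
   an extra top level rooted at l1_phys_map absorbs the high bits
   before the usual L1/L2 walk. */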
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
                             target_ulong vaddr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
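/* After the reset, jump slot 'n' points at the address just past
   itself inside the same TB (tc_ptr + tb_next_offset[n]), i.e. at the
   epilogue code that returns to the main execution loop instead of
   chaining directly into another TB. */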
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    cpu_single_env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
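/* Example: set_bits(tab, 3, 7) marks bits 3..9, i.e. the top five bits
   of tab[0] and the low two bits of tab[1]. */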
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   in range [start, end). NOTE: start and end must refer to the same
   physical page. 'is_cpu_write_access' should be true if called from
   a real cpu write access: the virtual CPU will exit the current TB
   if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
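/* Fast path for the common case of a small aligned write: consult the
   page's code bitmap and only fall back to the full TB invalidation
   walk when one of the written bytes actually overlaps translated
   code. */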
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (loglevel) {
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, page_addr, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
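    /* The low two bits of jmp_first encode which jump slot of the
       pointed-to TB continues the list (0 or 1); the value 2 marks the
       list head itself, so (tb | 2) means "no TB jumps here yet". */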
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
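    /* tbs[] entries are ordered by ascending tc_ptr because generated
       code is always appended at code_gen_ptr, so a binary search over
       [0, nb_tbs) is valid here. */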
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
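        /* breaking the chained jumps guarantees that control returns
           to the main loop at the end of the current TB, where the
           pending interrupt_request will be noticed */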
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all I/O port accesses" },
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        found:
            mask |= item->mask;
        }
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], vaddr);
    tlb_protect_code1(&env->tlb_write[1][i], vaddr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_set_notdirty(env, ram_addr);
    }
#endif
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] &= ~CODE_DIRTY_FLAG;

#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable, so restricting it to PROT_READ write protects it */
    if (vaddr < MMAP_AREA_END)
        mprotect((void *)vaddr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
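/* Together, tlb_protect_code and tlb_unprotect_code_phys implement the
   CODE_DIRTY_FLAG protocol: the flag is cleared while a page holds
   translated code, forcing writes through the notdirty handlers below,
   and set again once no TB references the page. */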
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
    env = cpu_single_env;
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->address |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
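/* A TLB entry stores the target virtual page address with the region
   type (IO_MEM_RAM, IO_MEM_ROM, IO_MEM_NOTDIRTY or an io_index) in the
   low bits, plus an 'addend' which, added to the target virtual
   address, yields the host pointer of the backing RAM page. */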
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
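/* Note in the loop above: phys_offset only advances for RAM/ROM
   (region types <= IO_MEM_ROM), so every page of an I/O region keeps
   the same io_index in its low bits. */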
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
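/* Illustrative registration sketch (the handler names here are
   hypothetical, not part of this file):
       static CPUReadMemoryFunc *my_read[3] = { rd_b, rd_w, rd_l };
       static CPUWriteMemoryFunc *my_write[3] = { wr_b, wr_w, wr_l };
       int io = cpu_register_io_memory(0, my_read, my_write, opaque);
       cpu_register_physical_memory(0xf0000000, 0x1000, io);
*/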
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    uint64_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
    return tswap64(val);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif