 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10
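
/* NOTE: once a page has seen SMC_BITMAP_USE_THRESHOLD code write
   lookups, build_page_bitmap() gives it a per-byte code bitmap so that
   later writes can be checked precisely instead of always invalidating
   (see tb_invalidate_phys_page_range below). */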
#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code, we count the number
       of code write lookups to a given page; past a threshold a
       bitmap of the code locations is built */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;
#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
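
/* A page index is resolved through a two-level table: the top L1_BITS
   select an entry of l1_map[] (or l1_phys_map[]/l1_virt_map[]), which
   points to an array of L2_SIZE descriptors indexed by the low L2_BITS.
   For example, assuming TARGET_PAGE_BITS == 12 and L2_BITS == 10 (the
   usual 32-bit layout), page index 0xABCDE is found at
   l1_map[0xABCDE >> 10][0xABCDE & 0x3ff], i.e. l1_map[0x2af][0xde]. */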
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc *l1_phys_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr,
                                    target_ulong vaddr);
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif
void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
    virt_page_flush();

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_invalidate(TranslationBlock *tb)
{
    int n, n1;
    unsigned int h;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
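
/* A note on the pointer encoding used above: the values stored in
   jmp_first and jmp_next[] carry state in their two low bits, which is
   why they are masked with ~3 before being dereferenced. The low bits
   hold the jump slot (0 or 1) used by the referencing TB, and the
   special value (tb | 2) marks the end of the circular list. first_tb
   and page_next[] use the same trick, with the low bits giving the
   index (0 or 1) of the page inside the TB. */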
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
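
/* Worked example: set_bits(tab, 5, 12) marks bits [5, 17): the first
   byte is or-ed with 0xff << 5 = 0xe0, the fully covered middle byte
   is set to 0xff, and the last byte is or-ed with ~(0xff << 1) = 0x01. */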
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
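
/* The bitmap holds one bit per byte of the page (TARGET_PAGE_SIZE / 8
   bytes in total); a set bit means "this byte belongs to at least one
   TB". tb_invalidate_phys_page_fast() consults it so that data writes
   to a code page do not pay the invalidation cost. */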
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
    tb = tb_alloc((unsigned long)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
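
/* Example of the two-page case (assuming 4 KB target pages): a TB
   starting at pc = 0xffa with size 0x20 ends at 0x1019, so virt_page2
   (0x1000) differs from the first page and phys_page2 records the
   physical address of the second page; otherwise phys_page2 stays -1
   and the TB is linked to a single page only. */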
/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (loglevel) {
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
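
/* Fast path rationale: 'b' holds the code bitmap bits starting at the
   write offset, so (b & ((1 << len) - 1)) is non-zero only if one of
   the 'len' written bytes overlaps translated code; when a bitmap
   exists, pure data writes to a code page return without invalidating
   anything. */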
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif
    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
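
/* Two write-protection strategies coexist in tb_alloc_page(): in user
   mode the host page containing the TB is mprotect()ed read-only, so
   the first write triggers a fault that page_unprotect() turns into a
   TB invalidation; in softmmu mode, tlb_protect_code() instead routes
   writes through the IO_MEM_CODE slow path. */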
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
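/* NOTE: the binary search below is valid because TBs are allocated
   sequentially from the tbs[] array while code_gen_ptr only grows
   between two flushes, so the tc_ptr fields of tbs[0..nb_tbs-1] are
   sorted in increasing order. */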
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
    { CPU_LOG_IOPORT, "ioport",
      "show all I/O port accesses" },
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma-separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}
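
/* The software TLB is a direct-mapped cache: a virtual address selects
   the entry (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1), with
   separate read and write tables for each privilege level (the
   'is_user' index). For example, assuming 4 KB pages and a 256-entry
   TLB, vaddr 0x12345678 maps to index 0x45. */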
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TBs at this
       virtual address */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   checked for self-modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}
/* mark the physical page at 'addr' (seen at virtual page vaddr) as
   dirty and switch its TLB write entries back to fast RAM accesses */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
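
/* Summary of the write-path states encoded in the low bits of a TLB
   entry address: IO_MEM_RAM means plain fast writes; IO_MEM_NOTDIRTY
   traps the first write so the dirty bitmap can be updated (see
   tlb_set_dirty above); IO_MEM_CODE traps every write so overlapping
   TBs can be invalidated; IO_MEM_ROM ignores writes altogether. */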
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non-SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif
    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code does not seem ready to handle
                   self-modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we behave as if code were inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, 'valid_tag' is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
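
/* Illustrative use (a sketch, not code from this file): RAM is
   registered by passing its offset into phys_ram_base, e.g.
       cpu_register_physical_memory(0, ram_size, 0);
   while an I/O region is registered with the value returned by
   cpu_register_io_memory() in place of a RAM offset (see below). */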
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
/* self-modifying code support in soft MMU mode: writes to a page
   containing code come to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}
static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
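
/* Illustrative registration sequence (a sketch only; my_read_fns,
   my_write_fns and 's' are hypothetical names, not part of this file):
       io = cpu_register_io_memory(0, my_read_fns, my_write_fns, s);
       cpu_register_physical_memory(0xf0000000, 0x1000, io);
   where my_read_fns/my_write_fns are arrays of 3 handlers (byte, word,
   dword accesses) and 's' is the opaque pointer passed back to each
   handler. */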
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif