2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <sys/types.h>
37 #if defined(CONFIG_USER_ONLY)
41 //#define DEBUG_TB_INVALIDATE
44 //#define DEBUG_UNASSIGNED
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
50 //#define DEBUG_IOPORT
52 #if !defined(CONFIG_USER_ONLY)
53 /* TB consistency checks only implemented for usermode emulation. */
57 /* threshold to flush the translated code buffer */
58 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
60 #define SMC_BITMAP_USE_THRESHOLD 10
62 #define MMAP_AREA_START 0x00000000
63 #define MMAP_AREA_END 0xa8000000
65 #if defined(TARGET_SPARC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 41
67 #elif defined(TARGET_SPARC)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 36
69 #elif defined(TARGET_ALPHA)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 42
71 #define TARGET_VIRT_ADDR_SPACE_BITS 42
72 #elif defined(TARGET_PPC64)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
76 #define TARGET_PHYS_ADDR_SPACE_BITS 32
79 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
80 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
82 /* any access to the tbs or the page table must use this lock */
83 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
85 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
86 uint8_t *code_gen_ptr;
90 uint8_t *phys_ram_base;
91 uint8_t *phys_ram_dirty;
92 static ram_addr_t phys_ram_alloc_offset = 0;
95 /* current CPU in the current thread. It is only valid inside
97 CPUState *cpu_single_env;
/* Per-target-page bookkeeping used by the translated-block machinery.
   NOTE(review): several lines of both typedefs are elided in this view
   (closing braces and remaining fields are not visible); code preserved
   byte-identical. */
99 typedef struct PageDesc {
100 /* list of TBs intersecting this ram page */
101 TranslationBlock *first_tb;
102 /* in order to optimize self modifying code, we count the number
103 of lookups we do to a given page to use a bitmap */
104 unsigned int code_write_count;
105 uint8_t *code_bitmap;
106 #if defined(CONFIG_USER_ONLY)
/* Descriptor for one guest-physical page. */
111 typedef struct PhysPageDesc {
112 /* offset in host memory of the page + io_index in the low 12 bits */
113 uint32_t phys_offset;
117 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
118 /* XXX: this is a temporary hack for alpha target.
119 * In the future, this is to be replaced by a multi-level table
120 * to actually be able to handle the complete 64 bits address space.
122 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
124 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
127 #define L1_SIZE (1 << L1_BITS)
128 #define L2_SIZE (1 << L2_BITS)
130 static void io_mem_init(void);
132 unsigned long qemu_real_host_page_size;
133 unsigned long qemu_host_page_bits;
134 unsigned long qemu_host_page_size;
135 unsigned long qemu_host_page_mask;
137 /* XXX: for system emulation, it could just be an array */
138 static PageDesc *l1_map[L1_SIZE];
139 PhysPageDesc **l1_phys_map;
141 /* io memory support */
142 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
143 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
144 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
145 static int io_mem_nb;
146 #if defined(CONFIG_SOFTMMU)
147 static int io_mem_watch;
151 char *logfilename = "/tmp/qemu.log";
156 static int tlb_flush_count;
157 static int tb_flush_count;
158 static int tb_phys_invalidate_count;
/* One-time page-size initialisation.
   Queries the host page size (GetSystemInfo on Windows, getpagesize()
   elsewhere) and makes the static code_gen_buffer executable
   (VirtualProtect / mprotect with PROT_READ|WRITE|EXEC on a
   page-aligned range covering the buffer).  Then derives
   qemu_host_page_size (at least TARGET_PAGE_SIZE), qemu_host_page_bits
   and qemu_host_page_mask, and allocates + zeroes the top-level
   l1_phys_map table.
   NOTE(review): interior lines (#ifdef guards, some declarations) are
   elided in this excerpt; code preserved byte-identical. */
160 static void page_init(void)
162 /* NOTE: we can always suppose that qemu_host_page_size >=
166 SYSTEM_INFO system_info;
169 GetSystemInfo(&system_info);
170 qemu_real_host_page_size = system_info.dwPageSize;
/* Windows: mark the whole code generation buffer RWX. */
172 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
173 PAGE_EXECUTE_READWRITE, &old_protect);
176 qemu_real_host_page_size = getpagesize();
178 unsigned long start, end;
/* Round [code_gen_buffer, +size) out to host page boundaries
   before changing its protection. */
180 start = (unsigned long)code_gen_buffer;
181 start &= ~(qemu_real_host_page_size - 1);
183 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
184 end += qemu_real_host_page_size - 1;
185 end &= ~(qemu_real_host_page_size - 1);
187 mprotect((void *)start, end - start,
188 PROT_READ | PROT_WRITE | PROT_EXEC);
192 if (qemu_host_page_size == 0)
193 qemu_host_page_size = qemu_real_host_page_size;
194 if (qemu_host_page_size < TARGET_PAGE_SIZE)
195 qemu_host_page_size = TARGET_PAGE_SIZE;
/* qemu_host_page_bits = log2(qemu_host_page_size). */
196 qemu_host_page_bits = 0;
197 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
198 qemu_host_page_bits++;
199 qemu_host_page_mask = ~(qemu_host_page_size - 1);
200 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
201 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
/* Return the PageDesc slot for virtual page 'index', allocating and
   zero-filling an L2 table of PageDesc on first touch.
   NOTE(review): the NULL-check / store of the new table into *lp is
   elided in this view; code preserved byte-identical. */
204 static inline PageDesc *page_find_alloc(unsigned int index)
208 lp = &l1_map[index >> L2_BITS];
211 /* allocate if not found */
212 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
213 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
216 return p + (index & (L2_SIZE - 1));
/* Lookup-only variant of page_find_alloc: no allocation here
   (the NULL-return path is elided in this view). */
219 static inline PageDesc *page_find(unsigned int index)
223 p = l1_map[index >> L2_BITS];
226 return p + (index & (L2_SIZE - 1));
/* Return the PhysPageDesc for guest-physical page 'index'.
   Walks l1_phys_map (an extra level is used when
   TARGET_PHYS_ADDR_SPACE_BITS > 32), allocating intermediate tables
   on demand and initialising every new leaf to IO_MEM_UNASSIGNED.
   'alloc' presumably selects lookup-only vs allocate behaviour — the
   branches testing it are elided here; verify against the full file. */
229 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
234 p = (void **)l1_phys_map;
235 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
237 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
238 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
240 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
243 /* allocate if not found */
246 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
247 memset(p, 0, sizeof(void *) * L1_SIZE);
251 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
255 /* allocate if not found */
258 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
/* Fresh leaf pages start out unassigned (no RAM, no I/O handler). */
260 for (i = 0; i < L2_SIZE; i++)
261 pd[i].phys_offset = IO_MEM_UNASSIGNED;
263 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
/* Lookup-only wrapper: never allocates. */
266 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
268 return phys_page_find_alloc(index, 0);
271 #if !defined(CONFIG_USER_ONLY)
272 static void tlb_protect_code(ram_addr_t ram_addr);
273 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
/* Register a new CPUState: resets code_gen_ptr (first call only,
   presumably — guard elided), appends env to the global CPU list and
   assigns it the next cpu_index. */
277 void cpu_exec_init(CPUState *env)
283 code_gen_ptr = code_gen_buffer;
287 env->next_cpu = NULL;
/* Walk to the end of the singly-linked CPU list, counting indices
   (the cpu_index increment is elided in this view). */
290 while (*penv != NULL) {
291 penv = (CPUState **)&(*penv)->next_cpu;
294 env->cpu_index = cpu_index;
295 env->nb_watchpoints = 0;
/* Drop a page's self-modifying-code bitmap and reset its write
   counter, forcing the bitmap to be rebuilt on the next threshold. */
299 static inline void invalidate_page_bitmap(PageDesc *p)
301 if (p->code_bitmap) {
302 qemu_free(p->code_bitmap);
303 p->code_bitmap = NULL;
305 p->code_write_count = 0;
308 /* set to NULL all the 'first_tb' fields in all PageDescs */
309 static void page_flush_tb(void)
/* Walk the two-level l1_map and invalidate every page's bitmap
   (the first_tb = NULL store is elided in this view). */
314 for(i = 0; i < L1_SIZE; i++) {
317 for(j = 0; j < L2_SIZE; j++) {
319 invalidate_page_bitmap(p);
326 /* flush all the translation blocks */
327 /* XXX: tb_flush is currently not thread safe */
328 void tb_flush(CPUState *env1)
331 #if defined(DEBUG_FLUSH)
332 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
333 code_gen_ptr - code_gen_buffer,
335 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
/* Clear every CPU's TB jump cache ... */
339 for(env = first_cpu; env != NULL; env = env->next_cpu) {
340 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
/* ... the global physical hash table ... */
343 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
/* ... and reclaim the whole code generation buffer. */
346 code_gen_ptr = code_gen_buffer;
347 /* XXX: flush processor icache at this point if cache flush is
352 #ifdef DEBUG_TB_CHECK
/* Debug-only consistency check: report any TB still in the physical
   hash table that overlaps the (page-aligned) invalidated address. */
354 static void tb_invalidate_check(target_ulong address)
356 TranslationBlock *tb;
358 address &= TARGET_PAGE_MASK;
359 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
360 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
/* Overlap test: NOT (TB entirely before or entirely after the page). */
361 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
362 address >= tb->pc + tb->size)) {
363 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
364 address, (long)tb->pc, tb->size);
370 /* verify that all the pages have correct rights for code */
371 static void tb_page_check(void)
373 TranslationBlock *tb;
374 int i, flags1, flags2;
376 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
377 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
/* A page holding translated code must not be writable: check both
   the first and last byte of the TB (it may span two pages). */
378 flags1 = page_get_flags(tb->pc);
379 flags2 = page_get_flags(tb->pc + tb->size - 1);
380 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
381 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
382 (long)tb->pc, tb->size, flags1, flags2);
/* Debug check of the circular jmp list hanging off a TB
   (loop header and termination test elided in this view). */
388 void tb_jmp_check(TranslationBlock *tb)
390 TranslationBlock *tb1;
393 /* suppress any remaining jumps to this TB */
/* Low 2 bits of the list pointer encode the jump slot; mask them off
   to recover the real TranslationBlock pointer. */
397 tb1 = (TranslationBlock *)((long)tb1 & ~3);
400 tb1 = tb1->jmp_next[n1];
402 /* check end of list */
404 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
410 /* invalidate one TB */
/* Unlink 'tb' from a singly-linked list whose next pointer lives at
   byte offset 'next_offset' inside each TranslationBlock. */
411 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
414 TranslationBlock *tb1;
418 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
421 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
/* Unlink 'tb' from a per-page TB list; entries carry the page slot
   index in their low 2 bits, hence the ~3 masking. */
425 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
427 TranslationBlock *tb1;
433 tb1 = (TranslationBlock *)((long)tb1 & ~3);
435 *ptb = tb1->page_next[n1];
438 ptb = &tb1->page_next[n1];
/* Remove jump slot 'n' of 'tb' from the circular jmp list it is
   threaded on (list head is jmp_first; the termination/empty checks
   are elided in this view). */
442 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
444 TranslationBlock *tb1, **ptb;
447 ptb = &tb->jmp_next[n];
450 /* find tb(n) in circular list */
454 tb1 = (TranslationBlock *)((long)tb1 & ~3);
455 if (n1 == n && tb1 == tb)
458 ptb = &tb1->jmp_first;
460 ptb = &tb1->jmp_next[n1];
463 /* now we can suppress tb(n) from the list */
464 *ptb = tb->jmp_next[n];
466 tb->jmp_next[n] = NULL;
470 /* reset the jump entry 'n' of a TB so that it is not chained to
/* (continuation of the comment elided) — repoints jump 'n' at the
   TB's own epilogue so execution falls back to the main loop. */
472 static inline void tb_reset_jump(TranslationBlock *tb, int n)
474 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
/* Remove one TB from every lookup structure: physical hash table,
   per-page lists, per-CPU jmp caches, and both jump chains.
   NOTE(review): page_addr is declared unsigned int yet compared with
   target page addresses and -1 elsewhere — confirm width against the
   full file. */
477 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
482 target_ulong phys_pc;
483 TranslationBlock *tb1, *tb2;
485 /* remove the TB from the hash list */
486 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
487 h = tb_phys_hash_func(phys_pc);
488 tb_remove(&tb_phys_hash[h], tb,
489 offsetof(TranslationBlock, phys_hash_next));
491 /* remove the TB from the page list */
492 if (tb->page_addr[0] != page_addr) {
493 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
494 tb_page_remove(&p->first_tb, tb);
495 invalidate_page_bitmap(p);
/* Second page only exists when the TB spans a page boundary
   (page_addr[1] == -1 otherwise, see tb_link_phys). */
497 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
498 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
499 tb_page_remove(&p->first_tb, tb);
500 invalidate_page_bitmap(p);
503 tb_invalidated_flag = 1;
505 /* remove the TB from the hash list */
506 h = tb_jmp_cache_hash_func(tb->pc);
507 for(env = first_cpu; env != NULL; env = env->next_cpu) {
508 if (env->tb_jmp_cache[h] == tb)
509 env->tb_jmp_cache[h] = NULL;
512 /* suppress this TB from the two jump lists */
513 tb_jmp_remove(tb, 0);
514 tb_jmp_remove(tb, 1);
516 /* suppress any remaining jumps to this TB */
522 tb1 = (TranslationBlock *)((long)tb1 & ~3);
523 tb2 = tb1->jmp_next[n1];
524 tb_reset_jump(tb1, n1);
525 tb1->jmp_next[n1] = NULL;
/* Tag bit 2 marks an empty circular list headed at the TB itself. */
528 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
530 tb_phys_invalidate_count++;
/* Set 'len' bits starting at bit 'start' in the byte array 'tab':
   partial first byte, run of full 0xff bytes, partial last byte. */
533 static inline void set_bits(uint8_t *tab, int start, int len)
539 mask = 0xff << (start & 7);
/* start and end fall in the same byte: single masked store. */
540 if ((start & ~7) == (end & ~7)) {
542 mask &= ~(0xff << (end & 7));
547 start = (start + 8) & ~7;
549 while (start < end1) {
554 mask = ~(0xff << (end & 7));
/* Build the per-page bitmap of bytes covered by translated code,
   used to filter writes in tb_invalidate_phys_page_fast. */
560 static void build_page_bitmap(PageDesc *p)
562 int n, tb_start, tb_end;
563 TranslationBlock *tb;
565 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
568 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
/* Walk the page's TB list (slot index in the low 2 bits). */
573 tb = (TranslationBlock *)((long)tb & ~3);
574 /* NOTE: this is subtle as a TB may span two physical pages */
576 /* NOTE: tb_end may be after the end of the page, but
577 it is not a problem */
578 tb_start = tb->pc & ~TARGET_PAGE_MASK;
579 tb_end = tb_start + tb->size;
580 if (tb_end > TARGET_PAGE_SIZE)
581 tb_end = TARGET_PAGE_SIZE;
/* TB started on the previous page: only its tail is on this one. */
584 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
586 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
587 tb = tb->page_next[n];
591 #ifdef TARGET_HAS_PRECISE_SMC
/* Translate one block at (pc, cs_base, flags) and link it into the
   physical page tables; used after a self-modifying-code fault.
   NOTE(review): the tb_alloc call and retry-after-flush path are
   elided in this view. */
593 static void tb_gen_code(CPUState *env,
594 target_ulong pc, target_ulong cs_base, int flags,
597 TranslationBlock *tb;
599 target_ulong phys_pc, phys_page2, virt_page2;
602 phys_pc = get_phys_addr_code(env, pc);
605 /* flush must be done */
607 /* cannot fail at this point */
610 tc_ptr = code_gen_ptr;
612 tb->cs_base = cs_base;
615 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
/* Bump the generation pointer, rounded up to CODE_GEN_ALIGN. */
616 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
618 /* check next page if needed */
619 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
621 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
622 phys_page2 = get_phys_addr_code(env, virt_page2);
624 tb_link_phys(tb, phys_pc, phys_page2);
628 /* invalidate all TBs which intersect with the target physical page
629 starting in range [start;end[. NOTE: start and end must refer to
630 the same physical page. 'is_cpu_write_access' should be true if called
631 from a real cpu write access: the virtual CPU will exit the current
632 TB if code is modified inside this TB. */
633 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
634 int is_cpu_write_access)
636 int n, current_tb_modified, current_tb_not_found, current_flags;
637 CPUState *env = cpu_single_env;
639 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
640 target_ulong tb_start, tb_end;
641 target_ulong current_pc, current_cs_base;
643 p = page_find(start >> TARGET_PAGE_BITS);
/* After SMC_BITMAP_USE_THRESHOLD real CPU writes to this page, switch
   to bitmap-based filtering of future writes. */
646 if (!p->code_bitmap &&
647 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
648 is_cpu_write_access) {
649 /* build code bitmap */
650 build_page_bitmap(p);
653 /* we remove all the TBs in the range [start, end[ */
654 /* XXX: see if in some cases it could be faster to invalidate all the code */
655 current_tb_not_found = is_cpu_write_access;
656 current_tb_modified = 0;
657 current_tb = NULL; /* avoid warning */
658 current_pc = 0; /* avoid warning */
659 current_cs_base = 0; /* avoid warning */
660 current_flags = 0; /* avoid warning */
/* Iterate the page's TB list; fetch tb_next first since
   tb_phys_invalidate unlinks the current entry. */
664 tb = (TranslationBlock *)((long)tb & ~3);
665 tb_next = tb->page_next[n];
666 /* NOTE: this is subtle as a TB may span two physical pages */
668 /* NOTE: tb_end may be after the end of the page, but
669 it is not a problem */
670 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
671 tb_end = tb_start + tb->size;
/* n != 0: the TB began on the previous page; only its tail lies here. */
673 tb_start = tb->page_addr[1];
674 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
/* Physical overlap test against [start, end[. */
676 if (!(tb_end <= start || tb_start >= end)) {
677 #ifdef TARGET_HAS_PRECISE_SMC
678 if (current_tb_not_found) {
679 current_tb_not_found = 0;
/* Lazily resolve which TB the faulting write came from. */
681 if (env->mem_write_pc) {
682 /* now we have a real cpu fault */
683 current_tb = tb_find_pc(env->mem_write_pc);
686 if (current_tb == tb &&
687 !(current_tb->cflags & CF_SINGLE_INSN)) {
688 /* If we are modifying the current TB, we must stop
689 its execution. We could be more precise by checking
690 that the modification is after the current PC, but it
691 would require a specialized function to partially
692 restore the CPU state */
694 current_tb_modified = 1;
695 cpu_restore_state(current_tb, env,
696 env->mem_write_pc, NULL);
697 #if defined(TARGET_I386)
/* Snapshot (flags, cs_base, pc) so the single-insn TB can be
   regenerated below. */
698 current_flags = env->hflags;
699 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
700 current_cs_base = (target_ulong)env->segs[R_CS].base;
701 current_pc = current_cs_base + env->eip;
703 #error unsupported CPU
706 #endif /* TARGET_HAS_PRECISE_SMC */
707 /* we need to do that to handle the case where a signal
708 occurs while doing tb_phys_invalidate() */
711 saved_tb = env->current_tb;
712 env->current_tb = NULL;
714 tb_phys_invalidate(tb, -1);
716 env->current_tb = saved_tb;
717 if (env->interrupt_request && env->current_tb)
718 cpu_interrupt(env, env->interrupt_request);
723 #if !defined(CONFIG_USER_ONLY)
724 /* if no code remaining, no need to continue to use slow writes */
726 invalidate_page_bitmap(p);
727 if (is_cpu_write_access) {
728 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
732 #ifdef TARGET_HAS_PRECISE_SMC
733 if (current_tb_modified) {
734 /* we generate a block containing just the instruction
735 modifying the memory. It will ensure that it cannot modify
737 env->current_tb = NULL;
738 tb_gen_code(env, current_pc, current_cs_base, current_flags,
/* Does not return: longjmps back into the CPU loop. */
740 cpu_resume_from_signal(env, NULL);
745 /* len must be <= 8 and start must be a multiple of len */
/* Fast path for CPU writes: consult the page's code bitmap and only
   fall through to the full range invalidation when the written bytes
   actually overlap translated code. */
746 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
753 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
754 cpu_single_env->mem_write_vaddr, len,
756 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
760 p = page_find(start >> TARGET_PAGE_BITS);
763 if (p->code_bitmap) {
764 offset = start & ~TARGET_PAGE_MASK;
/* Test the 'len' bitmap bits covering the written bytes. */
765 b = p->code_bitmap[offset >> 3] >> (offset & 7);
766 if (b & ((1 << len) - 1))
770 tb_invalidate_phys_page_range(start, start + len, 1);
774 #if !defined(CONFIG_SOFTMMU)
/* User-mode variant: invalidate every TB on the page containing
   'addr' after a write fault at host pc 'pc' (signal context 'puc').
   Mirrors tb_invalidate_phys_page_range's precise-SMC handling. */
775 static void tb_invalidate_phys_page(target_ulong addr,
776 unsigned long pc, void *puc)
778 int n, current_flags, current_tb_modified;
779 target_ulong current_pc, current_cs_base;
781 TranslationBlock *tb, *current_tb;
782 #ifdef TARGET_HAS_PRECISE_SMC
783 CPUState *env = cpu_single_env;
786 addr &= TARGET_PAGE_MASK;
787 p = page_find(addr >> TARGET_PAGE_BITS);
791 current_tb_modified = 0;
793 current_pc = 0; /* avoid warning */
794 current_cs_base = 0; /* avoid warning */
795 current_flags = 0; /* avoid warning */
796 #ifdef TARGET_HAS_PRECISE_SMC
/* Resolve the TB that issued the faulting write from the host pc. */
798 current_tb = tb_find_pc(pc);
/* Walk the page's TB list (slot index in the low 2 bits). */
803 tb = (TranslationBlock *)((long)tb & ~3);
804 #ifdef TARGET_HAS_PRECISE_SMC
805 if (current_tb == tb &&
806 !(current_tb->cflags & CF_SINGLE_INSN)) {
807 /* If we are modifying the current TB, we must stop
808 its execution. We could be more precise by checking
809 that the modification is after the current PC, but it
810 would require a specialized function to partially
811 restore the CPU state */
813 current_tb_modified = 1;
814 cpu_restore_state(current_tb, env, pc, puc);
815 #if defined(TARGET_I386)
816 current_flags = env->hflags;
817 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
818 current_cs_base = (target_ulong)env->segs[R_CS].base;
819 current_pc = current_cs_base + env->eip;
821 #error unsupported CPU
824 #endif /* TARGET_HAS_PRECISE_SMC */
825 tb_phys_invalidate(tb, addr);
826 tb = tb->page_next[n];
829 #ifdef TARGET_HAS_PRECISE_SMC
830 if (current_tb_modified) {
831 /* we generate a block containing just the instruction
832 modifying the memory. It will ensure that it cannot modify
834 env->current_tb = NULL;
835 tb_gen_code(env, current_pc, current_cs_base, current_flags,
/* Does not return: longjmps back into the CPU loop. */
837 cpu_resume_from_signal(env, puc);
843 /* add the tb in the target page and protect it if necessary */
/* Link TB slot 'n' (0 or 1, encoded in the list pointer's low bits)
   onto the page's TB list, then write-protect the page so future
   stores trap: mprotect in user mode, tlb_protect_code otherwise. */
844 static inline void tb_alloc_page(TranslationBlock *tb,
845 unsigned int n, target_ulong page_addr)
848 TranslationBlock *last_first_tb;
850 tb->page_addr[n] = page_addr;
851 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
852 tb->page_next[n] = p->first_tb;
/* Remember whether the page already held code before this TB. */
853 last_first_tb = p->first_tb;
854 p->first_tb = (TranslationBlock *)((long)tb | n);
855 invalidate_page_bitmap(p);
857 #if defined(TARGET_HAS_SMC) || 1
859 #if defined(CONFIG_USER_ONLY)
860 if (p->flags & PAGE_WRITE) {
865 /* force the host page as non writable (writes will have a
866 page fault + mprotect overhead) */
867 page_addr &= qemu_host_page_mask;
/* A host page may cover several target pages: clear PAGE_WRITE on
   each target page it contains. */
869 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
870 addr += TARGET_PAGE_SIZE) {
872 p2 = page_find (addr >> TARGET_PAGE_BITS);
876 p2->flags &= ~PAGE_WRITE;
877 page_get_flags(addr);
879 mprotect(g2h(page_addr), qemu_host_page_size,
880 (prot & PAGE_BITS) & ~PAGE_WRITE);
881 #ifdef DEBUG_TB_INVALIDATE
882 printf("protecting code page: 0x%08lx\n",
887 /* if some code is already present, then the pages are already
888 protected. So we handle the case where only the first TB is
889 allocated in a physical page */
890 if (!last_first_tb) {
891 tlb_protect_code(page_addr);
895 #endif /* TARGET_HAS_SMC */
898 /* Allocate a new translation block. Flush the translation buffer if
899 too many translation blocks or too much generated code. */
900 TranslationBlock *tb_alloc(target_ulong pc)
902 TranslationBlock *tb;
/* Out of TB slots or code buffer space: caller must tb_flush first
   (the NULL-return path is elided in this view). */
904 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
905 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
913 /* add a new TB and link it to the physical page tables. phys_page2 is
914 (-1) to indicate that only one page contains the TB. */
915 void tb_link_phys(TranslationBlock *tb,
916 target_ulong phys_pc, target_ulong phys_page2)
919 TranslationBlock **ptb;
921 /* add in the physical hash table */
/* Push onto the head of the phys hash bucket chain. */
922 h = tb_phys_hash_func(phys_pc);
923 ptb = &tb_phys_hash[h];
924 tb->phys_hash_next = *ptb;
927 /* add in the page list */
928 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
929 if (phys_page2 != -1)
930 tb_alloc_page(tb, 1, phys_page2);
932 tb->page_addr[1] = -1;
/* Bit 2 tag: empty circular jump list headed at the TB itself. */
934 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
935 tb->jmp_next[0] = NULL;
936 tb->jmp_next[1] = NULL;
938 tb->cflags &= ~CF_FP_USED;
939 if (tb->cflags & CF_TB_FP_USED)
940 tb->cflags |= CF_FP_USED;
943 /* init original jump addresses */
/* 0xffff marks "no direct jump" in tb_next_offset. */
944 if (tb->tb_next_offset[0] != 0xffff)
945 tb_reset_jump(tb, 0);
946 if (tb->tb_next_offset[1] != 0xffff)
947 tb_reset_jump(tb, 1);
949 #ifdef DEBUG_TB_CHECK
954 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
955 tb[1].tc_ptr. Return NULL if not found */
/* Binary search over tbs[] (kept sorted by tc_ptr, the order blocks
   were generated in the code buffer) for the TB containing a given
   host code pointer. */
956 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
960 TranslationBlock *tb;
/* Pointer outside the generated-code region: no TB can match. */
964 if (tc_ptr < (unsigned long)code_gen_buffer ||
965 tc_ptr >= (unsigned long)code_gen_ptr)
967 /* binary search (cf Knuth) */
970 while (m_min <= m_max) {
971 m = (m_min + m_max) >> 1;
973 v = (unsigned long)tb->tc_ptr;
976 else if (tc_ptr < v) {
985 static void tb_reset_jump_recursive(TranslationBlock *tb);
/* Break the direct-jump chain out of jump slot 'n' of 'tb': unlink tb
   from tb_next's circular jmp_first list, reset the generated jump,
   then recurse into tb_next so nothing still chains into it. */
987 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
989 TranslationBlock *tb1, *tb_next, **ptb;
992 tb1 = tb->jmp_next[n];
994 /* find head of list */
997 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1000 tb1 = tb1->jmp_next[n1];
1002 /* we are now sure now that tb jumps to tb1 */
1005 /* remove tb from the jmp_first list */
1006 ptb = &tb_next->jmp_first;
1010 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1011 if (n1 == n && tb1 == tb)
1013 ptb = &tb1->jmp_next[n1];
1015 *ptb = tb->jmp_next[n];
1016 tb->jmp_next[n] = NULL;
1018 /* suppress the jump to next tb in generated code */
1019 tb_reset_jump(tb, n);
1021 /* suppress jumps in the tb on which we could have jumped */
1022 tb_reset_jump_recursive(tb_next);
/* Reset both outgoing jump slots of 'tb'. */
1026 static void tb_reset_jump_recursive(TranslationBlock *tb)
1028 tb_reset_jump_recursive2(tb, 0);
1029 tb_reset_jump_recursive2(tb, 1);
1032 #if defined(TARGET_HAS_ICE)
/* Invalidate the single byte of translated code at guest pc so the
   breakpoint takes effect: translate pc to a ram address via the
   physical page table, then invalidate that one-byte range. */
1033 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1035 target_phys_addr_t addr;
1037 ram_addr_t ram_addr;
1040 addr = cpu_get_phys_page_debug(env, pc);
1041 p = phys_page_find(addr >> TARGET_PAGE_BITS);
/* Unmapped page: fall back to IO_MEM_UNASSIGNED. */
1043 pd = IO_MEM_UNASSIGNED;
1045 pd = p->phys_offset;
1047 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1048 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1052 /* Add a watchpoint. */
/* Registers addr in env->watchpoint[] (deduplicated, capped at
   MAX_WATCHPOINTS) and flushes its TLB page. */
1053 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1057 for (i = 0; i < env->nb_watchpoints; i++) {
1058 if (addr == env->watchpoint[i].vaddr)
1061 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1064 i = env->nb_watchpoints++;
1065 env->watchpoint[i].vaddr = addr;
1066 tlb_flush_page(env, addr);
1067 /* FIXME: This flush is needed because of the hack to make memory ops
1068 terminate the TB. It can be removed once the proper IO trap and
1069 re-execute bits are in. */
1074 /* Remove a watchpoint. */
/* Swap-remove: the last entry overwrites the removed slot. */
1075 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1079 for (i = 0; i < env->nb_watchpoints; i++) {
1080 if (addr == env->watchpoint[i].vaddr) {
1081 env->nb_watchpoints--;
1082 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1083 tlb_flush_page(env, addr);
1090 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1091 breakpoint is reached */
/* Append pc to env->breakpoints[] (deduplicated, capped at
   MAX_BREAKPOINTS) and invalidate the translated code at pc.
   No-op on targets without TARGET_HAS_ICE (return path elided). */
1092 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1094 #if defined(TARGET_HAS_ICE)
1097 for(i = 0; i < env->nb_breakpoints; i++) {
1098 if (env->breakpoints[i] == pc)
1102 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1104 env->breakpoints[env->nb_breakpoints++] = pc;
1106 breakpoint_invalidate(env, pc);
1113 /* remove a breakpoint */
/* Find pc in the array, shift-remove by copying the last entry over
   it, then invalidate the translated code at pc. */
1114 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1116 #if defined(TARGET_HAS_ICE)
1118 for(i = 0; i < env->nb_breakpoints; i++) {
1119 if (env->breakpoints[i] == pc)
1124 env->nb_breakpoints--;
1125 if (i < env->nb_breakpoints)
1126 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1128 breakpoint_invalidate(env, pc);
1135 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1136 CPU loop after each instruction */
/* Toggle single-step; a full code flush (elided here per the XXX
   note) keeps translated blocks consistent with the new mode. */
1137 void cpu_single_step(CPUState *env, int enabled)
1139 #if defined(TARGET_HAS_ICE)
1140 if (env->singlestep_enabled != enabled) {
1141 env->singlestep_enabled = enabled;
1142 /* must flush all the translated code to avoid inconsistancies */
1143 /* XXX: only flush what is necessary */
1149 /* enable or disable low levels log */
/* Set the global log mask; lazily opens the log file on first use and
   configures line buffering (a static buffer is used in the
   non-SOFTMMU build to avoid glibc mmap during stdio setup). */
1150 void cpu_set_log(int log_flags)
1152 loglevel = log_flags;
1153 if (loglevel && !logfile) {
1154 logfile = fopen(logfilename, "w");
1156 perror(logfilename);
1159 #if !defined(CONFIG_SOFTMMU)
1160 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1162 static uint8_t logfile_buf[4096];
1163 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1166 setvbuf(logfile, NULL, _IOLBF, 0);
/* Override the default log path.  NOTE(review): the previous
   logfilename (default is a string literal, later values from strdup)
   is not freed here. */
1171 void cpu_set_log_filename(const char *filename)
1173 logfilename = strdup(filename);
1176 /* mask must never be zero, except for A20 change call */
/* Raise interrupt bits on env and, if the CPU is mid-execution,
   unlink the current TB's jump chains so the CPU loop regains control
   promptly.  interrupt_lock (testandset) guards the unlink against
   re-entry, e.g. from a signal handler. */
1177 void cpu_interrupt(CPUState *env, int mask)
1179 TranslationBlock *tb;
1180 static int interrupt_lock;
1182 env->interrupt_request |= mask;
1183 /* if the cpu is currently executing code, we must unlink it and
1184 all the potentially executing TB */
1185 tb = env->current_tb;
1186 if (tb && !testandset(&interrupt_lock)) {
1187 env->current_tb = NULL;
1188 tb_reset_jump_recursive(tb);
/* Clear previously requested interrupt bits. */
1193 void cpu_reset_interrupt(CPUState *env, int mask)
1195 env->interrupt_request &= ~mask;
/* Table mapping -d log option names to CPU_LOG_* mask bits, with the
   help text shown to the user.
   NOTE(review): "bloc" in the "cpu" entry's help string is a typo for
   "block"; it is a runtime string so it is left untouched here. */
1198 CPULogItem cpu_log_items[] = {
1199 { CPU_LOG_TB_OUT_ASM, "out_asm",
1200 "show generated host assembly code for each compiled TB" },
1201 { CPU_LOG_TB_IN_ASM, "in_asm",
1202 "show target assembly code for each compiled TB" },
1203 { CPU_LOG_TB_OP, "op",
1204 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1206 { CPU_LOG_TB_OP_OPT, "op_opt",
1207 "show micro ops after optimization for each compiled TB" },
1209 { CPU_LOG_INT, "int",
1210 "show interrupts/exceptions in short format" },
1211 { CPU_LOG_EXEC, "exec",
1212 "show trace before each executed TB (lots of logs)" },
1213 { CPU_LOG_TB_CPU, "cpu",
1214 "show CPU state before bloc translation" },
1216 { CPU_LOG_PCALL, "pcall",
1217 "show protected mode far calls/returns/exceptions" },
1220 { CPU_LOG_IOPORT, "ioport",
1221 "show all i/o ports accesses" },
/* Compare the length-n prefix s1[0..n) against NUL-terminated s2. */
1226 static int cmp1(const char *s1, int n, const char *s2)
1228 if (strlen(s2) != n)
1230 return memcmp(s1, s2, n) == 0;
1233 /* takes a comma separated list of log masks. Return 0 if error. */
/* Parse "name,name,..." against cpu_log_items; "all" enables every
   mask bit (accumulation and error return are elided in this view). */
1234 int cpu_str_to_log_mask(const char *str)
1243 p1 = strchr(p, ',');
1246 if(cmp1(p,p1-p,"all")) {
1247 for(item = cpu_log_items; item->mask != 0; item++) {
1251 for(item = cpu_log_items; item->mask != 0; item++) {
1252 if (cmp1(p, p1 - p, item->name))
/* Fatal error: print the formatted message and a CPU state dump to
   stderr (FPU/CC-op detail on i386), then abort (elided here). */
1266 void cpu_abort(CPUState *env, const char *fmt, ...)
1271 fprintf(stderr, "qemu: fatal: ");
1272 vfprintf(stderr, fmt, ap);
1273 fprintf(stderr, "\n");
1275 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1277 cpu_dump_state(env, stderr, fprintf, 0);
/* Duplicate a CPUState: memcpy the whole struct, but keep the new
   instance's own list linkage and cpu_index from cpu_init(). */
1283 CPUState *cpu_copy(CPUState *env)
1285 CPUState *new_env = cpu_init();
1286 /* preserve chaining and index */
1287 CPUState *next_cpu = new_env->next_cpu;
1288 int cpu_index = new_env->cpu_index;
1289 memcpy(new_env, env, sizeof(CPUState));
1290 new_env->next_cpu = next_cpu;
1291 new_env->cpu_index = cpu_index;
1295 #if !defined(CONFIG_USER_ONLY)
1297 /* NOTE: if flush_global is true, also flush global entries (not
/* Flush the entire TLB: mark every entry in every MMU mode invalid
   (-1), clear the TB jump cache, unmap the mmap area (non-SOFTMMU)
   and notify kqemu when enabled. */
1299 void tlb_flush(CPUState *env, int flush_global)
1303 #if defined(DEBUG_TLB)
1304 printf("tlb_flush:\n");
1306 /* must reset current TB so that interrupts cannot modify the
1307 links while we are modifying them */
1308 env->current_tb = NULL;
1310 for(i = 0; i < CPU_TLB_SIZE; i++) {
1311 env->tlb_table[0][i].addr_read = -1;
1312 env->tlb_table[0][i].addr_write = -1;
1313 env->tlb_table[0][i].addr_code = -1;
1314 env->tlb_table[1][i].addr_read = -1;
1315 env->tlb_table[1][i].addr_write = -1;
1316 env->tlb_table[1][i].addr_code = -1;
1317 #if (NB_MMU_MODES >= 3)
1318 env->tlb_table[2][i].addr_read = -1;
1319 env->tlb_table[2][i].addr_write = -1;
1320 env->tlb_table[2][i].addr_code = -1;
1321 #if (NB_MMU_MODES == 4)
1322 env->tlb_table[3][i].addr_read = -1;
1323 env->tlb_table[3][i].addr_write = -1;
1324 env->tlb_table[3][i].addr_code = -1;
1329 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1331 #if !defined(CONFIG_SOFTMMU)
1332 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1335 if (env->kqemu_enabled) {
1336 kqemu_flush(env, flush_global);
/* Invalidate one TLB entry if any of its read/write/code tags match
   'addr' (TLB_INVALID_MASK kept in the comparison so already-invalid
   entries never match). */
1342 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1344 if (addr == (tlb_entry->addr_read &
1345 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1346 addr == (tlb_entry->addr_write &
1347 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1348 addr == (tlb_entry->addr_code &
1349 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1350 tlb_entry->addr_read = -1;
1351 tlb_entry->addr_write = -1;
1352 tlb_entry->addr_code = -1;
/* Flush a single page from the TLB: clear the matching set index in
   every MMU mode, drop jump-cache entries for TBs that might overlap
   the page, and mirror the flush to mmap/kqemu as configured. */
1356 void tlb_flush_page(CPUState *env, target_ulong addr)
1359 TranslationBlock *tb;
1361 #if defined(DEBUG_TLB)
1362 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1364 /* must reset current TB so that interrupts cannot modify the
1365 links while we are modifying them */
1366 env->current_tb = NULL;
1368 addr &= TARGET_PAGE_MASK;
/* Direct-mapped TLB: set index is the low bits of the page number. */
1369 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1370 tlb_flush_entry(&env->tlb_table[0][i], addr);
1371 tlb_flush_entry(&env->tlb_table[1][i], addr);
1372 #if (NB_MMU_MODES >= 3)
1373 tlb_flush_entry(&env->tlb_table[2][i], addr);
1374 #if (NB_MMU_MODES == 4)
1375 tlb_flush_entry(&env->tlb_table[3][i], addr);
1379 /* Discard jump cache entries for any tb which might potentially
1380 overlap the flushed page. */
/* A TB may start on the previous page and run into this one, so the
   previous page's hash bucket is cleared too. */
1381 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1382 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1384 i = tb_jmp_cache_hash_page(addr);
1385 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1387 #if !defined(CONFIG_SOFTMMU)
1388 if (addr < MMAP_AREA_END)
1389 munmap((void *)addr, TARGET_PAGE_SIZE);
1392 if (env->kqemu_enabled) {
1393 kqemu_flush_page(env, addr);
1398 /* update the TLBs so that writes to code in the virtual page 'addr'
/* Write-protect a code page by clearing its dirty flags, so stores
   fault into the SMC handling path. */
1400 static void tlb_protect_code(ram_addr_t ram_addr)
1402 cpu_physical_memory_reset_dirty(ram_addr,
1403 ram_addr + TARGET_PAGE_SIZE,
1407 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1408 tested for self modifying code */
1409 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1412 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
/* If this TLB entry is a direct RAM write entry whose host address
   falls inside [start, start+length), downgrade it to IO_MEM_NOTDIRTY
   so the next write goes through the notdirty handler and re-marks
   the page dirty. */
1415 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1416 unsigned long start, unsigned long length)
1419 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1420 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
/* unsigned wrap-around makes this a single-compare range check */
1421 if ((addr - start) < length) {
1422 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
/* Clear the given dirty flag bits for the physical address range
   [start, end) and update every CPU's TLB so that the next write to
   those pages sets the dirty bits again.  In non-SOFTMMU builds the
   affected host mappings are write-protected with mprotect(). */
1427 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1431 unsigned long length, start1;
1435 start &= TARGET_PAGE_MASK;
1436 end = TARGET_PAGE_ALIGN(end);
1438 length = end - start;
1441 len = length >> TARGET_PAGE_BITS;
1443 /* XXX: should not depend on cpu context */
1445 if (env->kqemu_enabled) {
1448 for(i = 0; i < len; i++) {
1449 kqemu_set_notdirty(env, addr);
1450 addr += TARGET_PAGE_SIZE;
/* clear the requested dirty bits in the per-page dirty byte array */
1454 mask = ~dirty_flags;
1455 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1456 for(i = 0; i < len; i++)
1459 /* we modify the TLB cache so that the dirty bit will be set again
1460 when accessing the range */
/* TLB addends are host addresses, so translate the range once */
1461 start1 = start + (unsigned long)phys_ram_base;
1462 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1463 for(i = 0; i < CPU_TLB_SIZE; i++)
1464 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1465 for(i = 0; i < CPU_TLB_SIZE; i++)
1466 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1467 #if (NB_MMU_MODES >= 3)
1468 for(i = 0; i < CPU_TLB_SIZE; i++)
1469 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1470 #if (NB_MMU_MODES == 4)
1471 for(i = 0; i < CPU_TLB_SIZE; i++)
1472 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1477 #if !defined(CONFIG_SOFTMMU)
1478 /* XXX: this is expensive */
/* walk the whole virtual page table and remove PROT_WRITE from host
   mappings whose physical page lies in [start, end) */
1484 for(i = 0; i < L1_SIZE; i++) {
1487 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1488 for(j = 0; j < L2_SIZE; j++) {
1489 if (p->valid_tag == virt_valid_tag &&
1490 p->phys_addr >= start && p->phys_addr < end &&
1491 (p->prot & PROT_WRITE)) {
1492 if (addr < MMAP_AREA_END) {
1493 mprotect((void *)addr, TARGET_PAGE_SIZE,
1494 p->prot & ~PROT_WRITE);
1497 addr += TARGET_PAGE_SIZE;
/* If this TLB entry is a direct RAM write entry whose backing page is
   no longer marked dirty, tag it IO_MEM_NOTDIRTY so the next write is
   intercepted and the dirty state updated. */
1506 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1508 ram_addr_t ram_addr;
1510 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
/* convert host address back to ram offset */
1511 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1512 tlb_entry->addend - (unsigned long)phys_ram_base;
1513 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1514 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1519 /* update the TLB according to the current state of the dirty bits */
/* Scans every entry of every MMU mode table of 'env'. */
1520 void cpu_tlb_update_dirty(CPUState *env)
1523 for(i = 0; i < CPU_TLB_SIZE; i++)
1524 tlb_update_dirty(&env->tlb_table[0][i]);
1525 for(i = 0; i < CPU_TLB_SIZE; i++)
1526 tlb_update_dirty(&env->tlb_table[1][i]);
1527 #if (NB_MMU_MODES >= 3)
1528 for(i = 0; i < CPU_TLB_SIZE; i++)
1529 tlb_update_dirty(&env->tlb_table[2][i]);
1530 #if (NB_MMU_MODES == 4)
1531 for(i = 0; i < CPU_TLB_SIZE; i++)
1532 tlb_update_dirty(&env->tlb_table[3][i]);
/* Inverse of tlb_reset_dirty_range for a single entry: if the entry
   is a NOTDIRTY write entry whose host address equals 'start',
   promote it back to a fast direct-RAM write entry. */
1537 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1538 unsigned long start)
1541 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1542 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1543 if (addr == start) {
1544 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1549 /* update the TLB corresponding to virtual page vaddr and phys addr
1550 addr so that it is no longer dirty */
/* 'addr' is a host address (page-aligned here); 'vaddr' selects the
   direct-mapped TLB slot.  All MMU mode tables are updated. */
1551 static inline void tlb_set_dirty(CPUState *env,
1552 unsigned long addr, target_ulong vaddr)
1556 addr &= TARGET_PAGE_MASK;
1557 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1558 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1559 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1560 #if (NB_MMU_MODES >= 3)
1561 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1562 #if (NB_MMU_MODES == 4)
1563 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1568 /* add a new TLB entry. At most one entry for a given virtual address
1569 is permitted. Return 0 if OK or 2 if the page could not be mapped
1570 (can only happen in non SOFTMMU mode for I/O pages or pages
1571 conflicting with the host address space). */
/* NOTE(review): many lines of this function are elided in this
   listing (return statements, some declarations and braces); the
   comments below describe only what is visible. */
1572 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1573 target_phys_addr_t paddr, int prot,
1574 int is_user, int is_softmmu)
1579 target_ulong address;
1580 target_phys_addr_t addend;
/* look up the physical page descriptor; unmapped -> unassigned I/O */
1585 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1587 pd = IO_MEM_UNASSIGNED;
1589 pd = p->phys_offset;
1591 #if defined(DEBUG_TLB)
1592 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1593 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1597 #if !defined(CONFIG_SOFTMMU)
/* decide between I/O dispatch and direct host-RAM access */
1601 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1602 /* IO memory case */
/* low bits of 'address' carry the io_mem index for the slow path */
1603 address = vaddr | pd;
1606 /* standard memory */
1608 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1611 /* Make accesses to pages with watchpoints go via the
1612 watchpoint trap routines. */
1613 for (i = 0; i < env->nb_watchpoints; i++) {
1614 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1615 if (address & ~TARGET_PAGE_MASK) {
1616 env->watchpoint[i].is_ram = 0;
1617 address = vaddr | io_mem_watch;
1619 env->watchpoint[i].is_ram = 1;
1620 /* TODO: Figure out how to make read watchpoints coexist
1622 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
/* fill the direct-mapped TLB slot for this MMU mode */
1627 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1629 te = &env->tlb_table[is_user][index];
1630 te->addend = addend;
1631 if (prot & PAGE_READ) {
1632 te->addr_read = address;
1636 if (prot & PAGE_EXEC) {
1637 te->addr_code = address;
1641 if (prot & PAGE_WRITE) {
1642 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1643 (pd & IO_MEM_ROMD)) {
1644 /* write access calls the I/O callback */
1645 te->addr_write = vaddr |
1646 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1647 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1648 !cpu_physical_memory_is_dirty(pd)) {
/* clean RAM page: route writes through the notdirty handler */
1649 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1651 te->addr_write = address;
1654 te->addr_write = -1;
1657 #if !defined(CONFIG_SOFTMMU)
/* non-SOFTMMU: also create/adjust the real host mapping */
1659 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1660 /* IO access: no mapping is done as it will be handled by the
1662 if (!(env->hflags & HF_SOFTMMU_MASK))
1667 if (vaddr >= MMAP_AREA_END) {
1670 if (prot & PROT_WRITE) {
1671 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1672 #if defined(TARGET_HAS_SMC) || 1
1675 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1676 !cpu_physical_memory_is_dirty(pd))) {
1677 /* ROM: we do as if code was inside */
1678 /* if code is present, we only map as read only and save the
1682 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1685 vp->valid_tag = virt_valid_tag;
1686 prot &= ~PAGE_WRITE;
/* map guest RAM from the phys_ram file at the guest virtual address */
1689 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1690 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1691 if (map_addr == MAP_FAILED) {
1692 cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1702 /* called from signal handler: invalidate the code and unprotect the
1703 page. Return TRUE if the fault was successfully handled. */
/* Non-SOFTMMU variant: restores PROT_WRITE on the faulting host page,
   marks the physical page fully dirty and invalidates any translated
   code in it. */
1704 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1706 #if !defined(CONFIG_SOFTMMU)
1709 #if defined(DEBUG_TLB)
1710 printf("page_unprotect: addr=0x%08x\n", addr);
1712 addr &= TARGET_PAGE_MASK;
1714 /* if it is not mapped, no need to worry here */
1715 if (addr >= MMAP_AREA_END)
1717 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1720 /* NOTE: in this case, validate_tag is _not_ tested as it
1721 validates only the code TLB */
1722 if (vp->valid_tag != virt_valid_tag)
1724 if (!(vp->prot & PAGE_WRITE))
1726 #if defined(DEBUG_TLB)
1727 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1728 addr, vp->phys_addr, vp->prot);
1730 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1731 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1732 (unsigned long)addr, vp->prot);
1733 /* set the dirty bit */
1734 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1735 /* flush the code inside */
1736 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
/* User-mode emulation stubs: there is no softmmu TLB to maintain,
   so these are no-ops (bodies elided in this listing). */
1745 void tlb_flush(CPUState *env, int flush_global)
1749 void tlb_flush_page(CPUState *env, target_ulong addr)
1753 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1754 target_phys_addr_t paddr, int prot,
1755 int is_user, int is_softmmu)
1760 /* dump memory mappings */
/* Writes a /proc/self/maps-like table of contiguous guest page runs
   that share the same protection flags to stream 'f'. */
1761 void page_dump(FILE *f)
1763 unsigned long start, end;
1764 int i, j, prot, prot1;
1767 fprintf(f, "%-8s %-8s %-8s %s\n",
1768 "start", "end", "size", "prot");
/* <= L1_SIZE on purpose: the extra iteration flushes the last run */
1772 for(i = 0; i <= L1_SIZE; i++) {
1777 for(j = 0;j < L2_SIZE; j++) {
/* protection changed: emit the run that just ended */
1782 if (prot1 != prot) {
1783 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1785 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1786 start, end, end - start,
1787 prot & PAGE_READ ? 'r' : '-',
1788 prot & PAGE_WRITE ? 'w' : '-',
1789 prot & PAGE_EXEC ? 'x' : '-');
/* Return the PAGE_* flags of the page containing 'address'
   (0 when the page descriptor does not exist; tail elided). */
1803 int page_get_flags(target_ulong address)
1807 p = page_find(address >> TARGET_PAGE_BITS);
1813 /* modify the flags of a page and invalidate the code if
1814 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1815 depending on PAGE_WRITE */
1816 void page_set_flags(target_ulong start, target_ulong end, int flags)
1821 start = start & TARGET_PAGE_MASK;
1822 end = TARGET_PAGE_ALIGN(end);
1823 if (flags & PAGE_WRITE)
/* remember the caller-requested writability so page_unprotect can
   restore it after SMC write-protection */
1824 flags |= PAGE_WRITE_ORG;
1825 spin_lock(&tb_lock);
1826 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1827 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1828 /* if the write protection is set, then we invalidate the code
1830 if (!(p->flags & PAGE_WRITE) &&
1831 (flags & PAGE_WRITE) &&
1833 tb_invalidate_phys_page(addr, 0, NULL);
1837 spin_unlock(&tb_lock);
1840 /* called from signal handler: invalidate the code and unprotect the
1841 page. Return TRUE if the fault was successfully handled. */
/* User-mode variant: works at host-page granularity because mprotect
   operates on host pages, which may cover several target pages. */
1842 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1844 unsigned int page_index, prot, pindex;
1846 target_ulong host_start, host_end, addr;
1848 host_start = address & qemu_host_page_mask;
1849 page_index = host_start >> TARGET_PAGE_BITS;
1850 p1 = page_find(page_index);
1853 host_end = host_start + qemu_host_page_size;
/* accumulate the flags of every target page in the host page */
1856 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1860 /* if the page was really writable, then we change its
1861 protection back to writable */
1862 if (prot & PAGE_WRITE_ORG) {
1863 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1864 if (!(p1[pindex].flags & PAGE_WRITE)) {
1865 mprotect((void *)g2h(host_start), qemu_host_page_size,
1866 (prot & PAGE_BITS) | PAGE_WRITE);
1867 p1[pindex].flags |= PAGE_WRITE;
1868 /* and since the content will be modified, we must invalidate
1869 the corresponding translated code. */
1870 tb_invalidate_phys_page(address, pc, puc);
1871 #ifdef DEBUG_TB_CHECK
1872 tb_invalidate_check(address);
1880 /* call this function when system calls directly modify a memory area */
1881 /* ??? This should be redundant now we have lock_user. */
/* Unprotects every target page overlapping [data, data+data_size). */
1882 void page_unprotect_range(target_ulong data, target_ulong data_size)
1884 target_ulong start, end, addr;
1887 end = start + data_size;
1888 start &= TARGET_PAGE_MASK;
1889 end = TARGET_PAGE_ALIGN(end);
1890 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1891 page_unprotect(addr, 0, NULL);
/* User-mode stub: no TLB, nothing to do (empty body elided). */
1895 static inline void tlb_set_dirty(CPUState *env,
1896 unsigned long addr, target_ulong vaddr)
1899 #endif /* defined(CONFIG_USER_ONLY) */
1901 /* register physical memory. 'size' must be a multiple of the target
1902 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
/* ... I/O memory region index (comment continuation elided).  Fills
   one PhysPageDesc per page and then flushes all CPU TLBs since they
   cache ram addresses. */
1904 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1906 unsigned long phys_offset)
1908 target_phys_addr_t addr, end_addr;
1912 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1913 end_addr = start_addr + size;
1914 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1915 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1916 p->phys_offset = phys_offset;
/* only RAM/ROM(D) offsets advance per page; I/O indices are shared */
1917 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1918 (phys_offset & IO_MEM_ROMD))
1919 phys_offset += TARGET_PAGE_SIZE;
1922 /* since each CPU stores ram addresses in its TLB cache, we must
1923 reset the modified entries */
1925 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1930 /* XXX: temporary until new memory mapping API */
/* Return the phys_offset stored for the page containing 'addr',
   or IO_MEM_UNASSIGNED when no descriptor exists. */
1931 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1935 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1937 return IO_MEM_UNASSIGNED;
1938 return p->phys_offset;
1941 /* XXX: better than nothing */
/* Bump allocator over the preallocated guest RAM block: returns the
   current offset and advances it by 'size' (page aligned).  Fails
   hard when phys_ram_size is exhausted (error path elided). */
1942 ram_addr_t qemu_ram_alloc(unsigned int size)
1945 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
1946 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
1947 size, phys_ram_size);
1950 addr = phys_ram_alloc_offset;
1951 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
/* Intentionally a no-op: the bump allocator cannot reclaim. */
1955 void qemu_ram_free(ram_addr_t addr)
/* Handlers for accesses to unassigned physical memory: optionally log,
   forward to the target's unassigned-access hook, and read as zero
   (return elided).  The same byte handler is reused for all widths. */
1959 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1961 #ifdef DEBUG_UNASSIGNED
1962 printf("Unassigned mem read " TARGET_FMT_lx "\n", addr)
1965 do_unassigned_access(addr, 0, 0, 0);
1970 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1972 #ifdef DEBUG_UNASSIGNED
1973 printf("Unassigned mem write " TARGET_FMT_lx " = 0x%x\n", addr, val);
1976 do_unassigned_access(addr, 1, 0, 0);
/* dispatch tables indexed by access size: byte, word, long */
1980 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1981 unassigned_mem_readb,
1982 unassigned_mem_readb,
1983 unassigned_mem_readb,
1986 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1987 unassigned_mem_writeb,
1988 unassigned_mem_writeb,
1989 unassigned_mem_writeb,
/* IO_MEM_NOTDIRTY write handlers (byte/word/long).  'addr' here is a
   host address into guest RAM.  Each handler: invalidates translated
   code in the page if needed, performs the store, sets the dirty
   flags, and restores the fast direct-RAM TLB entry once the page is
   fully dirty (0xff). */
1992 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1994 unsigned long ram_addr;
1996 ram_addr = addr - (unsigned long)phys_ram_base;
1997 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1998 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1999 #if !defined(CONFIG_USER_ONLY)
2000 tb_invalidate_phys_page_fast(ram_addr, 1);
/* invalidation may have changed the flags; reload */
2001 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2004 stb_p((uint8_t *)(long)addr, val);
2006 if (cpu_single_env->kqemu_enabled &&
2007 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2008 kqemu_modify_page(cpu_single_env, ram_addr);
2010 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2011 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2012 /* we remove the notdirty callback only if the code has been
2014 if (dirty_flags == 0xff)
2015 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
/* 16-bit variant: identical logic, 2-byte invalidation and stw_p */
2018 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2020 unsigned long ram_addr;
2022 ram_addr = addr - (unsigned long)phys_ram_base;
2023 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2024 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2025 #if !defined(CONFIG_USER_ONLY)
2026 tb_invalidate_phys_page_fast(ram_addr, 2);
2027 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2030 stw_p((uint8_t *)(long)addr, val);
2032 if (cpu_single_env->kqemu_enabled &&
2033 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2034 kqemu_modify_page(cpu_single_env, ram_addr);
2036 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2037 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2038 /* we remove the notdirty callback only if the code has been
2040 if (dirty_flags == 0xff)
2041 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
/* 32-bit variant: identical logic, 4-byte invalidation and stl_p */
2044 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2046 unsigned long ram_addr;
2048 ram_addr = addr - (unsigned long)phys_ram_base;
2049 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2050 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2051 #if !defined(CONFIG_USER_ONLY)
2052 tb_invalidate_phys_page_fast(ram_addr, 4);
2053 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2056 stl_p((uint8_t *)(long)addr, val);
2058 if (cpu_single_env->kqemu_enabled &&
2059 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2060 kqemu_modify_page(cpu_single_env, ram_addr);
2062 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2063 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2064 /* we remove the notdirty callback only if the code has been
2066 if (dirty_flags == 0xff)
2067 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
/* reads through IO_MEM_NOTDIRTY never happen: read slots stay direct */
2070 static CPUReadMemoryFunc *error_mem_read[3] = {
2071 NULL, /* never used */
2072 NULL, /* never used */
2073 NULL, /* never used */
2076 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2077 notdirty_mem_writeb,
2078 notdirty_mem_writew,
2079 notdirty_mem_writel,
2082 #if defined(CONFIG_SOFTMMU)
2083 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2084 so these check for a hit then pass through to the normal out-of-line
/* ... slow-path accessors (comment continuation elided). */
2086 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2088 return ldub_phys(addr);
2091 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2093 return lduw_phys(addr);
2096 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2098 return ldl_phys(addr);
2101 /* Generate a debug exception if a watchpoint has been hit.
2102 Returns the real physical address of the access. addr will be a host
2103 address in the is_ram case. */
2104 static target_ulong check_watchpoint(target_phys_addr_t addr)
2106 CPUState *env = cpu_single_env;
2108 target_ulong retaddr;
2112 for (i = 0; i < env->nb_watchpoints; i++) {
2113 watch = env->watchpoint[i].vaddr;
/* same virtual page as the faulting write? */
2114 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2115 if (env->watchpoint[i].is_ram)
2116 retaddr = addr - (unsigned long)phys_ram_base;
/* exact in-page offset match -> raise the debug interrupt */
2117 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2118 cpu_single_env->watchpoint_hit = i + 1;
2119 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
/* write handlers: check for a hit, then perform the real store */
2127 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2130 addr = check_watchpoint(addr);
2131 stb_phys(addr, val);
2134 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2137 addr = check_watchpoint(addr);
2138 stw_phys(addr, val);
2141 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2144 addr = check_watchpoint(addr);
2145 stl_phys(addr, val);
/* table initializers elided in this listing */
2148 static CPUReadMemoryFunc *watch_mem_read[3] = {
2154 static CPUWriteMemoryFunc *watch_mem_write[3] = {
/* Register the three built-in I/O memory slots (ROM, unassigned,
   notdirty), the watchpoint handler (SOFTMMU only), and allocate the
   per-page dirty bitmap initialised to fully dirty (0xff). */
2161 static void io_mem_init(void)
2163 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2164 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2165 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2168 #if defined(CONFIG_SOFTMMU)
2169 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2170 watch_mem_write, NULL);
2172 /* alloc dirty bits array */
2173 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2174 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2177 /* mem_read and mem_write are arrays of functions containing the
2178 function to access byte (index 0), word (index 1) and dword (index
2179 2). All functions must be supplied. If io_index is non zero, the
2180 corresponding io zone is modified. If it is zero, a new io zone is
2181 allocated. The return value can be used with
2182 cpu_register_physical_memory(). (-1) is returned if error. */
2183 int cpu_register_io_memory(int io_index,
2184 CPUReadMemoryFunc **mem_read,
2185 CPUWriteMemoryFunc **mem_write,
/* io_index <= 0 allocates the next free slot; otherwise the caller's
   fixed slot is (re)used after a bounds check */
2190 if (io_index <= 0) {
2191 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2193 io_index = io_mem_nb++;
2195 if (io_index >= IO_MEM_NB_ENTRIES)
2199 for(i = 0;i < 3; i++) {
2200 io_mem_read[io_index][i] = mem_read[i];
2201 io_mem_write[io_index][i] = mem_write[i];
2203 io_mem_opaque[io_index] = opaque;
/* encode the slot index in the low bits of a phys_offset value */
2204 return io_index << IO_MEM_SHIFT;
/* Accessors for the handler tables of a registered I/O slot;
   'io_index' is the encoded value returned by cpu_register_io_memory. */
2207 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2209 return io_mem_write[io_index >> IO_MEM_SHIFT];
2212 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2214 return io_mem_read[io_index >> IO_MEM_SHIFT];
2217 /* physical memory access (slow version, mainly for debug) */
2218 #if defined(CONFIG_USER_ONLY)
/* User-mode variant: copies through lock_user/unlock_user one target
   page at a time, honouring the page's PAGE_READ/PAGE_WRITE flags
   (loop header and error returns elided in this listing). */
2219 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2220 int len, int is_write)
/* l = number of bytes available up to the end of the current page */
2227 page = addr & TARGET_PAGE_MASK;
2228 l = (page + TARGET_PAGE_SIZE) - addr;
2231 flags = page_get_flags(page);
2232 if (!(flags & PAGE_VALID))
2235 if (!(flags & PAGE_WRITE))
2237 p = lock_user(addr, len, 0);
2238 memcpy(p, buf, len);
2239 unlock_user(p, addr, len);
2241 if (!(flags & PAGE_READ))
2243 p = lock_user(addr, len, 1);
2244 memcpy(buf, p, len);
2245 unlock_user(p, addr, 0);
/* System-mode slow path: copy 'len' bytes between 'buf' and physical
   memory at 'addr', page by page.  RAM pages use memcpy (with code
   invalidation and dirty-flag update on writes); anything else is
   dispatched to the registered I/O handlers at the widest aligned
   size available (4/2/1 bytes). */
2254 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2255 int len, int is_write)
2260 target_phys_addr_t page;
2265 page = addr & TARGET_PAGE_MASK;
2266 l = (page + TARGET_PAGE_SIZE) - addr;
2269 p = phys_page_find(page >> TARGET_PAGE_BITS);
2271 pd = IO_MEM_UNASSIGNED;
2273 pd = p->phys_offset;
2277 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2278 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2279 /* XXX: could force cpu_single_env to NULL to avoid
2281 if (l >= 4 && ((addr & 3) == 0)) {
2282 /* 32 bit write access */
2284 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2286 } else if (l >= 2 && ((addr & 1) == 0)) {
2287 /* 16 bit write access */
2289 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2292 /* 8 bit write access */
2294 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2298 unsigned long addr1;
2299 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2301 ptr = phys_ram_base + addr1;
2302 memcpy(ptr, buf, l);
2303 if (!cpu_physical_memory_is_dirty(addr1)) {
2304 /* invalidate code */
2305 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
/* mark dirty for everything except code tracking */
2307 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2308 (0xff & ~CODE_DIRTY_FLAG);
/* read path: ROMD devices are read as RAM, true I/O goes through
   the handlers */
2312 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2313 !(pd & IO_MEM_ROMD)) {
2315 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2316 if (l >= 4 && ((addr & 3) == 0)) {
2317 /* 32 bit read access */
2318 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2321 } else if (l >= 2 && ((addr & 1) == 0)) {
2322 /* 16 bit read access */
2323 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2327 /* 8 bit read access */
2328 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2334 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2335 (addr & ~TARGET_PAGE_MASK);
2336 memcpy(buf, ptr, l);
2345 /* used for ROM loading : can write in RAM and ROM */
/* Like the write half of cpu_physical_memory_rw but also writes into
   ROM/ROMD pages; non-memory pages are skipped (branch body elided). */
2346 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2347 const uint8_t *buf, int len)
2351 target_phys_addr_t page;
2356 page = addr & TARGET_PAGE_MASK;
2357 l = (page + TARGET_PAGE_SIZE) - addr;
2360 p = phys_page_find(page >> TARGET_PAGE_BITS);
2362 pd = IO_MEM_UNASSIGNED;
2364 pd = p->phys_offset;
2367 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2368 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2369 !(pd & IO_MEM_ROMD)) {
2372 unsigned long addr1;
2373 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2375 ptr = phys_ram_base + addr1;
2376 memcpy(ptr, buf, l);
2385 /* warning: addr must be aligned */
/* Load a 32-bit value from physical memory: I/O pages (but not ROMD)
   go through the registered handler, RAM/ROM is read directly
   (final load/return elided in this listing). */
2386 uint32_t ldl_phys(target_phys_addr_t addr)
2394 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2396 pd = IO_MEM_UNASSIGNED;
2398 pd = p->phys_offset;
2401 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2402 !(pd & IO_MEM_ROMD)) {
2404 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2405 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2408 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2409 (addr & ~TARGET_PAGE_MASK);
2415 /* warning: addr must be aligned */
/* Load a 64-bit value from physical memory.  I/O handlers only
   support up to 32-bit access, so the quad is composed from two
   32-bit reads in target byte order. */
2416 uint64_t ldq_phys(target_phys_addr_t addr)
2424 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2426 pd = IO_MEM_UNASSIGNED;
2428 pd = p->phys_offset;
2431 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2432 !(pd & IO_MEM_ROMD)) {
2434 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2435 #ifdef TARGET_WORDS_BIGENDIAN
2436 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2437 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2439 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2440 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2444 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2445 (addr & ~TARGET_PAGE_MASK);
/* Byte/word physical loads implemented on top of the generic
   slow path; lduw swaps to host byte order with tswap16. */
2452 uint32_t ldub_phys(target_phys_addr_t addr)
2455 cpu_physical_memory_read(addr, &val, 1);
2460 uint32_t lduw_phys(target_phys_addr_t addr)
2463 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2464 return tswap16(val);
2467 /* warning: addr must be aligned. The ram page is not masked as dirty
2468 and the code inside is not invalidated. It is useful if the dirty
2469 bits are used to track modified PTEs */
2470 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2477 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2479 pd = IO_MEM_UNASSIGNED;
2481 pd = p->phys_offset;
2484 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2485 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2486 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
/* RAM case: direct host store, deliberately skipping dirty tracking */
2488 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2489 (addr & ~TARGET_PAGE_MASK);
/* 64-bit variant of stl_phys_notdirty: I/O writes are split into two
   32-bit accesses in target byte order; dirty bits are not touched. */
2494 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2501 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2503 pd = IO_MEM_UNASSIGNED;
2505 pd = p->phys_offset;
2508 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2509 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2510 #ifdef TARGET_WORDS_BIGENDIAN
2511 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2512 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2514 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2515 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2518 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2519 (addr & ~TARGET_PAGE_MASK);
2524 /* warning: addr must be aligned */
/* Store a 32-bit value to physical memory with full dirty tracking:
   RAM stores invalidate translated code if needed and set the dirty
   flags; non-RAM goes through the I/O handler. */
2525 void stl_phys(target_phys_addr_t addr, uint32_t val)
2532 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2534 pd = IO_MEM_UNASSIGNED;
2536 pd = p->phys_offset;
2539 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2540 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2541 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2543 unsigned long addr1;
2544 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2546 ptr = phys_ram_base + addr1;
2548 if (!cpu_physical_memory_is_dirty(addr1)) {
2549 /* invalidate code */
2550 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2552 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2553 (0xff & ~CODE_DIRTY_FLAG);
/* Byte/word/quad physical stores built on the generic write path;
   stw swaps to target byte order with tswap16 (stq's swap elided). */
2559 void stb_phys(target_phys_addr_t addr, uint32_t val)
2562 cpu_physical_memory_write(addr, &v, 1);
2566 void stw_phys(target_phys_addr_t addr, uint32_t val)
2568 uint16_t v = tswap16(val);
2569 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2573 void stq_phys(target_phys_addr_t addr, uint64_t val)
2576 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2581 /* virtual memory access for debug */
/* Translate each guest-virtual page via cpu_get_phys_page_debug and
   forward the access to cpu_physical_memory_rw; returns an error
   (value elided) when a page is unmapped. */
2582 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2583 uint8_t *buf, int len, int is_write)
2586 target_phys_addr_t phys_addr;
2590 page = addr & TARGET_PAGE_MASK;
2591 phys_addr = cpu_get_phys_page_debug(env, page);
2592 /* if no physical page mapped, return an error */
2593 if (phys_addr == -1)
2595 l = (page + TARGET_PAGE_SIZE) - addr;
2598 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
/* Print translation-block statistics (sizes, cross-page TBs, direct
   jump chaining, flush/invalidate counters) through 'cpu_fprintf'. */
2607 void dump_exec_info(FILE *f,
2608 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2610 int i, target_code_size, max_target_code_size;
2611 int direct_jmp_count, direct_jmp2_count, cross_page;
2612 TranslationBlock *tb;
2614 target_code_size = 0;
2615 max_target_code_size = 0;
2617 direct_jmp_count = 0;
2618 direct_jmp2_count = 0;
2619 for(i = 0; i < nb_tbs; i++) {
2621 target_code_size += tb->size;
2622 if (tb->size > max_target_code_size)
2623 max_target_code_size = tb->size;
/* a second page address means the TB crosses a page boundary */
2624 if (tb->page_addr[1] != -1)
/* 0xffff marks "no direct jump patched" for that slot */
2626 if (tb->tb_next_offset[0] != 0xffff) {
2628 if (tb->tb_next_offset[1] != 0xffff) {
2629 direct_jmp2_count++;
2633 /* XXX: avoid using doubles ? */
2634 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2635 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2636 nb_tbs ? target_code_size / nb_tbs : 0,
2637 max_target_code_size);
2638 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2639 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2640 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2641 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2643 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2644 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2646 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2648 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2649 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2650 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2651 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2654 #if !defined(CONFIG_USER_ONLY)
2656 #define MMUSUFFIX _cmmu
2657 #define GETPC() NULL
2658 #define env cpu_single_env
2659 #define SOFTMMU_CODE_ACCESS
2662 #include "softmmu_template.h"
2665 #include "softmmu_template.h"
2668 #include "softmmu_template.h"
2671 #include "softmmu_template.h"