2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
24 #include <sys/types.h>
37 #include "qemu-common.h"
42 #if defined(CONFIG_USER_ONLY)
46 //#define DEBUG_TB_INVALIDATE
49 //#define DEBUG_UNASSIGNED
51 /* make various TB consistency checks */
52 //#define DEBUG_TB_CHECK
53 //#define DEBUG_TLB_CHECK
55 //#define DEBUG_IOPORT
56 //#define DEBUG_SUBPAGE
58 #if !defined(CONFIG_USER_ONLY)
59 /* TB consistency checks only implemented for usermode emulation. */
63 #define SMC_BITMAP_USE_THRESHOLD 10
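/* Illustrative note (not in the original source): once a page holding
   translated code has taken this many write faults, build_page_bitmap()
   below records which bytes of the page are covered by TBs, so later
   writes only force an invalidation when they actually overlap code
   (see tb_invalidate_phys_page_fast). */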
65 #if defined(TARGET_SPARC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 41
67 #elif defined(TARGET_SPARC)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 36
69 #elif defined(TARGET_ALPHA)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 42
71 #define TARGET_VIRT_ADDR_SPACE_BITS 42
72 #elif defined(TARGET_PPC64)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80 #define TARGET_PHYS_ADDR_SPACE_BITS 32
83 static TranslationBlock *tbs;
84 int code_gen_max_blocks;
85 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
87 /* any access to the tbs or the page table must use this lock */
88 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
90 #if defined(__arm__) || defined(__sparc_v9__)
91 /* The prologue must be reachable with a direct jump. ARM and Sparc64
92 have limited branch ranges (possibly also PPC) so place it in a
93 section close to the code segment. */
94 #define code_gen_section \
95 __attribute__((__section__(".gen_code"))) \
96 __attribute__((aligned (32)))
98 #define code_gen_section \
99 __attribute__((aligned (32)))
102 uint8_t code_gen_prologue[1024] code_gen_section;
103 static uint8_t *code_gen_buffer;
104 static unsigned long code_gen_buffer_size;
105 /* threshold to flush the translated code buffer */
106 static unsigned long code_gen_buffer_max_size;
107 uint8_t *code_gen_ptr;
109 #if !defined(CONFIG_USER_ONLY)
111 uint8_t *phys_ram_dirty;
112 static int in_migration;
114 typedef struct RAMBlock {
118 struct RAMBlock *next;
121 static RAMBlock *ram_blocks;
122 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
123 then we can no longer assume contiguous ram offsets, and external uses
124 of this variable will break. */
125 ram_addr_t last_ram_offset;
129 /* current CPU in the current thread. It is only valid inside
131 CPUState *cpu_single_env;
132 /* 0 = Do not count executed instructions.
133 1 = Precise instruction counting.
134 2 = Adaptive rate instruction counting. */
136 /* Current instruction counter. While executing translated code this may
137 include some instructions that have not yet been executed. */
140 typedef struct PageDesc {
141 /* list of TBs intersecting this ram page */
142 TranslationBlock *first_tb;
143 /* in order to optimize self modifying code, we count the number
144 of lookups we do to a given page to use a bitmap */
145 unsigned int code_write_count;
146 uint8_t *code_bitmap;
147 #if defined(CONFIG_USER_ONLY)
152 typedef struct PhysPageDesc {
153 /* offset in host memory of the page + io_index in the low bits */
154 ram_addr_t phys_offset;
155 ram_addr_t region_offset;
159 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
160 /* XXX: this is a temporary hack for the alpha target.
161 * In the future, this is to be replaced by a multi-level table
162 * to actually be able to handle the complete 64-bit address space.
164 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
166 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
169 #define L1_SIZE (1 << L1_BITS)
170 #define L2_SIZE (1 << L2_BITS)
172 unsigned long qemu_real_host_page_size;
173 unsigned long qemu_host_page_bits;
174 unsigned long qemu_host_page_size;
175 unsigned long qemu_host_page_mask;
177 /* XXX: for system emulation, it could just be an array */
178 static PageDesc *l1_map[L1_SIZE];
179 static PhysPageDesc **l1_phys_map;
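/* Worked example (illustrative sketch, not part of the original file):
   how a page index walks the two-level l1_map above.  Assuming
   TARGET_PAGE_BITS == 12 and L2_BITS == 10 on a 32-bit target, each
   level holds 1024 entries. */
#if 0
static PageDesc *two_level_lookup_sketch(target_ulong index)
{
    PageDesc *p = l1_map[index >> L2_BITS];   /* top L1_BITS bits */
    if (!p)
        return NULL;                          /* chunk never allocated */
    return p + (index & (L2_SIZE - 1));       /* low L2_BITS bits */
}
#endif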
181 #if !defined(CONFIG_USER_ONLY)
182 static void io_mem_init(void);
184 /* io memory support */
185 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
186 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
187 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
188 static char io_mem_used[IO_MEM_NB_ENTRIES];
189 static int io_mem_watch;
193 static const char *logfilename = "/tmp/qemu.log";
196 static int log_append = 0;
199 static int tlb_flush_count;
200 static int tb_flush_count;
201 static int tb_phys_invalidate_count;
203 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
204 typedef struct subpage_t {
205 target_phys_addr_t base;
206 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
207 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
208 void *opaque[TARGET_PAGE_SIZE][2][4];
209 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
213 static void map_exec(void *addr, long size)
216 VirtualProtect(addr, size,
217 PAGE_EXECUTE_READWRITE, &old_protect);
221 static void map_exec(void *addr, long size)
223 unsigned long start, end, page_size;
225 page_size = getpagesize();
226 start = (unsigned long)addr;
227 start &= ~(page_size - 1);
229 end = (unsigned long)addr + size;
230 end += page_size - 1;
231 end &= ~(page_size - 1);
233 mprotect((void *)start, end - start,
234 PROT_READ | PROT_WRITE | PROT_EXEC);
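/* Worked example (illustrative, hypothetical values): with 4 KB host
   pages, map_exec((void *)0x12345, 0x100) rounds start down to 0x12000
   and end up to 0x13000, so the whole page containing the range becomes
   readable, writable and executable. */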
238 static void page_init(void)
240 /* NOTE: we can always suppose that qemu_host_page_size >=
244 SYSTEM_INFO system_info;
246 GetSystemInfo(&system_info);
247 qemu_real_host_page_size = system_info.dwPageSize;
250 qemu_real_host_page_size = getpagesize();
252 if (qemu_host_page_size == 0)
253 qemu_host_page_size = qemu_real_host_page_size;
254 if (qemu_host_page_size < TARGET_PAGE_SIZE)
255 qemu_host_page_size = TARGET_PAGE_SIZE;
256 qemu_host_page_bits = 0;
257 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
258 qemu_host_page_bits++;
259 qemu_host_page_mask = ~(qemu_host_page_size - 1);
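/* e.g. a 4 KB host page yields qemu_host_page_bits == 12 and
   qemu_host_page_mask == ~0xfffUL (illustrative values) */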
260 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
261 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
263 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
265 long long startaddr, endaddr;
270 last_brk = (unsigned long)sbrk(0);
271 f = fopen("/proc/self/maps", "r");
274 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
276 startaddr = MIN(startaddr,
277 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
278 endaddr = MIN(endaddr,
279 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280 page_set_flags(startaddr & TARGET_PAGE_MASK,
281 TARGET_PAGE_ALIGN(endaddr),
292 static inline PageDesc **page_l1_map(target_ulong index)
294 #if TARGET_LONG_BITS > 32
295 /* Host memory outside guest VM. For 32-bit targets we have already
296 excluded high addresses. */
297 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
300 return &l1_map[index >> L2_BITS];
303 static inline PageDesc *page_find_alloc(target_ulong index)
306 lp = page_l1_map(index);
312 /* allocate if not found */
313 #if defined(CONFIG_USER_ONLY)
314 size_t len = sizeof(PageDesc) * L2_SIZE;
315 /* Don't use qemu_malloc because it may recurse. */
316 p = mmap(0, len, PROT_READ | PROT_WRITE,
317 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
320 unsigned long addr = h2g(p);
321 page_set_flags(addr & TARGET_PAGE_MASK,
322 TARGET_PAGE_ALIGN(addr + len),
326 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
330 return p + (index & (L2_SIZE - 1));
333 static inline PageDesc *page_find(target_ulong index)
336 lp = page_l1_map(index);
343 return p + (index & (L2_SIZE - 1));
346 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
351 p = (void **)l1_phys_map;
352 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
354 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
355 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
357 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
360 /* allocate if not found */
363 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
364 memset(p, 0, sizeof(void *) * L1_SIZE);
368 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
372 /* allocate if not found */
375 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
377 for (i = 0; i < L2_SIZE; i++) {
378 pd[i].phys_offset = IO_MEM_UNASSIGNED;
379 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
382 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
385 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
387 return phys_page_find_alloc(index, 0);
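/* Illustrative sketch (not in the original source) of the index split
   done by phys_page_find_alloc() above on a wide physical address
   space.  Assuming TARGET_PAGE_BITS == 12 and L1_BITS == L2_BITS == 10,
   a 42-bit physical address leaves a 30-bit page index consumed as
   three 10-bit array subscripts: */
#if 0
static void phys_index_split_sketch(target_phys_addr_t index)
{
    unsigned long top  = (index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1);
    unsigned long mid  = (index >> L2_BITS) & (L1_SIZE - 1);
    unsigned long leaf = index & (L2_SIZE - 1);
    /* l1_phys_map[top] -> second-level table[mid] -> PhysPageDesc[leaf] */
    (void)top; (void)mid; (void)leaf;
}
#endif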
390 #if !defined(CONFIG_USER_ONLY)
391 static void tlb_protect_code(ram_addr_t ram_addr);
392 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
394 #define mmap_lock() do { } while(0)
395 #define mmap_unlock() do { } while(0)
398 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
400 #if defined(CONFIG_USER_ONLY)
401 /* Currently it is not recommended to allocate big chunks of data in
402 user mode. This will change once a dedicated libc is used. */
403 #define USE_STATIC_CODE_GEN_BUFFER
406 #ifdef USE_STATIC_CODE_GEN_BUFFER
407 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
410 static void code_gen_alloc(unsigned long tb_size)
412 #ifdef USE_STATIC_CODE_GEN_BUFFER
413 code_gen_buffer = static_code_gen_buffer;
414 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
415 map_exec(code_gen_buffer, code_gen_buffer_size);
417 code_gen_buffer_size = tb_size;
418 if (code_gen_buffer_size == 0) {
419 #if defined(CONFIG_USER_ONLY)
420 /* in user mode, phys_ram_size is not meaningful */
421 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
423 /* XXX: needs adjustments */
424 code_gen_buffer_size = (unsigned long)(ram_size / 4);
427 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
428 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
429 /* The code gen buffer location may have constraints depending on
430 the host cpu and OS */
431 #if defined(__linux__)
436 flags = MAP_PRIVATE | MAP_ANONYMOUS;
437 #if defined(__x86_64__)
439 /* Cannot map more than that */
440 if (code_gen_buffer_size > (800 * 1024 * 1024))
441 code_gen_buffer_size = (800 * 1024 * 1024);
442 #elif defined(__sparc_v9__)
443 // Map the buffer below 2G, so we can use direct calls and branches
445 start = (void *) 0x60000000UL;
446 if (code_gen_buffer_size > (512 * 1024 * 1024))
447 code_gen_buffer_size = (512 * 1024 * 1024);
448 #elif defined(__arm__)
449 /* Map the buffer below 32M, so we can use direct calls and branches */
451 start = (void *) 0x01000000UL;
452 if (code_gen_buffer_size > 16 * 1024 * 1024)
453 code_gen_buffer_size = 16 * 1024 * 1024;
455 code_gen_buffer = mmap(start, code_gen_buffer_size,
456 PROT_WRITE | PROT_READ | PROT_EXEC,
458 if (code_gen_buffer == MAP_FAILED) {
459 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
463 #elif defined(__FreeBSD__) || defined(__DragonFly__)
467 flags = MAP_PRIVATE | MAP_ANONYMOUS;
468 #if defined(__x86_64__)
469 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
470 * 0x40000000 is free */
472 addr = (void *)0x40000000;
473 /* Cannot map more than that */
474 if (code_gen_buffer_size > (800 * 1024 * 1024))
475 code_gen_buffer_size = (800 * 1024 * 1024);
477 code_gen_buffer = mmap(addr, code_gen_buffer_size,
478 PROT_WRITE | PROT_READ | PROT_EXEC,
480 if (code_gen_buffer == MAP_FAILED) {
481 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
486 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
487 map_exec(code_gen_buffer, code_gen_buffer_size);
489 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
490 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
491 code_gen_buffer_max_size = code_gen_buffer_size -
492 code_gen_max_block_size();
493 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
494 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
497 /* Must be called before using the QEMU cpus. 'tb_size' is the size
498 (in bytes) allocated to the translation buffer. Zero means default
500 void cpu_exec_init_all(unsigned long tb_size)
503 code_gen_alloc(tb_size);
504 code_gen_ptr = code_gen_buffer;
506 #if !defined(CONFIG_USER_ONLY)
511 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
513 #define CPU_COMMON_SAVE_VERSION 1
515 static void cpu_common_save(QEMUFile *f, void *opaque)
517 CPUState *env = opaque;
519 qemu_put_be32s(f, &env->halted);
520 qemu_put_be32s(f, &env->interrupt_request);
523 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
525 CPUState *env = opaque;
527 if (version_id != CPU_COMMON_SAVE_VERSION)
530 qemu_get_be32s(f, &env->halted);
531 qemu_get_be32s(f, &env->interrupt_request);
532 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
533 version_id is increased. */
534 env->interrupt_request &= ~0x01;
541 void cpu_exec_init(CPUState *env)
546 #if defined(CONFIG_USER_ONLY)
549 env->next_cpu = NULL;
552 while (*penv != NULL) {
553 penv = (CPUState **)&(*penv)->next_cpu;
556 env->cpu_index = cpu_index;
558 TAILQ_INIT(&env->breakpoints);
559 TAILQ_INIT(&env->watchpoints);
561 #if defined(CONFIG_USER_ONLY)
564 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
565 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
566 cpu_common_save, cpu_common_load, env);
567 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
568 cpu_save, cpu_load, env);
572 static inline void invalidate_page_bitmap(PageDesc *p)
574 if (p->code_bitmap) {
575 qemu_free(p->code_bitmap);
576 p->code_bitmap = NULL;
578 p->code_write_count = 0;
581 /* set to NULL all the 'first_tb' fields in all PageDescs */
582 static void page_flush_tb(void)
587 for(i = 0; i < L1_SIZE; i++) {
590 for(j = 0; j < L2_SIZE; j++) {
592 invalidate_page_bitmap(p);
599 /* flush all the translation blocks */
600 /* XXX: tb_flush is currently not thread safe */
601 void tb_flush(CPUState *env1)
604 #if defined(DEBUG_FLUSH)
605 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
606 (unsigned long)(code_gen_ptr - code_gen_buffer),
608 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
610 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
611 cpu_abort(env1, "Internal error: code buffer overflow\n");
615 for(env = first_cpu; env != NULL; env = env->next_cpu) {
616 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
619 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
622 code_gen_ptr = code_gen_buffer;
623 /* XXX: flush processor icache at this point if cache flush is
628 #ifdef DEBUG_TB_CHECK
630 static void tb_invalidate_check(target_ulong address)
632 TranslationBlock *tb;
634 address &= TARGET_PAGE_MASK;
635 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
636 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
637 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
638 address >= tb->pc + tb->size)) {
639 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
640 address, (long)tb->pc, tb->size);
646 /* verify that all the pages have correct rights for code */
647 static void tb_page_check(void)
649 TranslationBlock *tb;
650 int i, flags1, flags2;
652 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
653 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
654 flags1 = page_get_flags(tb->pc);
655 flags2 = page_get_flags(tb->pc + tb->size - 1);
656 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
657 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
658 (long)tb->pc, tb->size, flags1, flags2);
664 static void tb_jmp_check(TranslationBlock *tb)
666 TranslationBlock *tb1;
669 /* suppress any remaining jumps to this TB */
673 tb1 = (TranslationBlock *)((long)tb1 & ~3);
676 tb1 = tb1->jmp_next[n1];
678 /* check end of list */
680 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
686 /* invalidate one TB */
687 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
690 TranslationBlock *tb1;
694 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
697 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
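/* Equivalent generic pattern (illustrative sketch, not from the
   original source): tb_remove() unlinks a node from a singly linked
   list whose next pointer lives 'next_offset' bytes inside each node,
   so the same helper can walk any chain selected by an offsetof()
   argument. */
#if 0
static void list_remove_at_offset(void **head, void *node, size_t next_offset)
{
    void **p = head;
    while (*p != node)
        p = (void **)((char *)*p + next_offset);
    *p = *(void **)((char *)node + next_offset);   /* splice node out */
}
#endif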
701 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
703 TranslationBlock *tb1;
709 tb1 = (TranslationBlock *)((long)tb1 & ~3);
711 *ptb = tb1->page_next[n1];
714 ptb = &tb1->page_next[n1];
718 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
720 TranslationBlock *tb1, **ptb;
723 ptb = &tb->jmp_next[n];
726 /* find tb(n) in circular list */
730 tb1 = (TranslationBlock *)((long)tb1 & ~3);
731 if (n1 == n && tb1 == tb)
734 ptb = &tb1->jmp_first;
736 ptb = &tb1->jmp_next[n1];
739 /* now we can suppress tb(n) from the list */
740 *ptb = tb->jmp_next[n];
742 tb->jmp_next[n] = NULL;
746 /* reset the jump entry 'n' of a TB so that it is not chained to
748 static inline void tb_reset_jump(TranslationBlock *tb, int n)
750 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
753 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
758 target_phys_addr_t phys_pc;
759 TranslationBlock *tb1, *tb2;
761 /* remove the TB from the hash list */
762 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
763 h = tb_phys_hash_func(phys_pc);
764 tb_remove(&tb_phys_hash[h], tb,
765 offsetof(TranslationBlock, phys_hash_next));
767 /* remove the TB from the page list */
768 if (tb->page_addr[0] != page_addr) {
769 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
770 tb_page_remove(&p->first_tb, tb);
771 invalidate_page_bitmap(p);
773 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
774 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
775 tb_page_remove(&p->first_tb, tb);
776 invalidate_page_bitmap(p);
779 tb_invalidated_flag = 1;
781 /* remove the TB from the hash list */
782 h = tb_jmp_cache_hash_func(tb->pc);
783 for(env = first_cpu; env != NULL; env = env->next_cpu) {
784 if (env->tb_jmp_cache[h] == tb)
785 env->tb_jmp_cache[h] = NULL;
788 /* suppress this TB from the two jump lists */
789 tb_jmp_remove(tb, 0);
790 tb_jmp_remove(tb, 1);
792 /* suppress any remaining jumps to this TB */
798 tb1 = (TranslationBlock *)((long)tb1 & ~3);
799 tb2 = tb1->jmp_next[n1];
800 tb_reset_jump(tb1, n1);
801 tb1->jmp_next[n1] = NULL;
804 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
806 tb_phys_invalidate_count++;
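/* Illustrative decode (not in the original source) of the tagged
   pointers used by the jmp_first/jmp_next lists: the low two bits of
   each link record which of the pointing TB's two outgoing jumps is
   involved, and the value 2 marks the list head (the "fail safe"
   assignment above). */
#if 0
static TranslationBlock *jmp_link_decode(TranslationBlock *link, int *n)
{
    *n = (long)link & 3;                           /* jump slot, or 2 */
    return (TranslationBlock *)((long)link & ~3);  /* the actual TB */
}
#endif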
809 static inline void set_bits(uint8_t *tab, int start, int len)
815 mask = 0xff << (start & 7);
816 if ((start & ~7) == (end & ~7)) {
818 mask &= ~(0xff << (end & 7));
823 start = (start + 8) & ~7;
825 while (start < end1) {
830 mask = ~(0xff << (end & 7));
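/* Worked example (illustrative): set_bits(tab, 5, 7) covers bits 5..11,
   i.e. tab[0] |= 0xe0 for the head bits and tab[1] |= 0x0f for the
   tail bits. */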
836 static void build_page_bitmap(PageDesc *p)
838 int n, tb_start, tb_end;
839 TranslationBlock *tb;
841 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
846 tb = (TranslationBlock *)((long)tb & ~3);
847 /* NOTE: this is subtle as a TB may span two physical pages */
849 /* NOTE: tb_end may be after the end of the page, but
850 it is not a problem */
851 tb_start = tb->pc & ~TARGET_PAGE_MASK;
852 tb_end = tb_start + tb->size;
853 if (tb_end > TARGET_PAGE_SIZE)
854 tb_end = TARGET_PAGE_SIZE;
857 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
859 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
860 tb = tb->page_next[n];
864 TranslationBlock *tb_gen_code(CPUState *env,
865 target_ulong pc, target_ulong cs_base,
866 int flags, int cflags)
868 TranslationBlock *tb;
870 target_ulong phys_pc, phys_page2, virt_page2;
873 phys_pc = get_phys_addr_code(env, pc);
876 /* flush must be done */
878 /* cannot fail at this point */
880 /* Don't forget to invalidate previous TB info. */
881 tb_invalidated_flag = 1;
883 tc_ptr = code_gen_ptr;
885 tb->cs_base = cs_base;
888 cpu_gen_code(env, tb, &code_gen_size);
889 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
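/* e.g. assuming CODE_GEN_ALIGN == 16: from an aligned code_gen_ptr, a
   35-byte block advances the pointer by 48 (illustrative) */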
891 /* check next page if needed */
892 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
894 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
895 phys_page2 = get_phys_addr_code(env, virt_page2);
897 tb_link_phys(tb, phys_pc, phys_page2);
901 /* invalidate all TBs which intersect with the target physical page
902 starting in the range [start, end). NOTE: start and end must refer to
903 the same physical page. 'is_cpu_write_access' should be true if called
904 from a real cpu write access: the virtual CPU will exit the current
905 TB if code is modified inside this TB. */
906 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
907 int is_cpu_write_access)
909 TranslationBlock *tb, *tb_next, *saved_tb;
910 CPUState *env = cpu_single_env;
911 target_ulong tb_start, tb_end;
914 #ifdef TARGET_HAS_PRECISE_SMC
915 int current_tb_not_found = is_cpu_write_access;
916 TranslationBlock *current_tb = NULL;
917 int current_tb_modified = 0;
918 target_ulong current_pc = 0;
919 target_ulong current_cs_base = 0;
920 int current_flags = 0;
921 #endif /* TARGET_HAS_PRECISE_SMC */
923 p = page_find(start >> TARGET_PAGE_BITS);
926 if (!p->code_bitmap &&
927 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
928 is_cpu_write_access) {
929 /* build code bitmap */
930 build_page_bitmap(p);
933 /* we remove all the TBs in the range [start, end) */
934 /* XXX: see if in some cases it could be faster to invalidate all the code */
938 tb = (TranslationBlock *)((long)tb & ~3);
939 tb_next = tb->page_next[n];
940 /* NOTE: this is subtle as a TB may span two physical pages */
942 /* NOTE: tb_end may be after the end of the page, but
943 it is not a problem */
944 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
945 tb_end = tb_start + tb->size;
947 tb_start = tb->page_addr[1];
948 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
950 if (!(tb_end <= start || tb_start >= end)) {
951 #ifdef TARGET_HAS_PRECISE_SMC
952 if (current_tb_not_found) {
953 current_tb_not_found = 0;
955 if (env->mem_io_pc) {
956 /* now we have a real cpu fault */
957 current_tb = tb_find_pc(env->mem_io_pc);
960 if (current_tb == tb &&
961 (current_tb->cflags & CF_COUNT_MASK) != 1) {
962 /* If we are modifying the current TB, we must stop
963 its execution. We could be more precise by checking
964 that the modification is after the current PC, but it
965 would require a specialized function to partially
966 restore the CPU state */
968 current_tb_modified = 1;
969 cpu_restore_state(current_tb, env,
970 env->mem_io_pc, NULL);
971 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
974 #endif /* TARGET_HAS_PRECISE_SMC */
975 /* we need to do that to handle the case where a signal
976 occurs while doing tb_phys_invalidate() */
979 saved_tb = env->current_tb;
980 env->current_tb = NULL;
982 tb_phys_invalidate(tb, -1);
984 env->current_tb = saved_tb;
985 if (env->interrupt_request && env->current_tb)
986 cpu_interrupt(env, env->interrupt_request);
991 #if !defined(CONFIG_USER_ONLY)
992 /* if no code remaining, no need to continue to use slow writes */
994 invalidate_page_bitmap(p);
995 if (is_cpu_write_access) {
996 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1000 #ifdef TARGET_HAS_PRECISE_SMC
1001 if (current_tb_modified) {
1002 /* we generate a block containing just the instruction
1003 modifying the memory. It will ensure that it cannot modify
1005 env->current_tb = NULL;
1006 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1007 cpu_resume_from_signal(env, NULL);
1012 /* len must be <= 8 and start must be a multiple of len */
1013 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1019 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1020 cpu_single_env->mem_io_vaddr, len,
1021 cpu_single_env->eip,
1022 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1025 p = page_find(start >> TARGET_PAGE_BITS);
1028 if (p->code_bitmap) {
1029 offset = start & ~TARGET_PAGE_MASK;
1030 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1031 if (b & ((1 << len) - 1))
1035 tb_invalidate_phys_page_range(start, start + len, 1);
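/* Worked example (illustrative): for a write at page offset 13 with
   len == 2, b == code_bitmap[1] >> 5 and (b & 3) tests bits 13 and 14,
   i.e. whether any TB covers those two bytes. */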
1039 #if !defined(CONFIG_SOFTMMU)
1040 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1041 unsigned long pc, void *puc)
1043 TranslationBlock *tb;
1046 #ifdef TARGET_HAS_PRECISE_SMC
1047 TranslationBlock *current_tb = NULL;
1048 CPUState *env = cpu_single_env;
1049 int current_tb_modified = 0;
1050 target_ulong current_pc = 0;
1051 target_ulong current_cs_base = 0;
1052 int current_flags = 0;
1055 addr &= TARGET_PAGE_MASK;
1056 p = page_find(addr >> TARGET_PAGE_BITS);
1060 #ifdef TARGET_HAS_PRECISE_SMC
1061 if (tb && pc != 0) {
1062 current_tb = tb_find_pc(pc);
1065 while (tb != NULL) {
1067 tb = (TranslationBlock *)((long)tb & ~3);
1068 #ifdef TARGET_HAS_PRECISE_SMC
1069 if (current_tb == tb &&
1070 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1071 /* If we are modifying the current TB, we must stop
1072 its execution. We could be more precise by checking
1073 that the modification is after the current PC, but it
1074 would require a specialized function to partially
1075 restore the CPU state */
1077 current_tb_modified = 1;
1078 cpu_restore_state(current_tb, env, pc, puc);
1079 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1082 #endif /* TARGET_HAS_PRECISE_SMC */
1083 tb_phys_invalidate(tb, addr);
1084 tb = tb->page_next[n];
1087 #ifdef TARGET_HAS_PRECISE_SMC
1088 if (current_tb_modified) {
1089 /* we generate a block containing just the instruction
1090 modifying the memory. It will ensure that it cannot modify
1092 env->current_tb = NULL;
1093 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1094 cpu_resume_from_signal(env, puc);
1100 /* add the tb in the target page and protect it if necessary */
1101 static inline void tb_alloc_page(TranslationBlock *tb,
1102 unsigned int n, target_ulong page_addr)
1105 TranslationBlock *last_first_tb;
1107 tb->page_addr[n] = page_addr;
1108 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1109 tb->page_next[n] = p->first_tb;
1110 last_first_tb = p->first_tb;
1111 p->first_tb = (TranslationBlock *)((long)tb | n);
1112 invalidate_page_bitmap(p);
1114 #if defined(TARGET_HAS_SMC) || 1
1116 #if defined(CONFIG_USER_ONLY)
1117 if (p->flags & PAGE_WRITE) {
1122 /* force the host page as non writable (writes will have a
1123 page fault + mprotect overhead) */
1124 page_addr &= qemu_host_page_mask;
1126 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1127 addr += TARGET_PAGE_SIZE) {
1129 p2 = page_find (addr >> TARGET_PAGE_BITS);
1133 p2->flags &= ~PAGE_WRITE;
1134 page_get_flags(addr);
1136 mprotect(g2h(page_addr), qemu_host_page_size,
1137 (prot & PAGE_BITS) & ~PAGE_WRITE);
1138 #ifdef DEBUG_TB_INVALIDATE
1139 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1144 /* if some code is already present, then the pages are already
1145 protected. So we handle the case where only the first TB is
1146 allocated in a physical page */
1147 if (!last_first_tb) {
1148 tlb_protect_code(page_addr);
1152 #endif /* TARGET_HAS_SMC */
1155 /* Allocate a new translation block. Flush the translation buffer if
1156 too many translation blocks or too much generated code. */
1157 TranslationBlock *tb_alloc(target_ulong pc)
1159 TranslationBlock *tb;
1161 if (nb_tbs >= code_gen_max_blocks ||
1162 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1164 tb = &tbs[nb_tbs++];
1170 void tb_free(TranslationBlock *tb)
1172 /* In practice this is mostly used for single use temporary TB
1173 Ignore the hard cases and just back up if this TB happens to
1174 be the last one generated. */
1175 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1176 code_gen_ptr = tb->tc_ptr;
1181 /* add a new TB and link it to the physical page tables. phys_page2 is
1182 (-1) to indicate that only one page contains the TB. */
1183 void tb_link_phys(TranslationBlock *tb,
1184 target_ulong phys_pc, target_ulong phys_page2)
1187 TranslationBlock **ptb;
1189 /* Grab the mmap lock to stop another thread invalidating this TB
1190 before we are done. */
1192 /* add in the physical hash table */
1193 h = tb_phys_hash_func(phys_pc);
1194 ptb = &tb_phys_hash[h];
1195 tb->phys_hash_next = *ptb;
1198 /* add in the page list */
1199 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1200 if (phys_page2 != -1)
1201 tb_alloc_page(tb, 1, phys_page2);
1203 tb->page_addr[1] = -1;
1205 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1206 tb->jmp_next[0] = NULL;
1207 tb->jmp_next[1] = NULL;
1209 /* init original jump addresses */
1210 if (tb->tb_next_offset[0] != 0xffff)
1211 tb_reset_jump(tb, 0);
1212 if (tb->tb_next_offset[1] != 0xffff)
1213 tb_reset_jump(tb, 1);
1215 #ifdef DEBUG_TB_CHECK
1221 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1222 tb[1].tc_ptr. Return NULL if not found */
1223 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1225 int m_min, m_max, m;
1227 TranslationBlock *tb;
1231 if (tc_ptr < (unsigned long)code_gen_buffer ||
1232 tc_ptr >= (unsigned long)code_gen_ptr)
1234 /* binary search (cf Knuth) */
1237 while (m_min <= m_max) {
1238 m = (m_min + m_max) >> 1;
1240 v = (unsigned long)tb->tc_ptr;
1243 else if (tc_ptr < v) {
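/* Note (illustrative, not in the original source): the binary search
   relies on tbs[] being ordered by tc_ptr, which holds because
   tb_alloc() hands out blocks sequentially from code_gen_buffer, so the
   intervals [tbs[i].tc_ptr, tbs[i+1].tc_ptr) tile the generated code in
   ascending order. */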
1252 static void tb_reset_jump_recursive(TranslationBlock *tb);
1254 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1256 TranslationBlock *tb1, *tb_next, **ptb;
1259 tb1 = tb->jmp_next[n];
1261 /* find head of list */
1264 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1267 tb1 = tb1->jmp_next[n1];
1269 /* we are now sure that tb jumps to tb1 */
1272 /* remove tb from the jmp_first list */
1273 ptb = &tb_next->jmp_first;
1277 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1278 if (n1 == n && tb1 == tb)
1280 ptb = &tb1->jmp_next[n1];
1282 *ptb = tb->jmp_next[n];
1283 tb->jmp_next[n] = NULL;
1285 /* suppress the jump to next tb in generated code */
1286 tb_reset_jump(tb, n);
1288 /* suppress jumps in the tb on which we could have jumped */
1289 tb_reset_jump_recursive(tb_next);
1293 static void tb_reset_jump_recursive(TranslationBlock *tb)
1295 tb_reset_jump_recursive2(tb, 0);
1296 tb_reset_jump_recursive2(tb, 1);
1299 #if defined(TARGET_HAS_ICE)
1300 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1302 target_phys_addr_t addr;
1304 ram_addr_t ram_addr;
1307 addr = cpu_get_phys_page_debug(env, pc);
1308 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1310 pd = IO_MEM_UNASSIGNED;
1312 pd = p->phys_offset;
1314 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1315 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1319 /* Add a watchpoint. */
1320 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1321 int flags, CPUWatchpoint **watchpoint)
1323 target_ulong len_mask = ~(len - 1);
1326 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1327 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1328 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1329 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1332 wp = qemu_malloc(sizeof(*wp));
1335 wp->len_mask = len_mask;
1338 /* keep all GDB-injected watchpoints in front */
1340 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1342 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1344 tlb_flush_page(env, addr);
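/* Example (illustrative): len == 4 gives len_mask == ~3, so the sanity
   check above rejects any watchpoint whose address has either of its
   low two bits set -- only naturally aligned power-of-2 ranges pass. */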
1351 /* Remove a specific watchpoint. */
1352 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1355 target_ulong len_mask = ~(len - 1);
1358 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1359 if (addr == wp->vaddr && len_mask == wp->len_mask
1360 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1361 cpu_watchpoint_remove_by_ref(env, wp);
1368 /* Remove a specific watchpoint by reference. */
1369 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1371 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1373 tlb_flush_page(env, watchpoint->vaddr);
1375 qemu_free(watchpoint);
1378 /* Remove all matching watchpoints. */
1379 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1381 CPUWatchpoint *wp, *next;
1383 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1384 if (wp->flags & mask)
1385 cpu_watchpoint_remove_by_ref(env, wp);
1389 /* Add a breakpoint. */
1390 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1391 CPUBreakpoint **breakpoint)
1393 #if defined(TARGET_HAS_ICE)
1396 bp = qemu_malloc(sizeof(*bp));
1401 /* keep all GDB-injected breakpoints in front */
1403 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1405 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1407 breakpoint_invalidate(env, pc);
1417 /* Remove a specific breakpoint. */
1418 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1420 #if defined(TARGET_HAS_ICE)
1423 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1424 if (bp->pc == pc && bp->flags == flags) {
1425 cpu_breakpoint_remove_by_ref(env, bp);
1435 /* Remove a specific breakpoint by reference. */
1436 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1438 #if defined(TARGET_HAS_ICE)
1439 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1441 breakpoint_invalidate(env, breakpoint->pc);
1443 qemu_free(breakpoint);
1447 /* Remove all matching breakpoints. */
1448 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1450 #if defined(TARGET_HAS_ICE)
1451 CPUBreakpoint *bp, *next;
1453 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1454 if (bp->flags & mask)
1455 cpu_breakpoint_remove_by_ref(env, bp);
1460 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1461 CPU loop after each instruction */
1462 void cpu_single_step(CPUState *env, int enabled)
1464 #if defined(TARGET_HAS_ICE)
1465 if (env->singlestep_enabled != enabled) {
1466 env->singlestep_enabled = enabled;
1468 kvm_update_guest_debug(env, 0);
1470 /* must flush all the translated code to avoid inconsistencies */
1471 /* XXX: only flush what is necessary */
1478 /* enable or disable low levels log */
1479 void cpu_set_log(int log_flags)
1481 loglevel = log_flags;
1482 if (loglevel && !logfile) {
1483 logfile = fopen(logfilename, log_append ? "a" : "w");
1485 perror(logfilename);
1488 #if !defined(CONFIG_SOFTMMU)
1489 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1491 static char logfile_buf[4096];
1492 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1495 setvbuf(logfile, NULL, _IOLBF, 0);
1499 if (!loglevel && logfile) {
1505 void cpu_set_log_filename(const char *filename)
1507 logfilename = strdup(filename);
1512 cpu_set_log(loglevel);
1515 static void cpu_unlink_tb(CPUState *env)
1517 #if defined(USE_NPTL)
1518 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1519 problem and hope the cpu will stop of its own accord. For userspace
1520 emulation this often isn't actually as bad as it sounds. Often
1521 signals are used primarily to interrupt blocking syscalls. */
1523 TranslationBlock *tb;
1524 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1526 tb = env->current_tb;
1527 /* if the cpu is currently executing code, we must unlink it and
1528 all the potentially executing TB */
1529 if (tb && !testandset(&interrupt_lock)) {
1530 env->current_tb = NULL;
1531 tb_reset_jump_recursive(tb);
1532 resetlock(&interrupt_lock);
1537 /* mask must never be zero, except for A20 change call */
1538 void cpu_interrupt(CPUState *env, int mask)
1542 old_mask = env->interrupt_request;
1543 env->interrupt_request |= mask;
1546 env->icount_decr.u16.high = 0xffff;
1547 #ifndef CONFIG_USER_ONLY
1549 && (mask & ~old_mask) != 0) {
1550 cpu_abort(env, "Raised interrupt while not in I/O function");
1558 void cpu_reset_interrupt(CPUState *env, int mask)
1560 env->interrupt_request &= ~mask;
1563 void cpu_exit(CPUState *env)
1565 env->exit_request = 1;
1569 const CPULogItem cpu_log_items[] = {
1570 { CPU_LOG_TB_OUT_ASM, "out_asm",
1571 "show generated host assembly code for each compiled TB" },
1572 { CPU_LOG_TB_IN_ASM, "in_asm",
1573 "show target assembly code for each compiled TB" },
1574 { CPU_LOG_TB_OP, "op",
1575 "show micro ops for each compiled TB" },
1576 { CPU_LOG_TB_OP_OPT, "op_opt",
1579 "before eflags optimization and "
1581 "after liveness analysis" },
1582 { CPU_LOG_INT, "int",
1583 "show interrupts/exceptions in short format" },
1584 { CPU_LOG_EXEC, "exec",
1585 "show trace before each executed TB (lots of logs)" },
1586 { CPU_LOG_TB_CPU, "cpu",
1587 "show CPU state before block translation" },
1589 { CPU_LOG_PCALL, "pcall",
1590 "show protected mode far calls/returns/exceptions" },
1591 { CPU_LOG_RESET, "cpu_reset",
1592 "show CPU state before CPU resets" },
1595 { CPU_LOG_IOPORT, "ioport",
1596 "show all i/o ports accesses" },
1601 static int cmp1(const char *s1, int n, const char *s2)
1603 if (strlen(s2) != n)
1605 return memcmp(s1, s2, n) == 0;
1608 /* takes a comma separated list of log masks. Return 0 if error. */
1609 int cpu_str_to_log_mask(const char *str)
1611 const CPULogItem *item;
1618 p1 = strchr(p, ',');
1621 if(cmp1(p,p1-p,"all")) {
1622 for(item = cpu_log_items; item->mask != 0; item++) {
1626 for(item = cpu_log_items; item->mask != 0; item++) {
1627 if (cmp1(p, p1 - p, item->name))
1641 void cpu_abort(CPUState *env, const char *fmt, ...)
1648 fprintf(stderr, "qemu: fatal: ");
1649 vfprintf(stderr, fmt, ap);
1650 fprintf(stderr, "\n");
1652 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1654 cpu_dump_state(env, stderr, fprintf, 0);
1656 if (qemu_log_enabled()) {
1657 qemu_log("qemu: fatal: ");
1658 qemu_log_vprintf(fmt, ap2);
1661 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1663 log_cpu_state(env, 0);
1673 CPUState *cpu_copy(CPUState *env)
1675 CPUState *new_env = cpu_init(env->cpu_model_str);
1676 CPUState *next_cpu = new_env->next_cpu;
1677 int cpu_index = new_env->cpu_index;
1678 #if defined(TARGET_HAS_ICE)
1683 memcpy(new_env, env, sizeof(CPUState));
1685 /* Preserve chaining and index. */
1686 new_env->next_cpu = next_cpu;
1687 new_env->cpu_index = cpu_index;
1689 /* Clone all break/watchpoints.
1690 Note: Once we support ptrace with hw-debug register access, make sure
1691 BP_CPU break/watchpoints are handled correctly on clone. */
1692 TAILQ_INIT(&new_env->breakpoints);
1693 TAILQ_INIT(&new_env->watchpoints);
1694 #if defined(TARGET_HAS_ICE)
1695 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1696 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1698 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1699 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1707 #if !defined(CONFIG_USER_ONLY)
1709 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1713 /* Discard jump cache entries for any tb which might potentially
1714 overlap the flushed page. */
1715 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1716 memset (&env->tb_jmp_cache[i], 0,
1717 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1719 i = tb_jmp_cache_hash_page(addr);
1720 memset (&env->tb_jmp_cache[i], 0,
1721 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1724 /* NOTE: if flush_global is true, also flush global entries (not
1726 void tlb_flush(CPUState *env, int flush_global)
1730 #if defined(DEBUG_TLB)
1731 printf("tlb_flush:\n");
1733 /* must reset current TB so that interrupts cannot modify the
1734 links while we are modifying them */
1735 env->current_tb = NULL;
1737 for(i = 0; i < CPU_TLB_SIZE; i++) {
1738 env->tlb_table[0][i].addr_read = -1;
1739 env->tlb_table[0][i].addr_write = -1;
1740 env->tlb_table[0][i].addr_code = -1;
1741 env->tlb_table[1][i].addr_read = -1;
1742 env->tlb_table[1][i].addr_write = -1;
1743 env->tlb_table[1][i].addr_code = -1;
1744 #if (NB_MMU_MODES >= 3)
1745 env->tlb_table[2][i].addr_read = -1;
1746 env->tlb_table[2][i].addr_write = -1;
1747 env->tlb_table[2][i].addr_code = -1;
1749 #if (NB_MMU_MODES >= 4)
1750 env->tlb_table[3][i].addr_read = -1;
1751 env->tlb_table[3][i].addr_write = -1;
1752 env->tlb_table[3][i].addr_code = -1;
1754 #if (NB_MMU_MODES >= 5)
1755 env->tlb_table[4][i].addr_read = -1;
1756 env->tlb_table[4][i].addr_write = -1;
1757 env->tlb_table[4][i].addr_code = -1;
1762 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1765 if (env->kqemu_enabled) {
1766 kqemu_flush(env, flush_global);
1772 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1774 if (addr == (tlb_entry->addr_read &
1775 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1776 addr == (tlb_entry->addr_write &
1777 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1778 addr == (tlb_entry->addr_code &
1779 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1780 tlb_entry->addr_read = -1;
1781 tlb_entry->addr_write = -1;
1782 tlb_entry->addr_code = -1;
1786 void tlb_flush_page(CPUState *env, target_ulong addr)
1790 #if defined(DEBUG_TLB)
1791 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1793 /* must reset current TB so that interrupts cannot modify the
1794 links while we are modifying them */
1795 env->current_tb = NULL;
1797 addr &= TARGET_PAGE_MASK;
1798 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1799 tlb_flush_entry(&env->tlb_table[0][i], addr);
1800 tlb_flush_entry(&env->tlb_table[1][i], addr);
1801 #if (NB_MMU_MODES >= 3)
1802 tlb_flush_entry(&env->tlb_table[2][i], addr);
1804 #if (NB_MMU_MODES >= 4)
1805 tlb_flush_entry(&env->tlb_table[3][i], addr);
1807 #if (NB_MMU_MODES >= 5)
1808 tlb_flush_entry(&env->tlb_table[4][i], addr);
1811 tlb_flush_jmp_cache(env, addr);
1814 if (env->kqemu_enabled) {
1815 kqemu_flush_page(env, addr);
1820 /* update the TLBs so that writes to code in the virtual page 'addr'
1822 static void tlb_protect_code(ram_addr_t ram_addr)
1824 cpu_physical_memory_reset_dirty(ram_addr,
1825 ram_addr + TARGET_PAGE_SIZE,
1829 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1830 tested for self modifying code */
1831 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1834 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
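/* Illustrative sketch (not from the original source): phys_ram_dirty
   keeps one flag byte per ram page, so "does this page still need
   write-trapping for SMC detection?" is a single byte probe: */
#if 0
static int code_write_trap_needed_sketch(ram_addr_t ram_addr)
{
    return !(phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] & CODE_DIRTY_FLAG);
}
#endif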
1837 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1838 unsigned long start, unsigned long length)
1841 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1842 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1843 if ((addr - start) < length) {
1844 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1849 /* Note: start and end must be within the same ram block. */
1850 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1854 unsigned long length, start1;
1858 start &= TARGET_PAGE_MASK;
1859 end = TARGET_PAGE_ALIGN(end);
1861 length = end - start;
1864 len = length >> TARGET_PAGE_BITS;
1866 /* XXX: should not depend on cpu context */
1868 if (env->kqemu_enabled) {
1871 for(i = 0; i < len; i++) {
1872 kqemu_set_notdirty(env, addr);
1873 addr += TARGET_PAGE_SIZE;
1877 mask = ~dirty_flags;
1878 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1879 for(i = 0; i < len; i++)
1882 /* we modify the TLB cache so that the dirty bit will be set again
1883 when accessing the range */
1884 start1 = (unsigned long)qemu_get_ram_ptr(start);
1885 /* Check that we don't span multiple blocks - this breaks the
1886 address comparisons below. */
1887 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1888 != (end - 1) - start) {
1892 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1893 for(i = 0; i < CPU_TLB_SIZE; i++)
1894 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1895 for(i = 0; i < CPU_TLB_SIZE; i++)
1896 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1897 #if (NB_MMU_MODES >= 3)
1898 for(i = 0; i < CPU_TLB_SIZE; i++)
1899 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1901 #if (NB_MMU_MODES >= 4)
1902 for(i = 0; i < CPU_TLB_SIZE; i++)
1903 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1905 #if (NB_MMU_MODES >= 5)
1906 for(i = 0; i < CPU_TLB_SIZE; i++)
1907 tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1912 int cpu_physical_memory_set_dirty_tracking(int enable)
1914 in_migration = enable;
1918 int cpu_physical_memory_get_dirty_tracking(void)
1920 return in_migration;
1923 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1926 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1929 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1931 ram_addr_t ram_addr;
1934 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1935 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1936 + tlb_entry->addend);
1937 ram_addr = qemu_ram_addr_from_host(p);
1938 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1939 tlb_entry->addr_write |= TLB_NOTDIRTY;
1944 /* update the TLB according to the current state of the dirty bits */
1945 void cpu_tlb_update_dirty(CPUState *env)
1948 for(i = 0; i < CPU_TLB_SIZE; i++)
1949 tlb_update_dirty(&env->tlb_table[0][i]);
1950 for(i = 0; i < CPU_TLB_SIZE; i++)
1951 tlb_update_dirty(&env->tlb_table[1][i]);
1952 #if (NB_MMU_MODES >= 3)
1953 for(i = 0; i < CPU_TLB_SIZE; i++)
1954 tlb_update_dirty(&env->tlb_table[2][i]);
1956 #if (NB_MMU_MODES >= 4)
1957 for(i = 0; i < CPU_TLB_SIZE; i++)
1958 tlb_update_dirty(&env->tlb_table[3][i]);
1960 #if (NB_MMU_MODES >= 5)
1961 for(i = 0; i < CPU_TLB_SIZE; i++)
1962 tlb_update_dirty(&env->tlb_table[4][i]);
1966 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1968 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1969 tlb_entry->addr_write = vaddr;
1972 /* update the TLB corresponding to virtual page vaddr
1973 so that it is no longer dirty */
1974 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1978 vaddr &= TARGET_PAGE_MASK;
1979 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1980 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1981 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1982 #if (NB_MMU_MODES >= 3)
1983 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1985 #if (NB_MMU_MODES >= 4)
1986 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1988 #if (NB_MMU_MODES >= 5)
1989 tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
1993 /* add a new TLB entry. At most one entry for a given virtual address
1994 is permitted. Return 0 if OK or 2 if the page could not be mapped
1995 (can only happen in non SOFTMMU mode for I/O pages or pages
1996 conflicting with the host address space). */
1997 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1998 target_phys_addr_t paddr, int prot,
1999 int mmu_idx, int is_softmmu)
2004 target_ulong address;
2005 target_ulong code_address;
2006 target_phys_addr_t addend;
2010 target_phys_addr_t iotlb;
2012 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2014 pd = IO_MEM_UNASSIGNED;
2016 pd = p->phys_offset;
2018 #if defined(DEBUG_TLB)
2019 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2020 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2025 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2026 /* IO memory case (romd handled later) */
2027 address |= TLB_MMIO;
2029 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2030 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2032 iotlb = pd & TARGET_PAGE_MASK;
2033 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2034 iotlb |= IO_MEM_NOTDIRTY;
2036 iotlb |= IO_MEM_ROM;
2038 /* IO handlers are currently passed a physical address.
2039 It would be nice to pass an offset from the base address
2040 of that region. This would avoid having to special case RAM,
2041 and avoid full address decoding in every device.
2042 We can't use the high bits of pd for this because
2043 IO_MEM_ROMD uses these as a ram address. */
2044 iotlb = (pd & ~TARGET_PAGE_MASK);
2046 iotlb += p->region_offset;
2052 code_address = address;
2053 /* Make accesses to pages with watchpoints go via the
2054 watchpoint trap routines. */
2055 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2056 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2057 iotlb = io_mem_watch + paddr;
2058 /* TODO: The memory case can be optimized by not trapping
2059 reads of pages with a write breakpoint. */
2060 address |= TLB_MMIO;
2064 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2065 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2066 te = &env->tlb_table[mmu_idx][index];
2067 te->addend = addend - vaddr;
2068 if (prot & PAGE_READ) {
2069 te->addr_read = address;
2074 if (prot & PAGE_EXEC) {
2075 te->addr_code = code_address;
2079 if (prot & PAGE_WRITE) {
2080 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2081 (pd & IO_MEM_ROMD)) {
2082 /* Write access calls the I/O callback. */
2083 te->addr_write = address | TLB_MMIO;
2084 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2085 !cpu_physical_memory_is_dirty(pd)) {
2086 te->addr_write = address | TLB_NOTDIRTY;
2088 te->addr_write = address;
2091 te->addr_write = -1;
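/* Note (illustrative): vaddr and the stored addresses are page aligned,
   so the low TARGET_PAGE_BITS of each addr_* field are free to carry
   the TLB_MMIO/TLB_NOTDIRTY/TLB_INVALID flag bits, and the fast path
   recovers the host pointer as simply vaddr + te->addend. */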
2098 void tlb_flush(CPUState *env, int flush_global)
2102 void tlb_flush_page(CPUState *env, target_ulong addr)
2106 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2107 target_phys_addr_t paddr, int prot,
2108 int mmu_idx, int is_softmmu)
2113 /* dump memory mappings */
2114 void page_dump(FILE *f)
2116 unsigned long start, end;
2117 int i, j, prot, prot1;
2120 fprintf(f, "%-8s %-8s %-8s %s\n",
2121 "start", "end", "size", "prot");
2125 for(i = 0; i <= L1_SIZE; i++) {
2130 for(j = 0;j < L2_SIZE; j++) {
2135 if (prot1 != prot) {
2136 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2138 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2139 start, end, end - start,
2140 prot & PAGE_READ ? 'r' : '-',
2141 prot & PAGE_WRITE ? 'w' : '-',
2142 prot & PAGE_EXEC ? 'x' : '-');
2156 int page_get_flags(target_ulong address)
2160 p = page_find(address >> TARGET_PAGE_BITS);
2166 /* modify the flags of a page and invalidate the code if
2167 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2168 depending on PAGE_WRITE */
2169 void page_set_flags(target_ulong start, target_ulong end, int flags)
2174 /* mmap_lock should already be held. */
2175 start = start & TARGET_PAGE_MASK;
2176 end = TARGET_PAGE_ALIGN(end);
2177 if (flags & PAGE_WRITE)
2178 flags |= PAGE_WRITE_ORG;
2179 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2180 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2181 /* We may be called for host regions that are outside guest
2185 /* if the write protection is set, then we invalidate the code
2187 if (!(p->flags & PAGE_WRITE) &&
2188 (flags & PAGE_WRITE) &&
2190 tb_invalidate_phys_page(addr, 0, NULL);
2196 int page_check_range(target_ulong start, target_ulong len, int flags)
2202 if (start + len < start)
2203 /* we've wrapped around */
2206 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2207 start = start & TARGET_PAGE_MASK;
2209 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2210 p = page_find(addr >> TARGET_PAGE_BITS);
2213 if( !(p->flags & PAGE_VALID) )
2216 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2218 if (flags & PAGE_WRITE) {
2219 if (!(p->flags & PAGE_WRITE_ORG))
2221 /* unprotect the page if it was put read-only because it
2222 contains translated code */
2223 if (!(p->flags & PAGE_WRITE)) {
2224 if (!page_unprotect(addr, 0, NULL))
2233 /* called from signal handler: invalidate the code and unprotect the
2234 page. Return TRUE if the fault was successfully handled. */
2235 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2237 unsigned int page_index, prot, pindex;
2239 target_ulong host_start, host_end, addr;
2241 /* Technically this isn't safe inside a signal handler. However we
2242 know this only ever happens in a synchronous SEGV handler, so in
2243 practice it seems to be ok. */
2246 host_start = address & qemu_host_page_mask;
2247 page_index = host_start >> TARGET_PAGE_BITS;
2248 p1 = page_find(page_index);
2253 host_end = host_start + qemu_host_page_size;
2256 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2260 /* if the page was really writable, then we change its
2261 protection back to writable */
2262 if (prot & PAGE_WRITE_ORG) {
2263 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2264 if (!(p1[pindex].flags & PAGE_WRITE)) {
2265 mprotect((void *)g2h(host_start), qemu_host_page_size,
2266 (prot & PAGE_BITS) | PAGE_WRITE);
2267 p1[pindex].flags |= PAGE_WRITE;
2268 /* and since the content will be modified, we must invalidate
2269 the corresponding translated code. */
2270 tb_invalidate_phys_page(address, pc, puc);
2271 #ifdef DEBUG_TB_CHECK
2272 tb_invalidate_check(address);
2282 static inline void tlb_set_dirty(CPUState *env,
2283 unsigned long addr, target_ulong vaddr)
2286 #endif /* defined(CONFIG_USER_ONLY) */
2288 #if !defined(CONFIG_USER_ONLY)
2290 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2291 ram_addr_t memory, ram_addr_t region_offset);
2292 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2293 ram_addr_t orig_memory, ram_addr_t region_offset);
2294 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2297 if (addr > start_addr) \
2300 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2301 if (start_addr2 > 0) \
2305 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2306 end_addr2 = TARGET_PAGE_SIZE - 1; \
2308 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2309 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2314 /* register physical memory. 'size' must be a multiple of the target
2315 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2316 io memory page. The address used when calling the IO function is
2317 the offset from the start of the region, plus region_offset. Both
2318 start_addr and region_offset are rounded down to a page boundary
2319 before calculating this offset. This should not be a problem unless
2320 the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef CONFIG_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
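/* Illustrative usage sketch (not part of the original file): a board
   init function typically allocates guest RAM and maps it, then maps a
   device's MMIO region elsewhere.  The addresses and sizes below are
   made up for the example; mmio_index would come from
   cpu_register_io_memory() further down.

       ram_addr_t ram_offset = qemu_ram_alloc(0x800000);
       cpu_register_physical_memory(0x00000000, 0x800000,
                                    ram_offset | IO_MEM_RAM);
       cpu_register_physical_memory(0x10000000, 0x1000, mmio_index);

   cpu_register_physical_memory() is the common wrapper that calls the
   function above with region_offset == 0. */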
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}
#ifdef CONFIG_KQEMU
/* XXX: better than nothing */
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
        abort();
    }
    addr = last_ram_offset;
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
    return addr;
}
#endif
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        return kqemu_ram_alloc(size);
    }
#endif

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    new_block->host = qemu_vmalloc(size);
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    return new_block->offset;
}
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        return kqemu_phys_ram_base + addr;
    }
#endif

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
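/* Illustrative sketch (hypothetical device state fields): a display
   adapter that owns its video RAM combines the two calls above and
   keeps the host pointer:

       s->vram_offset = qemu_ram_alloc(vram_size);
       s->vram_ptr = qemu_get_ram_ptr(s->vram_offset);

   The pointer stays valid because blocks are currently never freed
   (qemu_ram_free() above is still a stub). */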
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;
    uint8_t *host = ptr;

#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        return host - kqemu_phys_ram_base;
    }
#endif

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
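/* Illustrative sketch (not from this file): a debugger front end such
   as the gdbstub arms a watchpoint roughly like this, and
   check_watchpoint() above is what eventually fires when the
   TLB-trapped access happens:

       CPUWatchpoint *wp;
       cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
*/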
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
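/* Illustrative sketch: subpages appear automatically whenever a
   registration does not cover a whole target page.  For example, with
   4KB target pages, mapping a 0x100-byte device region at a non-page
   boundary

       cpu_register_physical_memory(0x10000400, 0x100, mmio_index);

   switches that page's phys_offset to a subpage_t created by
   subpage_init() above, so that only offsets 0x400..0x4ff route to
   the new region while the rest of the page keeps its previous
   handler. */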
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        /* alloc dirty bits array */
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
    }
#endif
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
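/* Illustrative usage sketch (hypothetical my_dev_* names): a device
   supplies one read and one write callback per access width, registers
   them, then maps the returned token with
   cpu_register_physical_memory():

       static CPUReadMemoryFunc *my_dev_read[3] = {
           my_dev_readb, my_dev_readw, my_dev_readl,
       };
       static CPUWriteMemoryFunc *my_dev_write[3] = {
           my_dev_writeb, my_dev_writew, my_dev_writel,
       };

       int iomemtype = cpu_register_io_memory(0, my_dev_read,
                                              my_dev_write, s);
       cpu_register_physical_memory(base, 0x1000, iomemtype);
*/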
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
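/* Illustrative sketch: device DMA normally goes through this function
   (or the cpu_physical_memory_read/write wrappers around it) rather
   than through qemu_get_ram_ptr(), so that MMIO targets and dirty
   tracking are handled correctly.  E.g. a block device model copying a
   sector to a guest buffer at dma_addr:

       uint8_t sector[512];
       cpu_physical_memory_write(dma_addr, sector, sizeof(sector));
*/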
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
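/* Illustrative sketch of the zero-copy DMA pattern these two functions
   enable.  If the target is MMIO, the map may fall back to the single
   bounce buffer and return a shorter length (or NULL); a caller can
   then register a map client and retry once the buffer is released:

       target_phys_addr_t plen = len;
       void *buf = cpu_physical_memory_map(addr, &plen, is_write);
       if (buf) {
           ... device reads or writes buf[0..plen-1] ...
           cpu_physical_memory_unmap(buf, plen, is_write, plen);
       }
*/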
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
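/* Illustrative sketch: these helpers are what target code uses to walk
   guest-physical data structures, e.g. a (hypothetical) i386-style
   page-directory access:

       uint32_t pde = ldl_phys(pgd_base + (index << 2));
       ...
       stl_phys_notdirty(pgd_base + (index << 2), pde | PG_ACCESSED_MASK);

   stl_phys_notdirty() is used for PTE updates precisely so the dirty
   bitmap keeps tracking guest writes to the page tables (see the
   comment above that function). */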
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
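/* Note: the four includes above expand softmmu_template.h once per
   SHIFT value (0..3), i.e. per access size of 1, 2, 4 and 8 bytes.
   With SOFTMMU_CODE_ACCESS and MMUSUFFIX _cmmu defined, the template
   emits the code-fetch variants of the software-MMU load helpers,
   which the translator uses when reading guest instructions. */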