X-Git-Url: http://git.maemo.org/git/?a=blobdiff_plain;f=cpu-defs.h;h=b462a9fa0c1f5ef72dd1e441d2f30968e0b62e50;hb=3576a7b2d660bdb0658f15e6978a3ec0cdd189a9;hp=3ec866fa7b09d96481693be2d0c0f15c3b30af83;hpb=ab93bbe2aebebce9901e740b1000058f74c15e26;p=qemu

diff --git a/cpu-defs.h b/cpu-defs.h
index 3ec866f..b462a9f 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -1,6 +1,6 @@
 /*
  * common defines for all CPUs
- * 
+ *
  * Copyright (c) 2003 Fabrice Bellard
  *
  * This library is free software; you can redistribute it and/or
@@ -15,25 +15,203 @@
  *
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
  */
 #ifndef CPU_DEFS_H
 #define CPU_DEFS_H
 
+#ifndef NEED_CPU_H
+#error cpu.h included from common code
+#endif
+
 #include "config.h"
 #include <setjmp.h>
+#include <inttypes.h>
+#include <signal.h>
+#include "osdep.h"
+#include "sys-queue.h"
 
-#define EXCP_INTERRUPT 256 /* async interruption */
-#define EXCP_HLT       257 /* hlt instruction reached */
-#define EXCP_DEBUG     258 /* cpu stopped after a breakpoint or singlestep */
+#ifndef TARGET_LONG_BITS
+#error TARGET_LONG_BITS must be defined before including this header
+#endif
+
+#ifndef TARGET_PHYS_ADDR_BITS
+#if TARGET_LONG_BITS >= HOST_LONG_BITS
+#define TARGET_PHYS_ADDR_BITS TARGET_LONG_BITS
+#else
+#define TARGET_PHYS_ADDR_BITS HOST_LONG_BITS
+#endif
+#endif
+
+#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
+
+/* target_ulong is the type of a virtual address */
+#if TARGET_LONG_SIZE == 4
+typedef int32_t target_long;
+typedef uint32_t target_ulong;
+#define TARGET_FMT_lx "%08x"
+#define TARGET_FMT_ld "%d"
+#define TARGET_FMT_lu "%u"
+#elif TARGET_LONG_SIZE == 8
+typedef int64_t target_long;
+typedef uint64_t target_ulong;
+#define TARGET_FMT_lx "%016" PRIx64
+#define TARGET_FMT_ld "%" PRId64
+#define TARGET_FMT_lu "%" PRIu64
+#else
+#error TARGET_LONG_SIZE undefined
+#endif
+
+/* target_phys_addr_t is the type of a physical address (its size can
+   be different from 'target_ulong').  We have
+   sizeof(target_phys_addr_t) = max(sizeof(unsigned long),
+   sizeof(size_of_target_physical_address)) because we must pass a
+   host pointer to memory operations in some cases */
+
+#if TARGET_PHYS_ADDR_BITS == 32
+typedef uint32_t target_phys_addr_t;
+#define TARGET_FMT_plx "%08x"
+#elif TARGET_PHYS_ADDR_BITS == 64
+typedef uint64_t target_phys_addr_t;
+#define TARGET_FMT_plx "%016" PRIx64
+#else
+#error TARGET_PHYS_ADDR_BITS undefined
+#endif
 
-#define MAX_BREAKPOINTS 32
+#define HOST_LONG_SIZE (HOST_LONG_BITS / 8)
 
-#define CPU_TLB_SIZE 256
+#define EXCP_INTERRUPT  0x10000 /* async interruption */
+#define EXCP_HLT        0x10001 /* hlt instruction reached */
+#define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
+#define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
+
+#define TB_JMP_CACHE_BITS 12
+#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
+
+/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
+   addresses on the same page.  The top bits are the same.  This allows
+   TLB invalidation to quickly clear a subset of the hash table.  */
+#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
+#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
+#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
+#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
+
+#define CPU_TLB_BITS 8
+#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
+
+#if TARGET_PHYS_ADDR_BITS == 32 && TARGET_LONG_BITS == 32
+#define CPU_TLB_ENTRY_BITS 4
+#else
+#define CPU_TLB_ENTRY_BITS 5
+#endif
 
 typedef struct CPUTLBEntry {
-    uint32_t address;
-    uint32_t addend;
+    /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
+       bit TARGET_PAGE_BITS-1..4  : Nonzero for accesses that should not
+                                    go directly to ram.
+       bit 3                      : indicates that the entry is invalid
+       bit 2..0                   : zero
+    */
+    target_ulong addr_read;
+    target_ulong addr_write;
+    target_ulong addr_code;
+    /* Addend to virtual address to get physical address.  IO accesses
+       use the corresponding iotlb value.  */
+#if TARGET_PHYS_ADDR_BITS == 64
+    /* on i386 Linux make sure it is aligned */
+    target_phys_addr_t addend __attribute__((aligned(8)));
+#else
+    target_phys_addr_t addend;
+#endif
+    /* padding to get a power of two size */
+    uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
+                  (sizeof(target_ulong) * 3 +
+                   ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
+                   sizeof(target_phys_addr_t))];
 } CPUTLBEntry;
 
+#ifdef WORDS_BIGENDIAN
+typedef struct icount_decr_u16 {
+    uint16_t high;
+    uint16_t low;
+} icount_decr_u16;
+#else
+typedef struct icount_decr_u16 {
+    uint16_t low;
+    uint16_t high;
+} icount_decr_u16;
+#endif
+
+struct kvm_run;
+struct KVMState;
+
+typedef struct CPUBreakpoint {
+    target_ulong pc;
+    int flags; /* BP_* */
+    TAILQ_ENTRY(CPUBreakpoint) entry;
+} CPUBreakpoint;
+
+typedef struct CPUWatchpoint {
+    target_ulong vaddr;
+    target_ulong len_mask;
+    int flags; /* BP_* */
+    TAILQ_ENTRY(CPUWatchpoint) entry;
+} CPUWatchpoint;
+
+#define CPU_TEMP_BUF_NLONGS 128
+#define CPU_COMMON                                                      \
+    struct TranslationBlock *current_tb; /* currently executing TB */   \
+    /* soft mmu support */                                              \
+    /* in order to avoid passing too many arguments to the MMIO         \
+       helpers, we store some rarely used information in the CPU        \
+       context */                                                       \
+    unsigned long mem_io_pc; /* host pc at which the memory was         \
+                                accessed */                             \
+    target_ulong mem_io_vaddr; /* target virtual addr at which the      \
+                                  memory was accessed */                \
+    uint32_t halted; /* Nonzero if the CPU is in suspend state */       \
+    uint32_t interrupt_request;                                         \
+    volatile sig_atomic_t exit_request;                                 \
+    /* The meaning of the MMU modes is defined in the target code. */   \
+    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
+    target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE];               \
+    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];           \
+    /* buffer for temporaries in the code generator */                  \
+    long temp_buf[CPU_TEMP_BUF_NLONGS];                                 \
+                                                                        \
+    int64_t icount_extra; /* Instructions until next timer event. */    \
+    /* Number of cycles left, with interrupt flag in high bit.          \
+       This allows a single read-compare-cbranch-write sequence to test \
+       for both decrementer underflow and exceptions.  */               \
+    union {                                                             \
+        uint32_t u32;                                                   \
+        icount_decr_u16 u16;                                            \
+    } icount_decr;                                                      \
+    uint32_t can_do_io; /* nonzero if memory mapped IO is safe */       \
+                                                                        \
+    /* from this point: preserved by CPU reset */                       \
+    /* ice debug support */                                             \
+    TAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;            \
+    int singlestep_enabled;                                             \
+                                                                        \
+    TAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;            \
+    CPUWatchpoint *watchpoint_hit;                                      \
+                                                                        \
+    struct GDBRegisterState *gdb_regs;                                  \
+                                                                        \
+    /* Core interrupt code */                                           \
+    jmp_buf jmp_env;                                                    \
+    int exception_index;                                                \
+                                                                        \
+    CPUState *next_cpu; /* next CPU sharing TB cache */                 \
+    int cpu_index; /* CPU index (informative) */                        \
+    int running; /* Nonzero if cpu is currently running (usermode) */   \
+    /* user data */                                                     \
+    void *opaque;                                                       \
+                                                                        \
+    const char *cpu_model_str;                                          \
+    struct KVMState *kvm_state;                                         \
+    struct kvm_run *kvm_run;                                            \
+    int kvm_fd;
 
 #endif
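
One practical note on the TARGET_FMT_* macros added above: because target_ulong changes width with the target, they are meant to be string-pasted into format strings, so callers never hard-code %x or PRIx64. A minimal sketch with one configuration pinned by hand (the typedef and define below are stand-ins for what cpu-defs.h selects per target):

    #include <inttypes.h>
    #include <stdio.h>

    /* Stand-ins for one configuration (TARGET_LONG_BITS == 64);
       cpu-defs.h normally chooses these, never the caller. */
    typedef uint64_t target_ulong;
    #define TARGET_FMT_lx "%016" PRIx64

    int main(void)
    {
        target_ulong pc = 0xffffffff81000000ULL;
        /* String pasting keeps this one printf valid whether the macro
           expands to "%08x" or "%016" PRIx64. */
        printf("pc=" TARGET_FMT_lx "\n", pc);
        return 0;
    }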
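The TB_JMP_PAGE_* constants only make sense next to the hash that consumes them. The function below is modeled on tb_jmp_cache_hash_func() from the exec-all.h of this era and is a sketch, not part of this patch; target_ulong and TARGET_PAGE_BITS are hard-coded here where the real code takes them from the target configuration:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t target_ulong;   /* one possible configuration */
    #define TARGET_PAGE_BITS 12      /* 4 KB pages; each target defines this */
    #define TB_JMP_CACHE_BITS 12
    #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
    #define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
    #define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
    #define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
    #define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

    /* Page-derived bits fill the top half of the index and in-page bits
       the bottom half, so all TBs of one guest page land in one aligned
       group of TB_JMP_PAGE_SIZE slots that invalidation can sweep. */
    static unsigned int tb_jmp_cache_hash_func(target_ulong pc)
    {
        target_ulong tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
        return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
                | (tmp & TB_JMP_ADDR_MASK));
    }

    int main(void)
    {
        /* Two PCs on the same guest page share the top six index bits. */
        printf("%03x %03x\n", tb_jmp_cache_hash_func(0x00401000),
                              tb_jmp_cache_hash_func(0x00401abc));
        return 0;
    }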
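The dummy[] expression in CPUTLBEntry is easiest to check with concrete numbers. For TARGET_LONG_BITS == 32 and TARGET_PHYS_ADDR_BITS == 64 (so CPU_TLB_ENTRY_BITS == 5): the three address tags take 12 bytes, (-12) & 7 = 4 reproduces the alignment bytes the compiler inserts before the 8-byte-aligned addend, and 32 - (12 + 4 + 8) leaves 8 bytes of padding, for an entry of exactly 1 << 5 bytes. A standalone check of that arithmetic, with the two typedefs pinned to that configuration:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t target_ulong;        /* assume TARGET_LONG_BITS == 32 */
    typedef uint64_t target_phys_addr_t;  /* assume TARGET_PHYS_ADDR_BITS == 64 */
    #define CPU_TLB_ENTRY_BITS 5          /* what the header picks for 32/64 */

    typedef struct CPUTLBEntry {
        target_ulong addr_read;
        target_ulong addr_write;
        target_ulong addr_code;
        target_phys_addr_t addend __attribute__((aligned(8)));
        /* 12 bytes of tags + 4 alignment bytes + 8-byte addend = 24 used,
           so this array is 32 - 24 = 8 bytes long. */
        uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
                      (sizeof(target_ulong) * 3 +
                       ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
                       sizeof(target_phys_addr_t))];
    } CPUTLBEntry;

    int main(void)
    {
        printf("sizeof(CPUTLBEntry) = %zu\n", sizeof(CPUTLBEntry)); /* 32 */
        return 0;
    }

A power-of-two entry size keeps the TLB lookup in generated code to a shift, a mask, and an add, which is why the padding is worth the wasted bytes.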
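The icount_decr comment ("a single read-compare-cbranch-write sequence") is worth unpacking: the instruction budget lives in the low half and the interrupt flag in the high half of the same 32-bit word, so storing 0xffff to the high half makes the whole word negative as a signed value, and one signed test catches both an exhausted budget and a pending event. A sketch of that mechanism under the little-endian layout (FakeEnv is an illustration, not a QEMU type, and exactly which code writes the high half is outside this header):

    #include <stdint.h>
    #include <stdio.h>

    /* Little-endian half ordering; big-endian hosts swap the halves,
       which is what the WORDS_BIGENDIAN #ifdef above does. */
    typedef struct {
        union {
            uint32_t u32;
            struct { uint16_t low; uint16_t high; } u16;
        } icount_decr;
    } FakeEnv;

    int main(void)
    {
        FakeEnv env = { .icount_decr = { .u32 = 100 } };  /* 100 insns left */

        /* An asynchronous exit request stores 0xffff to the high half,
           forcing the word negative regardless of the remaining count. */
        env.icount_decr.u16.high = 0xffff;

        /* The one test emitted at TB boundaries: decrement, then branch
           if negative, covering both underflow and pending events. */
        if ((int32_t)--env.icount_decr.u32 < 0)
            printf("leave the translated-code loop\n");
        return 0;
    }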