X-Git-Url: http://git.maemo.org/git/?a=blobdiff_plain;f=cpu-all.h;h=ebe8bfbadb030b9d8c400b3137e24e0f5541b165;hb=1a621c8dc9e4dcc9d385bdd2c24c5b5dbfd0ebe4;hp=6a1612536203c24446bae562bb0bd366bf786ac2;hpb=0f459d16c378128a586cf161526721a81b629405;p=qemu diff --git a/cpu-all.h b/cpu-all.h index 6a16125..ebe8bfb 100644 --- a/cpu-all.h +++ b/cpu-all.h @@ -14,22 +14,20 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * License along with this library; if not, see . */ #ifndef CPU_ALL_H #define CPU_ALL_H -#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) -#define WORDS_ALIGNED -#endif +#include "qemu-common.h" +#include "cpu-common.h" /* some important defines: * * WORDS_ALIGNED : if defined, the host cpu can only make word aligned * memory accesses. * - * WORDS_BIGENDIAN : if defined, the host cpu is big endian and + * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and * otherwise little endian. * * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet)) @@ -37,10 +35,9 @@ * TARGET_WORDS_BIGENDIAN : same for target cpu */ -#include "bswap.h" #include "softfloat.h" -#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) +#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) #define BSWAP_NEEDED #endif @@ -126,7 +123,7 @@ typedef union { endian ! */ typedef union { float64 d; -#if defined(WORDS_BIGENDIAN) \ +#if defined(HOST_WORDS_BIGENDIAN) \ || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT)) struct { uint32_t upper; @@ -144,7 +141,7 @@ typedef union { #ifdef TARGET_SPARC typedef union { float128 q; -#if defined(WORDS_BIGENDIAN) \ +#if defined(HOST_WORDS_BIGENDIAN) \ || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT)) struct { uint32_t upmost; @@ -206,12 +203,12 @@ typedef union { * user : user mode access using soft MMU * kernel : kernel mode access using soft MMU */ -static inline int ldub_p(void *ptr) +static inline int ldub_p(const void *ptr) { return *(uint8_t *)ptr; } -static inline int ldsb_p(void *ptr) +static inline int ldsb_p(const void *ptr) { return *(int8_t *)ptr; } @@ -224,87 +221,58 @@ static inline void stb_p(void *ptr, int v) /* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the kernel handles unaligned load/stores may give better results, but it is a system wide setting : bad */ -#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED) +#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED) /* conservative code for little endian unaligned accesses */ -static inline int lduw_le_p(void *ptr) +static inline int lduw_le_p(const void *ptr) { -#ifdef __powerpc__ +#ifdef _ARCH_PPC int val; __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr)); return val; -#elif defined(__sparc__) -#ifndef ASI_PRIMARY_LITTLE -#define ASI_PRIMARY_LITTLE 0x88 -#endif - - int val; - __asm__ __volatile__ ("lduha [%1] %2, %0" : "=r" (val) : "r" (ptr), - "i" (ASI_PRIMARY_LITTLE)); - return val; #else - uint8_t *p = ptr; + const uint8_t *p = ptr; return p[0] | (p[1] << 8); #endif } -static inline int ldsw_le_p(void *ptr) +static inline int ldsw_le_p(const void *ptr) { -#ifdef __powerpc__ +#ifdef _ARCH_PPC int val; __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr)); return (int16_t)val; -#elif defined(__sparc__) - int val; - 
__asm__ __volatile__ ("ldsha [%1] %2, %0" : "=r" (val) : "r" (ptr), - "i" (ASI_PRIMARY_LITTLE)); - return val; #else - uint8_t *p = ptr; + const uint8_t *p = ptr; return (int16_t)(p[0] | (p[1] << 8)); #endif } -static inline int ldl_le_p(void *ptr) +static inline int ldl_le_p(const void *ptr) { -#ifdef __powerpc__ +#ifdef _ARCH_PPC int val; __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr)); return val; -#elif defined(__sparc__) - int val; - __asm__ __volatile__ ("lduwa [%1] %2, %0" : "=r" (val) : "r" (ptr), - "i" (ASI_PRIMARY_LITTLE)); - return val; #else - uint8_t *p = ptr; + const uint8_t *p = ptr; return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24); #endif } -static inline uint64_t ldq_le_p(void *ptr) +static inline uint64_t ldq_le_p(const void *ptr) { -#if defined(__sparc__) - uint64_t val; - __asm__ __volatile__ ("ldxa [%1] %2, %0" : "=r" (val) : "r" (ptr), - "i" (ASI_PRIMARY_LITTLE)); - return val; -#else - uint8_t *p = ptr; + const uint8_t *p = ptr; uint32_t v1, v2; v1 = ldl_le_p(p); v2 = ldl_le_p(p + 4); return v1 | ((uint64_t)v2 << 32); -#endif } static inline void stw_le_p(void *ptr, int v) { -#ifdef __powerpc__ +#ifdef _ARCH_PPC __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr)); -#elif defined(__sparc__) - __asm__ __volatile__ ("stha %1, [%2] %3" : "=m" (*(uint16_t *)ptr) : "r" (v), - "r" (ptr), "i" (ASI_PRIMARY_LITTLE)); #else uint8_t *p = ptr; p[0] = v; @@ -314,11 +282,8 @@ static inline void stw_le_p(void *ptr, int v) static inline void stl_le_p(void *ptr, int v) { -#ifdef __powerpc__ +#ifdef _ARCH_PPC __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr)); -#elif defined(__sparc__) - __asm__ __volatile__ ("stwa %1, [%2] %3" : "=m" (*(uint32_t *)ptr) : "r" (v), - "r" (ptr), "i" (ASI_PRIMARY_LITTLE)); #else uint8_t *p = ptr; p[0] = v; @@ -330,20 +295,14 @@ static inline void stl_le_p(void *ptr, int v) static inline void stq_le_p(void *ptr, uint64_t v) { -#if defined(__sparc__) - __asm__ __volatile__ ("stxa %1, [%2] %3" : "=m" (*(uint64_t *)ptr) : "r" (v), - "r" (ptr), "i" (ASI_PRIMARY_LITTLE)); -#undef ASI_PRIMARY_LITTLE -#else uint8_t *p = ptr; stl_le_p(p, (uint32_t)v); stl_le_p(p + 4, v >> 32); -#endif } /* float access */ -static inline float32 ldfl_le_p(void *ptr) +static inline float32 ldfl_le_p(const void *ptr) { union { float32 f; @@ -363,7 +322,7 @@ static inline void stfl_le_p(void *ptr, float32 v) stl_le_p(ptr, u.i); } -static inline float64 ldfq_le_p(void *ptr) +static inline float64 ldfq_le_p(const void *ptr) { CPU_DoubleU u; u.l.lower = ldl_le_p(ptr); @@ -381,22 +340,22 @@ static inline void stfq_le_p(void *ptr, float64 v) #else -static inline int lduw_le_p(void *ptr) +static inline int lduw_le_p(const void *ptr) { return *(uint16_t *)ptr; } -static inline int ldsw_le_p(void *ptr) +static inline int ldsw_le_p(const void *ptr) { return *(int16_t *)ptr; } -static inline int ldl_le_p(void *ptr) +static inline int ldl_le_p(const void *ptr) { return *(uint32_t *)ptr; } -static inline uint64_t ldq_le_p(void *ptr) +static inline uint64_t ldq_le_p(const void *ptr) { return *(uint64_t *)ptr; } @@ -418,12 +377,12 @@ static inline void stq_le_p(void *ptr, uint64_t v) /* float access */ -static inline float32 ldfl_le_p(void *ptr) +static inline float32 ldfl_le_p(const void *ptr) { return *(float32 *)ptr; } -static inline float64 ldfq_le_p(void *ptr) +static inline float64 ldfq_le_p(const void *ptr) { return *(float64 *)ptr; } @@ -439,9 +398,9 @@ static inline void stfq_le_p(void *ptr, 
float64 v) } #endif -#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED) +#if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED) -static inline int lduw_be_p(void *ptr) +static inline int lduw_be_p(const void *ptr) { #if defined(__i386__) int val; @@ -451,12 +410,12 @@ static inline int lduw_be_p(void *ptr) : "m" (*(uint16_t *)ptr)); return val; #else - uint8_t *b = (uint8_t *) ptr; + const uint8_t *b = ptr; return ((b[0] << 8) | b[1]); #endif } -static inline int ldsw_be_p(void *ptr) +static inline int ldsw_be_p(const void *ptr) { #if defined(__i386__) int val; @@ -466,12 +425,12 @@ static inline int ldsw_be_p(void *ptr) : "m" (*(uint16_t *)ptr)); return (int16_t)val; #else - uint8_t *b = (uint8_t *) ptr; + const uint8_t *b = ptr; return (int16_t)((b[0] << 8) | b[1]); #endif } -static inline int ldl_be_p(void *ptr) +static inline int ldl_be_p(const void *ptr) { #if defined(__i386__) || defined(__x86_64__) int val; @@ -481,12 +440,12 @@ static inline int ldl_be_p(void *ptr) : "m" (*(uint32_t *)ptr)); return val; #else - uint8_t *b = (uint8_t *) ptr; + const uint8_t *b = ptr; return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3]; #endif } -static inline uint64_t ldq_be_p(void *ptr) +static inline uint64_t ldq_be_p(const void *ptr) { uint32_t a,b; a = ldl_be_p(ptr); @@ -532,7 +491,7 @@ static inline void stq_be_p(void *ptr, uint64_t v) /* float access */ -static inline float32 ldfl_be_p(void *ptr) +static inline float32 ldfl_be_p(const void *ptr) { union { float32 f; @@ -552,7 +511,7 @@ static inline void stfl_be_p(void *ptr, float32 v) stl_be_p(ptr, u.i); } -static inline float64 ldfq_be_p(void *ptr) +static inline float64 ldfq_be_p(const void *ptr) { CPU_DoubleU u; u.l.upper = ldl_be_p(ptr); @@ -570,22 +529,22 @@ static inline void stfq_be_p(void *ptr, float64 v) #else -static inline int lduw_be_p(void *ptr) +static inline int lduw_be_p(const void *ptr) { return *(uint16_t *)ptr; } -static inline int ldsw_be_p(void *ptr) +static inline int ldsw_be_p(const void *ptr) { return *(int16_t *)ptr; } -static inline int ldl_be_p(void *ptr) +static inline int ldl_be_p(const void *ptr) { return *(uint32_t *)ptr; } -static inline uint64_t ldq_be_p(void *ptr) +static inline uint64_t ldq_be_p(const void *ptr) { return *(uint64_t *)ptr; } @@ -607,12 +566,12 @@ static inline void stq_be_p(void *ptr, uint64_t v) /* float access */ -static inline float32 ldfl_be_p(void *ptr) +static inline float32 ldfl_be_p(const void *ptr) { return *(float32 *)ptr; } -static inline float64 ldfq_be_p(void *ptr) +static inline float64 ldfq_be_p(const void *ptr) { return *(float64 *)ptr; } @@ -659,15 +618,32 @@ static inline void stfq_be_p(void *ptr, float64 v) /* MMU memory access macros */ #if defined(CONFIG_USER_ONLY) +#include +#include "qemu-types.h" + /* On some host systems the guest address space is reserved on the host. * This allows the guest address space to be offset to a convenient location. */ -//#define GUEST_BASE 0x20000000 -#define GUEST_BASE 0 +#if defined(CONFIG_USE_GUEST_BASE) +extern unsigned long guest_base; +extern int have_guest_base; +#define GUEST_BASE guest_base +#else +#define GUEST_BASE 0ul +#endif /* All direct uses of g2h and h2g need to go away for usermode softmmu. 
*/ #define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE)) -#define h2g(x) ((target_ulong)(x - GUEST_BASE)) +#define h2g(x) ({ \ + unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \ + /* Check if given address fits target address space */ \ + assert(__ret == (abi_ulong)__ret); \ + (abi_ulong)__ret; \ +}) +#define h2g_valid(x) ({ \ + unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \ + (__guest == (abi_ulong)__guest); \ +}) #define saddr(x) g2h(x) #define laddr(x) g2h(x) @@ -763,12 +739,15 @@ extern unsigned long qemu_host_page_mask; #define PAGE_RESERVED 0x0020 void page_dump(FILE *f); +int walk_memory_regions(void *, + int (*fn)(void *, unsigned long, unsigned long, unsigned long)); int page_get_flags(target_ulong address); void page_set_flags(target_ulong start, target_ulong end, int flags); int page_check_range(target_ulong start, target_ulong len, int flags); void cpu_exec_init_all(unsigned long tb_size); CPUState *cpu_copy(CPUState *env); +CPUState *qemu_get_cpu(int cpu); void cpu_dump_state(CPUState *env, FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...), @@ -777,13 +756,13 @@ void cpu_dump_statistics (CPUState *env, FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...), int flags); -void cpu_abort(CPUState *env, const char *fmt, ...) - __attribute__ ((__format__ (__printf__, 2, 3))) - __attribute__ ((__noreturn__)); +void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...) + __attribute__ ((__format__ (__printf__, 2, 3))); extern CPUState *first_cpu; extern CPUState *cpu_single_env; +extern int64_t qemu_icount; +extern int use_icount; -#define CPU_INTERRUPT_EXIT 0x01 /* wants exit from main loop */ #define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */ #define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */ #define CPU_INTERRUPT_TIMER 0x08 /* internal timer exception pending */ @@ -793,16 +772,37 @@ extern CPUState *cpu_single_env; #define CPU_INTERRUPT_DEBUG 0x80 /* Debug event occured. */ #define CPU_INTERRUPT_VIRQ 0x100 /* virtual interrupt pending. */ #define CPU_INTERRUPT_NMI 0x200 /* NMI pending. */ +#define CPU_INTERRUPT_INIT 0x400 /* INIT pending. */ +#define CPU_INTERRUPT_SIPI 0x800 /* SIPI pending. */ +#define CPU_INTERRUPT_MCE 0x1000 /* (x86 only) MCE pending. 
*/ void cpu_interrupt(CPUState *s, int mask); void cpu_reset_interrupt(CPUState *env, int mask); -int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type); -int cpu_watchpoint_remove(CPUState *env, target_ulong addr); -void cpu_watchpoint_remove_all(CPUState *env); -int cpu_breakpoint_insert(CPUState *env, target_ulong pc); -int cpu_breakpoint_remove(CPUState *env, target_ulong pc); -void cpu_breakpoint_remove_all(CPUState *env); +void cpu_exit(CPUState *s); + +int qemu_cpu_has_work(CPUState *env); + +/* Breakpoint/watchpoint flags */ +#define BP_MEM_READ 0x01 +#define BP_MEM_WRITE 0x02 +#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE) +#define BP_STOP_BEFORE_ACCESS 0x04 +#define BP_WATCHPOINT_HIT 0x08 +#define BP_GDB 0x10 +#define BP_CPU 0x20 + +int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags, + CPUBreakpoint **breakpoint); +int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags); +void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint); +void cpu_breakpoint_remove_all(CPUState *env, int mask); +int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, + int flags, CPUWatchpoint **watchpoint); +int cpu_watchpoint_remove(CPUState *env, target_ulong addr, + target_ulong len, int flags); +void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint); +void cpu_watchpoint_remove_all(CPUState *env, int mask); #define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */ #define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */ @@ -825,6 +825,7 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr); #define CPU_LOG_PCALL (1 << 6) #define CPU_LOG_IOPORT (1 << 7) #define CPU_LOG_TB_CPU (1 << 8) +#define CPU_LOG_RESET (1 << 9) /* define log items */ typedef struct CPULogItem { @@ -833,39 +834,21 @@ typedef struct CPULogItem { const char *help; } CPULogItem; -extern CPULogItem cpu_log_items[]; +extern const CPULogItem cpu_log_items[]; void cpu_set_log(int log_flags); void cpu_set_log_filename(const char *filename); int cpu_str_to_log_mask(const char *str); /* IO ports API */ - -/* NOTE: as these functions may be even used when there is an isa - brige on non x86 targets, we always defined them */ -#ifndef NO_CPU_IO_DEFS -void cpu_outb(CPUState *env, int addr, int val); -void cpu_outw(CPUState *env, int addr, int val); -void cpu_outl(CPUState *env, int addr, int val); -int cpu_inb(CPUState *env, int addr); -int cpu_inw(CPUState *env, int addr); -int cpu_inl(CPUState *env, int addr); -#endif - -/* address in the RAM (different from a physical address) */ -#ifdef USE_KQEMU -typedef uint32_t ram_addr_t; -#else -typedef unsigned long ram_addr_t; -#endif +#include "ioport.h" /* memory API */ -extern ram_addr_t phys_ram_size; extern int phys_ram_fd; -extern uint8_t *phys_ram_base; extern uint8_t *phys_ram_dirty; extern ram_addr_t ram_size; +extern ram_addr_t last_ram_offset; /* physical memory access */ @@ -873,19 +856,8 @@ extern ram_addr_t ram_size; 3 flags. The ROMD code stores the page ram offset in iotlb entry, so only a limited number of ids are avaiable. */ -#define IO_MEM_SHIFT 3 #define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT)) -#define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */ -#define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */ -#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT) -#define IO_MEM_NOTDIRTY (3 << IO_MEM_SHIFT) - -/* Acts like a ROM when read and like a device when written. 
*/ -#define IO_MEM_ROMD (1) -#define IO_MEM_SUBPAGE (2) -#define IO_MEM_SUBWIDTH (4) - /* Flags stored in the low bits of the TLB virtual address. These are defined so that fast path ram access is all zeros. */ /* Zero if TLB entry is valid. */ @@ -896,52 +868,12 @@ extern ram_addr_t ram_size; /* Set if TLB entry is an IO callback. */ #define TLB_MMIO (1 << 5) -typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value); -typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr); - -void cpu_register_physical_memory(target_phys_addr_t start_addr, - ram_addr_t size, - ram_addr_t phys_offset); -ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr); -ram_addr_t qemu_ram_alloc(ram_addr_t); -void qemu_ram_free(ram_addr_t addr); -int cpu_register_io_memory(int io_index, - CPUReadMemoryFunc **mem_read, - CPUWriteMemoryFunc **mem_write, - void *opaque); -CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index); -CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index); - -void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, - int len, int is_write); -static inline void cpu_physical_memory_read(target_phys_addr_t addr, - uint8_t *buf, int len) -{ - cpu_physical_memory_rw(addr, buf, len, 0); -} -static inline void cpu_physical_memory_write(target_phys_addr_t addr, - const uint8_t *buf, int len) -{ - cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1); -} -uint32_t ldub_phys(target_phys_addr_t addr); -uint32_t lduw_phys(target_phys_addr_t addr); -uint32_t ldl_phys(target_phys_addr_t addr); -uint64_t ldq_phys(target_phys_addr_t addr); -void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val); -void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val); -void stb_phys(target_phys_addr_t addr, uint32_t val); -void stw_phys(target_phys_addr_t addr, uint32_t val); -void stl_phys(target_phys_addr_t addr, uint32_t val); -void stq_phys(target_phys_addr_t addr, uint64_t val); - -void cpu_physical_memory_write_rom(target_phys_addr_t addr, - const uint8_t *buf, int len); int cpu_memory_rw_debug(CPUState *env, target_ulong addr, uint8_t *buf, int len, int is_write); -#define VGA_DIRTY_FLAG 0x01 -#define CODE_DIRTY_FLAG 0x02 +#define VGA_DIRTY_FLAG 0x01 +#define CODE_DIRTY_FLAG 0x02 +#define MIGRATION_DIRTY_FLAG 0x08 /* read dirty bit (return 0 or 1) */ static inline int cpu_physical_memory_is_dirty(ram_addr_t addr) @@ -964,38 +896,54 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, int dirty_flags); void cpu_tlb_update_dirty(CPUState *env); +int cpu_physical_memory_set_dirty_tracking(int enable); + +int cpu_physical_memory_get_dirty_tracking(void); + +int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, + target_phys_addr_t end_addr); + void dump_exec_info(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...)); -/*******************************************/ -/* host CPU ticks (if available) */ +/* Coalesced MMIO regions are areas where write operations can be reordered. + * This usually implies that write operations are side-effect free. This allows + * batching which can make a major impact on performance when using + * virtualization. 
+ */ +void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size); -#if defined(__powerpc__) +void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size); -static inline uint32_t get_tbl(void) -{ - uint32_t tbl; - asm volatile("mftb %0" : "=r" (tbl)); - return tbl; -} +/*******************************************/ +/* host CPU ticks (if available) */ -static inline uint32_t get_tbu(void) -{ - uint32_t tbl; - asm volatile("mftbu %0" : "=r" (tbl)); - return tbl; -} +#if defined(_ARCH_PPC) static inline int64_t cpu_get_real_ticks(void) { - uint32_t l, h, h1; - /* NOTE: we test if wrapping has occurred */ - do { - h = get_tbu(); - l = get_tbl(); - h1 = get_tbu(); - } while (h != h1); - return ((int64_t)h << 32) | l; + int64_t retval; +#ifdef _ARCH_PPC64 + /* This reads timebase in one 64bit go and includes Cell workaround from: + http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html + */ + __asm__ __volatile__ ( + "mftb %0\n\t" + "cmpwi %0,0\n\t" + "beq- $-8" + : "=r" (retval)); +#else + /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */ + unsigned long junk; + __asm__ __volatile__ ( + "mftbu %1\n\t" + "mftb %L0\n\t" + "mftbu %0\n\t" + "cmpw %0,%1\n\t" + "bne $-16" + : "=r" (retval), "=r" (junk)); +#endif + return retval; } #elif defined(__i386__) @@ -1073,7 +1021,7 @@ static inline int64_t cpu_get_real_ticks (void) static inline int64_t cpu_get_real_ticks(void) { -#if __mips_isa_rev >= 2 +#if defined(__mips_isa_rev) && __mips_isa_rev >= 2 uint32_t count; static uint32_t cyc_per_count = 0; @@ -1107,14 +1055,12 @@ static inline int64_t profile_getclock(void) return cpu_get_real_ticks(); } -extern int64_t kqemu_time, kqemu_time_start; extern int64_t qemu_time, qemu_time_start; extern int64_t tlb_flush_time; -extern int64_t kqemu_exec_count; extern int64_t dev_time; -extern int64_t kqemu_ret_int_count; -extern int64_t kqemu_ret_excp_count; -extern int64_t kqemu_ret_intr_count; #endif +void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status, + uint64_t mcg_status, uint64_t addr, uint64_t misc); + #endif /* CPU_ALL_H */
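
The portable fallback paths above (lduw_le_p, stl_le_p and friends) assemble and split values one byte at a time, which keeps unaligned and cross-endian accesses well defined on any host. A minimal standalone sketch of that pattern follows; the sketch_* names and the test in main() are illustrative only and not part of the header:

/* Standalone sketch of the byte-wise little-endian helpers used as the
 * portable fallback above. This is not the QEMU build, just an
 * illustration of the access pattern. */
#include <stdint.h>
#include <stdio.h>

static inline int sketch_lduw_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);            /* low byte first: little endian */
}

static inline void sketch_stl_le_p(void *ptr, uint32_t v)
{
    uint8_t *p = ptr;
    p[0] = v;                             /* store least significant byte first */
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
}

int main(void)
{
    uint8_t buf[5];                       /* deliberately unaligned target */
    sketch_stl_le_p(buf + 1, 0x11223344u);
    printf("%04x\n", (unsigned)sketch_lduw_le_p(buf + 1));  /* prints 3344 */
    return 0;
}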
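
With CONFIG_USE_GUEST_BASE the user-mode address macros offset every guest address by guest_base, and the new h2g()/h2g_valid() forms additionally check that the translated value still fits in abi_ulong. A self-contained sketch of that mapping, assuming a 32-bit guest and an arbitrary guest_base purely for illustration:

/* Sketch of the host<->guest address mapping added above. The guest_base
 * value and the 32-bit abi_ulong are assumptions for this example, not
 * QEMU's build configuration. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t abi_ulong;                 /* assume a 32-bit guest */
static unsigned long guest_base = 0x20000000ul;

static void *g2h(abi_ulong x)
{
    return (void *)((unsigned long)x + guest_base);
}

static int h2g_valid(void *x)
{
    unsigned long guest = (unsigned long)x - guest_base;
    return guest == (abi_ulong)guest;       /* fits in the guest address space? */
}

static abi_ulong h2g(void *x)
{
    unsigned long ret = (unsigned long)x - guest_base;
    assert(ret == (abi_ulong)ret);          /* same check as h2g_valid, but fatal */
    return (abi_ulong)ret;
}

int main(void)
{
    void *host = g2h(0x1000);
    printf("valid=%d guest=0x%x\n", h2g_valid(host), (unsigned)h2g(host));
    return 0;
}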
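
Dirty-page tracking keeps one flag byte per RAM page (phys_ram_dirty), and the separate VGA, CODE and new MIGRATION flags let each consumer clear only its own bit. A toy version of that bookkeeping; the page size, array length and helper names are assumptions for the example, not QEMU's implementation:

/* Toy per-page dirty bitmap mirroring the flag values above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_BITS            12            /* 4 KiB pages, assumed for the sketch */
#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define MIGRATION_DIRTY_FLAG 0x08

static uint8_t dirty[256];                 /* one byte of flags per page */

static void mark_dirty(unsigned long addr)
{
    dirty[addr >> PAGE_BITS] = 0xff;       /* a write dirties the page for every client */
}

static int is_dirty_for(unsigned long addr, int flag)
{
    return (dirty[addr >> PAGE_BITS] & flag) != 0;
}

static void reset_dirty(unsigned long addr, int flags)
{
    dirty[addr >> PAGE_BITS] &= ~flags;    /* each client clears only its own flag */
}

int main(void)
{
    memset(dirty, 0, sizeof(dirty));
    mark_dirty(0x3000);
    reset_dirty(0x3000, VGA_DIRTY_FLAG);   /* display refreshed this page */
    printf("vga=%d migration=%d\n",
           is_dirty_for(0x3000, VGA_DIRTY_FLAG),
           is_dirty_for(0x3000, MIGRATION_DIRTY_FLAG));
    return 0;
}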
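
The 32-bit PowerPC path of cpu_get_real_ticks() reads the timebase as two halves and retries when the upper word changes between reads, i.e. when the low word wrapped mid-sequence. The same retry logic in plain C, with read_counter_hi()/read_counter_lo() standing in for mftbu/mftb only for this sketch:

/* C-level sketch of the mftbu/mftb/mftbu retry loop used above on 32-bit
 * PowerPC. The two readers split a fake 64-bit counter that advances on
 * every low-word read, so the wrap case can actually be exercised. */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_timebase = 0x00000001ffffffffull;

static uint32_t read_counter_hi(void) { return fake_timebase >> 32; }
static uint32_t read_counter_lo(void) { fake_timebase++; return (uint32_t)fake_timebase; }

static int64_t get_ticks(void)
{
    uint32_t hi, lo, hi2;
    do {
        hi  = read_counter_hi();     /* mftbu */
        lo  = read_counter_lo();     /* mftb  */
        hi2 = read_counter_hi();     /* mftbu again */
    } while (hi != hi2);             /* low word wrapped between reads: retry */
    return ((int64_t)hi << 32) | lo;
}

int main(void)
{
    printf("%lld\n", (long long)get_ticks());
    return 0;
}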