/* Interface version shared between QEMU and the kqemu kernel module;
 * presumably packed as 0xMMmmpp (here 1.1.0) — checked via the
 * KQEMU_GET_VERSION ioctl below. TODO confirm packing against callers. */
4 #define KQEMU_VERSION 0x010100
/* One cached x86 segment descriptor (presumably selector plus the hidden
 * base/limit/flags fields).
 * NOTE(review): the member list is missing from this extract — the struct
 * body must be taken from the full file. */
6 struct kqemu_segment_cache {
/* CPU state exchanged between user-space QEMU and the kqemu kernel module
 * around each KQEMU_EXEC request (see kqemu_exec() / KQEMU_EXEC below).
 * NOTE(review): this extract is incomplete — several members, the
 * preprocessor guards and the closing brace of the struct are missing. */
13 struct kqemu_cpu_state {
/* NOTE(review): the two conflicting 'regs' declarations below were
 * presumably the 64-bit (16 GPRs) and 32-bit (8 GPRs) branches of an
 * #ifdef __x86_64__ that was lost in extraction — confirm in full file. */
15 unsigned long regs[16];
17 unsigned long regs[8];
/* Padding/reserved words; 'dumm2' looks like a typo for 'dummy2', but the
 * names are part of the source-level ABI of this header — left untouched. */
22 uint32_t dummy0, dummy1, dumm2, dummy3, dummy4;
24 struct kqemu_segment_cache segs[6]; /* selector values */
25 struct kqemu_segment_cache ldt;
26 struct kqemu_segment_cache tr;
27 struct kqemu_segment_cache gdt; /* only base and limit are used */
28 struct kqemu_segment_cache idt; /* only base and limit are used */
/* Explicit 8-byte alignment — presumably keeps the layout of this 64-bit
 * field identical between 32-bit and 64-bit builds; TODO confirm. */
37 uint64_t efer __attribute__((aligned(8)));
46 int cpl; /* currently only 3 */
48 uint32_t error_code; /* error_code when exiting with an exception */
49 unsigned long next_eip; /* next eip value when exiting with an interrupt */
50 unsigned int nb_pages_to_flush; /* number of pages to flush,
51 KQEMU_FLUSH_ALL means full flush */
/* Upper bound on per-exec TLB page flushes; any count above the maximum
 * is signalled with the sentinel KQEMU_FLUSH_ALL (full flush). */
52 #define KQEMU_MAX_PAGES_TO_FLUSH 512
53 #define KQEMU_FLUSH_ALL (KQEMU_MAX_PAGES_TO_FLUSH + 1)
/* NOTE(review): the fields below belong to a struct whose opening line is
 * missing from this extract — presumably 'struct kqemu_init', the type
 * named by kqemu_init() and the KQEMU_INIT ioctl below; confirm in the
 * full file. The page-alignment constraints in the trailing comments are
 * part of the user/kernel contract. */
59 uint8_t *ram_base; /* must be page aligned */
60 unsigned long ram_size; /* must be multiple of 4 KB */
61 uint8_t *ram_dirty; /* must be page aligned */
62 uint32_t **phys_to_ram_map; /* must be page aligned */
63 unsigned long *pages_to_flush; /* must be page aligned */
/* Return codes of kqemu_exec(): the high byte selects the exit class,
 * the low byte carries the exception/interrupt vector where noted. */
66 #define KQEMU_RET_ABORT (-1)
67 #define KQEMU_RET_EXCEPTION 0x0000 /* 8 low order bit are the exception */
68 #define KQEMU_RET_INT 0x0100 /* 8 low order bit are the interrupt */
69 #define KQEMU_RET_SOFTMMU 0x0200 /* emulation needed (I/O or ...) — rest of
                                      comment lost in extract; now closed so
                                      the following lines are not swallowed */
71 #define KQEMU_RET_INTR 0x0201 /* interrupted by a signal */
72 #define KQEMU_RET_SYSCALL 0x0300 /* syscall insn */
/* Device control codes QEMU uses to drive the kqemu device.
 * The Win32 (CTL_CODE) and Unix (_IO*) encodings differ, so each command
 * must be defined exactly once per platform.
 * Fix: restored the #ifdef _WIN32 / #else / #endif guards — without them
 * KQEMU_EXEC, KQEMU_INIT and KQEMU_GET_VERSION were each defined twice,
 * a macro-redefinition error. */
#ifdef _WIN32
#define KQEMU_EXEC        CTL_CODE(FILE_DEVICE_UNKNOWN, 1, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS)
#define KQEMU_INIT        CTL_CODE(FILE_DEVICE_UNKNOWN, 2, METHOD_BUFFERED, FILE_WRITE_ACCESS)
#define KQEMU_GET_VERSION CTL_CODE(FILE_DEVICE_UNKNOWN, 3, METHOD_BUFFERED, FILE_READ_ACCESS)
#else
#define KQEMU_EXEC        _IOWR('q', 1, struct kqemu_cpu_state) /* run guest code */
#define KQEMU_INIT        _IOW('q', 2, struct kqemu_init)       /* set up RAM mapping */
#define KQEMU_GET_VERSION _IOR('q', 3, int)                     /* compare with KQEMU_VERSION */
#endif
/* Calling convention for the engine <-> OS-glue API below: regparm(0) is a
 * GCC attribute forcing all arguments onto the stack on i386 — presumably
 * so both sides agree on the ABI whatever -mregparm each is built with. */
87 #define CDECL __attribute__((regparm(0)))
/* Core entry points of the kqemu engine, called by the per-OS driver glue.
 * Ownership: the state returned by kqemu_init() is released only by
 * kqemu_delete(). */
/* Create engine state from the user-supplied init block; 'max_locked_pages'
 * bounds how many host pages may be locked. Returns NULL semantics —
 * TODO confirm failure convention in full file. */
89 struct kqemu_state * CDECL kqemu_init(struct kqemu_init *d, int max_locked_pages);
/* Accessor for the per-execution CPU state buffer inside 's'. */
90 struct kqemu_cpu_state * CDECL kqemu_get_cpu_state(struct kqemu_state *s);
/* Run guest code until an exit condition; returns a KQEMU_RET_* code. */
91 long CDECL kqemu_exec(struct kqemu_state *s);
/* Free all resources owned by 's'. */
92 void CDECL kqemu_delete(struct kqemu_state *s);
/* Page-management callbacks the host-OS support code must provide to the
 * engine. Both page types stay opaque to the engine; each function reports
 * the physical page index through *ppage_index. */
95 struct kqemu_page; /* opaque data for host page */
96 struct kqemu_user_page; /* opaque data for host user page */
/* Pin the user page containing 'user_addr'; pair with
 * kqemu_unlock_user_page(). */
98 struct kqemu_user_page *CDECL kqemu_lock_user_page(unsigned long *ppage_index,
99 unsigned long user_addr);
100 void CDECL kqemu_unlock_user_page(struct kqemu_user_page *page);
/* Allocate a zero-filled host page; pair with kqemu_free_page(). */
102 struct kqemu_page *CDECL kqemu_alloc_zeroed_page(unsigned long *ppage_index);
103 void CDECL kqemu_free_page(struct kqemu_page *page);
104 void * CDECL kqemu_page_kaddr(struct kqemu_page *page);
/* Kernel-memory helpers, also provided by the host-OS support code. */
/* Allocate/free 'size' bytes of kernel virtual memory; pair the calls. */
106 void * CDECL kqemu_vmalloc(unsigned int size);
107 void CDECL kqemu_vfree(void *ptr);
/* Translate a kqemu_vmalloc()ed virtual address to a physical address. */
108 unsigned long CDECL kqemu_vmalloc_to_phys(const void *vaddr);
/* Map/unmap 'size' bytes of I/O memory starting at physical page
 * 'page_index'; pair the calls. */
110 void * CDECL kqemu_io_map(unsigned long page_index, unsigned int size);
111 void CDECL kqemu_io_unmap(void *ptr, unsigned int size);
/* Yield the CPU to the host scheduler; return value semantics not visible
 * here — presumably a pending-signal indication. TODO confirm. */
113 int CDECL kqemu_schedule(void);
/* printf-style logging into the host kernel log. */
115 void CDECL kqemu_log(const char *fmt, ...);