/*
 * Linux kernel wrapper for KQEMU
 * Copyright (c) 2004-2005 Fabrice Bellard
 */
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/errno.h>
10 #include <linux/proc_fs.h>
11 #include <linux/version.h>
12 #include <linux/ioctl.h>
13 #include <linux/smp_lock.h>
14 #include <linux/miscdevice.h>
15 #include <asm/atomic.h>
16 #include <asm/processor.h>
17 #include <asm/uaccess.h>
20 #include "kqemu-kernel.h"
/* Build-time compatibility shims.
   NOTE(review): this excerpt is elided — the matching #endif lines (and
   presumably #ifndef guards around the pfn fallbacks) are not visible here. */
22 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19)
23 #error "Linux 2.4.19 or above needed"
/* Fallbacks for old kernels without page_to_pfn/pfn_to_page: assumes a
   single flat mem_map (no discontiguous/sparse memory model). */
27 #define page_to_pfn(page) ((page) - mem_map)
28 #define pfn_to_page(pfn) (mem_map + (pfn))
/* KQEMU needs executable kernel mappings for generated code.  On NX-capable
   i386 kernels PAGE_KERNEL_EXEC exists but __PAGE_KERNEL_EXEC is usually not
   exported to modules, so rebuild it from __PAGE_KERNEL with NX cleared;
   otherwise (no NX) plain PAGE_KERNEL is already executable. */
31 #ifdef PAGE_KERNEL_EXEC
33 /* problem : i386 kernels usually don't export __PAGE_KERNEL_EXEC */
34 #undef PAGE_KERNEL_EXEC
35 #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL & ~_PAGE_NX)
38 #define PAGE_KERNEL_EXEC PAGE_KERNEL
/* Module parameters.
   NOTE(review): the definition of 'major' itself is elided in this excerpt
   (presumably 'int major = N;' with 0 meaning "use the misc device").
   MODULE_PARM() is the 2.4-era API, removed in later 2.6 kernels in favor
   of module_param() — confirm the target kernel range before changing. */
48 /* if 0 is used, then devfs/udev is used to automatically create the
51 MODULE_PARM(major,"i");
53 /* configurable max_instances */
/* Maximum number of simultaneously open kqemu instances (see kqemu_open). */
54 int max_instances = 4;
55 MODULE_PARM(max_instances,"i");
57 /* lock the page at virtual address 'user_addr' and return its
58 page index. Return -1 if error */
/* Pins one user page in RAM with get_user_pages() so the emulator can keep
   touching it; returns the pinned struct page as an opaque handle and stores
   its pfn in *ppage_index.
   NOTE(review): several body lines are elided in this excerpt — the local
   declarations ('ret', 'page'), the remaining get_user_pages() arguments,
   and the error / swap-out checks.  Presumably mm->mmap_sem is taken around
   get_user_pages() somewhere in the elided code — confirm against the full
   source. */
59 struct kqemu_user_page *CDECL kqemu_lock_user_page(unsigned long *ppage_index,
60 unsigned long user_addr)
65 ret = get_user_pages(current, current->mm,
68 1, /* 'write': intent to write. */
74 /* we ensure here that the page cannot be swapped out by the
76 /* XXX: This test may be incorrect for 2.6 kernels */
84 *ppage_index = page_to_pfn(page);
85 return (struct kqemu_user_page *)page;
/* Release a page previously pinned by kqemu_lock_user_page().
   NOTE(review): the body is elided in this excerpt — presumably it marks the
   page dirty and drops the get_user_pages() reference (set_page_dirty /
   put_page or page_cache_release); confirm against the full source. */
88 void CDECL kqemu_unlock_user_page(struct kqemu_user_page *page1)
90 struct page *page = (struct page *)page1;
98 /* Allocate a new page. The page must be mapped in the kernel
99 space. Return the page_index or -1 if error */
/* Allocates one zero-filled low-memory page (GFP_KERNEL — may sleep), stores
   its pfn in *ppage_index, and returns the struct page as an opaque handle.
   NOTE(review): the failure check on 'vaddr' (get_zeroed_page() returning 0)
   and the local declarations are elided in this excerpt. */
100 struct kqemu_page *CDECL kqemu_alloc_zeroed_page(unsigned long *ppage_index)
105 vaddr = get_zeroed_page(GFP_KERNEL);
111 page = virt_to_page(vaddr);
112 *ppage_index = page_to_pfn(page);
113 return (struct kqemu_page *)page;
/* Free a page obtained from kqemu_alloc_zeroed_page().
   NOTE(review): the body is elided in this excerpt — presumably it calls
   __free_page(page); confirm against the full source. */
116 void CDECL kqemu_free_page(struct kqemu_page *page1)
118 struct page *page = (struct page *)page1;
125 /* return a kernel address of the physical page page_index */
126 void * CDECL kqemu_page_kaddr(struct kqemu_page *page1)
128 struct page *page = (struct page *)page1;
129 return page_address(page);
132 /* contraint: each page of the vmalloced area must be in the first 4
133 GB of physical memory. Moreover, execution of code should be
134 enabled in the allocated area. */
135 void * CDECL kqemu_vmalloc(unsigned int size)
137 return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL_EXEC);
140 void CDECL kqemu_vfree(void *ptr)
145 unsigned long CDECL kqemu_vmalloc_to_phys(const void *vaddr)
148 page = vmalloc_to_page((void *)vaddr);
151 return page_to_pfn(page);
154 /* Map a IO area in the kernel address space and return its
155 address. Return NULL if error or not implemented. */
156 void * CDECL kqemu_io_map(unsigned long page_index, unsigned int size)
158 return ioremap(page_index << PAGE_SHIFT, size);
161 /* Unmap the IO area */
162 void CDECL kqemu_io_unmap(void *ptr, unsigned int size)
167 /* return TRUE if a signal is pending (i.e. the guest must stop
/* Called periodically from the emulation loop: yields the CPU when a
   reschedule is requested, then reports whether a signal is pending so the
   caller can return to user space.
   NOTE(review): the schedule() call inside the if-branch and the braces are
   elided in this excerpt. */
169 int CDECL kqemu_schedule(void)
171 if (need_resched()) {
174 return signal_pending(current);
/* printf-style logging routed to printk with a "kqemu: " prefix.
   NOTE(review): the 'log_buf' definition and the va_start/va_end pair are
   elided in this excerpt.  If log_buf is a file-scope static buffer, this
   is presumably not safe against concurrent callers — confirm against the
   full source. */
179 void CDECL kqemu_log(const char *fmt, ...)
183 vsnprintf(log_buf, sizeof(log_buf), fmt, ap);
184 printk("kqemu: %s", log_buf);
188 /*********************************************************/
/* Driver-global state, guarded by kqemu_lock where noted. */
190 static int kqemu_nb_instances = 0;
/* NOTE(review): SPIN_LOCK_UNLOCKED static initializer was removed in later
   kernels (replaced by DEFINE_SPINLOCK) — fine for the 2.4/early-2.6 range
   this module targets. */
191 static spinlock_t kqemu_lock = SPIN_LOCK_UNLOCKED;
/* Per-instance locked-page budget, computed once in init_module(). */
192 static int max_locked_pages;
/* One instance per open file descriptor; stored in filp->private_data.
   NOTE(review): the closing '};' of this struct is elided in this excerpt. */
194 struct kqemu_instance {
195 struct semaphore sem;
196 struct kqemu_state *state;
/* open() handler: enforces the max_instances limit under kqemu_lock, then
   allocates and initializes a kqemu_instance for this file descriptor.
   NOTE(review): the error paths are elided in this excerpt — presumably the
   over-limit branch returns -EBUSY/-ENOMEM, and a failed kmalloc() must
   decrement kqemu_nb_instances again; confirm against the full source.
   init_MUTEX() is the 2.4/early-2.6 semaphore API. */
199 static int kqemu_open(struct inode *inode, struct file *filp)
201 struct kqemu_instance *ks;
203 spin_lock(&kqemu_lock);
204 if (kqemu_nb_instances >= max_instances) {
205 spin_unlock(&kqemu_lock);
208 kqemu_nb_instances++;
209 spin_unlock(&kqemu_lock);
211 ks = kmalloc(sizeof(struct kqemu_instance), GFP_KERNEL);
214 init_MUTEX(&ks->sem);
216 filp->private_data = ks;
/* release() handler: tears down the instance's emulator state, drops the
   instance count under kqemu_lock, and logs page-accounting counters.
   NOTE(review): elided in this excerpt — presumably a NULL guard around
   kqemu_delete(ks->state), the kfree(ks) call, and the definitions of
   lock_count / page_alloc_count; confirm against the full source. */
220 static int kqemu_release(struct inode *inode, struct file *filp)
222 struct kqemu_instance *ks = filp->private_data;
226 kqemu_delete(ks->state);
233 spin_lock(&kqemu_lock);
234 kqemu_nb_instances--;
235 spin_unlock(&kqemu_lock);
238 printk("lock_count=%d page_alloc_count=%d\n",
239 lock_count, page_alloc_count);
/* ioctl dispatcher (pre-unlocked_ioctl signature).  Visible commands:
   an init command that copies a kqemu_init descriptor from user space and
   creates the emulator state; an exec command that round-trips a
   kqemu_cpu_state through kqemu_get_cpu_state(); and KQEMU_GET_VERSION.
   NOTE(review): the switch statement itself, the other case labels, and the
   error returns (-EFAULT on failed copies, presumably) are elided in this
   excerpt.  The (void *)arg casts lack __user annotations — harmless on
   pre-sparse kernels, but worth confirming for newer trees. */
244 static int kqemu_ioctl(struct inode *inode, struct file *filp,
245 unsigned int cmd, unsigned long arg)
247 struct kqemu_instance *ks = filp->private_data;
248 struct kqemu_state *s = ks->state;
255 struct kqemu_init d1, *d = &d1;
260 if (copy_from_user(d, (void *)arg, sizeof(*d))) {
264 s = kqemu_init(d, max_locked_pages);
275 struct kqemu_cpu_state *ctx;
281 ctx = kqemu_get_cpu_state(s);
282 if (copy_from_user(ctx, (void *)arg, sizeof(*ctx))) {
289 if (copy_to_user((void *)arg, ctx, sizeof(*ctx))) {
295 case KQEMU_GET_VERSION:
/* put_user() returns 0 or -EFAULT, so the < 0 comparison is correct. */
297 if (put_user(KQEMU_VERSION, (int *)arg) < 0) {
/* Character-device entry points.  Uses the old GNU 'label:' initializer
   syntax here but C99 '.field =' syntax for kqemu_dev below — the full file
   presumably mixes both; the ioctl/open entries and the closing '};' lines
   are elided in this excerpt. */
312 static struct file_operations kqemu_fops = {
316 release: kqemu_release,
/* Fallback misc device (dynamic minor), registered when major == 0 so that
   devfs/udev creates the node automatically. */
319 static struct miscdevice kqemu_dev =
321 .minor = MISC_DYNAMIC_MINOR,
/* Module entry point: prints the banner, sizes the per-instance locked-page
   budget from total RAM, and registers either a classic chrdev (major != 0)
   or the dynamic misc device.
   NOTE(review): elided in this excerpt — the declarations of 'ret' and the
   sysinfo struct 'si', the si_meminfo(&si) call that fills it, the
   major==0/else branch structure, and the error returns. */
326 int init_module(void)
331 printk("QEMU Accelerator Module version %d.%d.%d, Copyright (c) 2005 Fabrice Bellard\n"
332 "This is a proprietary product. Read the LICENSE file for more information\n"
333 "Redistribution of this module is prohibited without authorization\n",
334 (KQEMU_VERSION >> 16),
335 (KQEMU_VERSION >> 8) & 0xff,
336 (KQEMU_VERSION) & 0xff);
/* Budget: half of RAM split across the instances, capped at 32768 pages
   (128 MB with 4 kB pages). */
338 max_locked_pages = si.totalram / (2 * max_instances);
339 if (max_locked_pages > 32768)
340 max_locked_pages = 32768;
343 ret = register_chrdev(major, "kqemu", &kqemu_fops);
345 printk("kqemu: could not get major %d\n", major);
349 ret = misc_register (&kqemu_dev);
351 printk("kqemu: could not create device\n");
/* NOTE(review): the '* 4' kB conversion assumes PAGE_SIZE == 4096. */
355 printk("KQEMU installed, max_instances=%d max_locked_mem=%dkB.\n",
357 max_locked_pages * 4);
/* Module exit point: undoes whichever registration init_module() performed.
   NOTE(review): the 'if (major) ... else ...' structure selecting between
   the two calls is elided in this excerpt — presumably only one of them
   runs, mirroring init_module(). */
361 void cleanup_module(void)
364 unregister_chrdev(major, "kqemu");
366 misc_deregister (&kqemu_dev);