/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <sys/types.h>
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif
int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } __attribute__((packed)) cpuid_data;
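    /*
     * The packed wrapper above matches what KVM_SET_CPUID2 expects: a
     * struct kvm_cpuid2 header followed immediately in memory by the
     * entries[] array (a flexible array member in the kernel header).
     */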
    uint32_t limit, i, j, cpuid_i;
    uint32_t eax, ebx, ecx, edx;
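    /*
     * Build the vcpu's complete CPUID table: walk the basic leaves
     * 0..limit (leaf 2 is stateful, leaves 4, 0xb and 0xd take a
     * sub-leaf index), then the 0x80000000 extended range, and hand
     * the whole table to KVM in a single KVM_SET_CPUID2 call.
     */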
    cpuid_i = 0;

    cpu_x86_cpuid(env, 0, 0, &eax, &ebx, &ecx, &edx);
    limit = eax;
    for (i = 0; i <= limit; i++) {
        struct kvm_cpuid_entry2 *c = &cpuid_data.entries[cpuid_i++];
        switch (i) {
        case 2: {
            /* keep reading function 2 until all the data has been received */
            int times;

            cpu_x86_cpuid(env, i, 0, &eax, &ebx, &ecx, &edx);
            times = eax & 0xff;
            c->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
            c->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
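            /*
             * Leaf 2 is stateful on old Intel CPUs: the low byte of EAX
             * from the first invocation says how many times CPUID(2)
             * must be executed in total.  STATEFUL_FUNC marks the
             * entries as such a sequence and STATE_READ_NEXT marks the
             * entry KVM should return first.
             */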
            for (j = 1; j < times; ++j) {
                cpu_x86_cpuid(env, i, 0, &eax, &ebx, &ecx, &edx);
                c->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                c = &cpuid_data.entries[++cpuid_i];
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                cpu_x86_cpuid(env, i, j, &eax, &ebx, &ecx, &edx);
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; /* [sic], as spelled in <linux/kvm.h> */
                c->index = j;
                c = &cpuid_data.entries[++cpuid_i];
                if (i == 4 && eax == 0)
                    break;
                if (i == 0xb && !(ecx & 0xff00))
                    break;
                if (i == 0xd && eax == 0)
                    break;
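                /*
                 * The checks above stop the sub-leaf walk at each
                 * leaf's terminator: leaf 4 returns cache type 0 in
                 * EAX when exhausted, leaf 0xb returns level type 0 in
                 * ECX[15:8], and leaf 0xd returns EAX == 0 for
                 * unsupported sub-leaves.
                 */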
            }
            break;
        default:
            cpu_x86_cpuid(env, i, 0, &eax, &ebx, &ecx, &edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &eax, &ebx, &ecx, &edx);
    limit = eax;
    for (i = 0x80000000; i <= limit; i++) {
        struct kvm_cpuid_entry2 *c = &cpuid_data.entries[cpuid_i++];
        cpu_x86_cpuid(env, i, 0, &eax, &ebx, &ecx, &edx);
    }

    cpuid_data.cpuid.nent = cpuid_i;

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}
static int kvm_has_msr_star(CPUState *env)
{
    static int has_msr_star;

    if (has_msr_star == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;
        int ret;

        msr_list.nmsrs = 0;
        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore. */
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
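        /*
         * Called with nmsrs == 0 this ioctl fails with E2BIG but fills
         * in the number of MSRs the kernel knows about, which sizes
         * the allocation for the real query below.
         */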
        kvm_msr_list = qemu_mallocz(sizeof(msr_list) +
                                    msr_list.nmsrs * sizeof(msr_list.indices[0]));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = 1;
                    break;
                }
            }
        }
        qemu_free(kvm_msr_list);
    }

    if (has_msr_star == 1)
        return 1;
    return 0;
}
int kvm_arch_init(KVMState *s, int smp_cpus)
{
    int ret;

    /* create vm86 tss.  KVM uses vm86 mode to emulate 16-bit code
     * directly.  In order to use vm86 mode, a TSS is needed.  Since this
     * must be part of guest physical memory, we need to allocate it.  Older
     * versions of KVM just assumed that it would be at the end of physical
     * memory but that doesn't work with more than 4GB of memory.  We simply
     * refuse to work with those older versions of KVM. */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    if (ret <= 0) {
        fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
        return ret;
    }
    /* this address is 3 pages before the bios, and the bios should present
     * it as unavailable memory.  FIXME, need to ensure the e820 map deals
     * with this properly. */
    return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
}
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}
static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}
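/*
 * get_seg() mirrors set_seg() above: multiplying each 0/1 attribute by
 * its mask sets the corresponding descriptor-flag bit exactly when KVM
 * reported the attribute as set.
 */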
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set)
        *kvm_reg = *qemu_reg;
    else
        *qemu_reg = *kvm_reg;
}
static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;
    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0)
            return ret;
    }
    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);
    if (set)
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);

    return ret;
}
static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
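    /*
     * Bits 11..13 of the FPU status word hold TOP, the stack-top index,
     * which QEMU tracks separately in fpstt and splices back in here.
     * The abridged tag word (ftwx) below uses 1 for a valid register,
     * while QEMU's fptags uses 1 for empty, hence the negation.
     */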
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}
static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memcpy(sregs.interrupt_bitmap,
           env->interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));
    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                                (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }
    }
    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);
    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;
    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env);
    sregs.apic_base = cpu_get_apic_base(env);

    sregs.efer = env->efer;
    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}
static int kvm_put_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star(env))
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
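    /*
     * MSR_STAR is synced only when KVM lists it in
     * KVM_GET_MSR_INDEX_LIST (see kvm_has_msr_star() above); presumably
     * writing an MSR the kernel does not report would be rejected.
     */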
#ifdef TARGET_X86_64
    /* FIXME if lm capable */
    kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
    kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
#endif
    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}
static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0)
        return ret;

    env->fpstt = (fpu.fsw >> 11) & 7;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}
static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0)
        return ret;
    memcpy(env->interrupt_bitmap,
           sregs.interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));
    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);
    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;
    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;
    cpu_set_apic_base(env, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env, sregs.cr8);
#define HFLAG_COPY_MASK ~( \
    HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
    HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
    HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
    HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
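    /*
     * Recompute the QEMU-internal hflags cache from the segment and
     * control register state just read back; bits outside
     * HFLAG_COPY_MASK keep their previous values.
     */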
    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
              (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }
    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
                      HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}
static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star(env))
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;
#ifdef TARGET_X86_64
    /* FIXME lm_capable_kernel */
    msrs[n++].index = MSR_CSTAR;
    msrs[n++].index = MSR_KERNELGSBASE;
    msrs[n++].index = MSR_FMASK;
    msrs[n++].index = MSR_LSTAR;
#endif
    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0)
        return ret;
    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        }
    }

    return 0;
}
int kvm_arch_put_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 1);
    if (ret < 0)
        return ret;

    ret = kvm_put_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_msrs(env);
    if (ret < 0)
        return ret;

    return 0;
}
int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 0);
    if (ret < 0)
        return ret;

    ret = kvm_get_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_msrs(env);
    if (ret < 0)
        return ret;

    return 0;
}
int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    /* Try to inject an interrupt if the guest can accept it */
    if (run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        int irq;
        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            struct kvm_interrupt intr;

            intr.irq = irq;
            dprintf("injected interrupt %d\n", irq);
            kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
        }
    }
    /* If we have an interrupt pending but the guest is not ready to
     * receive it, request an interrupt window exit.  This will cause a
     * return to userspace as soon as the guest is ready to receive an
     * interrupt. */
    if ((env->interrupt_request & CPU_INTERRUPT_HARD))
        run->request_interrupt_window = 1;
    else
        run->request_interrupt_window = 0;
642 dprintf("setting tpr\n");
643 run->cr8 = cpu_get_apic_tpr(env);
int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag)
        env->eflags |= IF_MASK;
    else
        env->eflags &= ~IF_MASK;

    cpu_set_apic_tpr(env, run->cr8);
    cpu_set_apic_base(env, run->apic_base);

    return 0;
}
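/*
 * A HLT exit reaches userspace only when no interrupt can be delivered;
 * if nothing is pending, the vcpu is parked with EXCP_HLT, otherwise
 * execution simply continues.
 */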
static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        return 0;
    }

    return 1;
}
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        dprintf("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    }

    return ret;
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
static int kvm_patch_opcode_byte(CPUState *env, target_ulong addr, uint8_t val)
{
    target_phys_addr_t phys_page_addr;
    unsigned long pd;
    uint8_t *ptr;
    phys_page_addr = cpu_get_phys_page_debug(env, addr & TARGET_PAGE_MASK);
    if (phys_page_addr == -1)
        return -EINVAL;
    pd = cpu_get_physical_page_desc(phys_page_addr);
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
        (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM && !(pd & IO_MEM_ROMD))
        return -EINVAL;

    ptr = phys_ram_base + (pd & TARGET_PAGE_MASK)
          + (addr & ~TARGET_PAGE_MASK);
    *ptr = val;

    return 0;
}
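/*
 * Software breakpoints are planted by patching the guest's code with
 * the one-byte int3 instruction (0xcc); the displaced byte is kept in
 * bp->saved_insn so removal can restore it.
 */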
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        kvm_patch_opcode_byte(env, bp->pc, 0xcc))
        return -EINVAL;
    return 0;
}
int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        kvm_patch_opcode_byte(env, bp->pc, bp->saved_insn))
        return -EINVAL;
    return 0;
}
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;
static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++)
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1))
            return n;
    return -1;
}
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        /* watchpoints must be 1, 2, 4 or 8 bytes and naturally aligned */
        if (addr & (len - 1))
            return -EINVAL;
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4)
        return -ENOBUFS;

    if (find_hw_breakpoint(addr, len, type) >= 0)
        return -EEXIST;

    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0)
        return -ENOENT;

    /* the table is unordered: drop an entry by moving the last one
     * into the vacated slot */
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}
void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}
static CPUWatchpoint hw_watchpoint;
int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
{
    int handle = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            /* DR6 bit 14 is the BS flag: a single-step trap */
            if (cpu_single_env->singlestep_enabled)
                handle = 1;
        } else {
            for (n = 0; n < 4; n++)
                if (arch_info->dr6 & (1 << n))
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        handle = 1;
                        break;
                    case 0x1:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
        handle = 1;

    if (!handle)
        /* reflect unhandled debug exceptions back into the guest */
        kvm_update_guest_debug(cpu_single_env,
                               (arch_info->exception == 1) ?
                               KVM_GUESTDBG_INJECT_DB : KVM_GUESTDBG_INJECT_BP);

    return handle;
}
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;
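    /*
     * These tables use the DR7 encodings: R/W type 00 = instruction
     * execution, 01 = data write, 11 = data read/write; LEN 00 = 1
     * byte, 01 = 2, 11 = 4 and 10 = 8 bytes.
     */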
    if (kvm_sw_breakpoints_active(env))
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600; /* GE plus the always-one bit 10 */
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) | /* global enable Gn */
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                (len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
#endif /* KVM_CAP_SET_GUEST_DEBUG */