/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"
#include "gdbstub.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

#ifdef KVM_CAP_EXT_CPUID

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        /* The table filled up: treat it as too small and retry. */
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            qemu_free(cpuid);
            return NULL;
        }
        fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                strerror(-r));
        exit(1);
    }
    return cpuid;
}

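/* Return the host kernel's view of one CPUID register for @function.  The
 * table is fetched with a geometrically growing buffer, since
 * try_get_cpuid() returns NULL (E2BIG) while the buffer is too small. */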
uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;

    if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) {
        return -1U;
    }

    max = 1;
    while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function) {
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                if (function == 0x80000001) {
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, R_EDX);
                    ret |= cpuid_1_edx & 0xdfeff7ff;
                }
                break;
            }
        }
    }

    qemu_free(cpuid);

    return ret;
}

#else

uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
{
    return -1U;
}

#endif

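/* Example: kvm_arch_get_supported_cpuid(env, 1, R_EDX) yields the leaf-1
 * EDX feature bits the kernel can virtualize; kvm_trim_features() below
 * masks a guest feature word down to such a supported set. */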
static void kvm_trim_features(uint32_t *features, uint32_t supported)
{
    int i;
    uint32_t mask;

    for (i = 0; i < 32; ++i) {
        mask = 1U << i;
        if ((*features & mask) && !(supported & mask)) {
            *features &= ~mask;
        }
    }
}

int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } __attribute__((packed)) cpuid_data;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;

    env->mp_state = KVM_MP_STATE_RUNNABLE;

    kvm_trim_features(&env->cpuid_features,
                      kvm_arch_get_supported_cpuid(env, 1, R_EDX));

    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    kvm_trim_features(&env->cpuid_ext_features,
                      kvm_arch_get_supported_cpuid(env, 1, R_ECX));
    env->cpuid_ext_features |= i;

    kvm_trim_features(&env->cpuid_ext2_features,
                      kvm_arch_get_supported_cpuid(env, 0x80000001, R_EDX));
    kvm_trim_features(&env->cpuid_ext3_features,
                      kvm_arch_get_supported_cpuid(env, 0x80000001, R_ECX));

    cpuid_i = 0;

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        struct kvm_cpuid_entry2 *c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                c->function = i;
                /* SIGNIFCANT is the (misspelled) name from the kernel header */
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0)
                    break;
                if (i == 0xb && !(c->ecx & 0xff00))
                    break;
                if (i == 0xd && c->eax == 0)
                    break;

                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        struct kvm_cpuid_entry2 *c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    cpuid_data.cpuid.nent = cpuid_i;

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}

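/* Probe whether the kernel's MSR index list contains MSR_STAR.  The result
 * is cached in a static variable, so the two-step KVM_GET_MSR_INDEX_LIST
 * dance (size query, then the real fetch) runs only once. */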
static int kvm_has_msr_star(CPUState *env)
{
    static int has_msr_star;
    int ret;

    /* first time */
    if (has_msr_star == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        has_msr_star = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore. */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0)
            return 0;

        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
                                              msr_list.nmsrs *
                                              sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = 1;
                    break;
                }
            }
        }

        qemu_free(kvm_msr_list);
    }

    if (has_msr_star == 1)
        return 1;
    return 0;
}

int kvm_arch_init(KVMState *s, int smp_cpus)
{
    int ret;

    /* Create the vm86 TSS.  KVM uses vm86 mode to emulate 16-bit code
     * directly.  In order to use vm86 mode, a TSS is needed.  Since this
     * must be part of guest physical memory, we need to allocate it.  Older
     * versions of KVM just assumed that it would be at the end of physical
     * memory but that doesn't work with more than 4GB of memory.  We simply
     * refuse to work with those older versions of KVM. */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    if (ret <= 0) {
        fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
        return ret;
    }

    /* This address is 3 pages before the bios, and the bios should present
     * it as unavailable memory.  FIXME: we need to ensure the e820 map deals
     * with this properly. */
    return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
}

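/* Conversion helpers between QEMU's SegmentCache and KVM's kvm_segment:
 * the set_* variants flatten QEMU's packed descriptor flags into discrete
 * fields for the kernel, and get_seg() reassembles them. */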
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    /* vm86 segments are always 16-bit, present, DPL 3 data/code */
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;

    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set)
        *kvm_reg = *qemu_reg;
    else
        *qemu_reg = *kvm_reg;
}

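/* Transfer all general-purpose registers in one direction: set != 0 pushes
 * QEMU state into the kernel, set == 0 pulls the kernel state back. */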
static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0)
            return ret;
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set)
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);

    return ret;
}

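/* Note the tag-word translation below: QEMU's fptags stores 1 for an empty
 * register, while kvm_fpu.ftwx stores 1 for a valid one, hence the
 * negation in both directions. */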
static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}

static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memcpy(sregs.interrupt_bitmap,
           env->interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                                (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env);
    sregs.apic_base = cpu_get_apic_base(env);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}

static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}

static int kvm_put_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star(env))
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
#ifdef TARGET_X86_64
    /* FIXME if lm capable */
    kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
    kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
#endif
    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}

static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0)
        return ret;

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0)
        return ret;

    memcpy(env->interrupt_bitmap,
           sregs.interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env, sregs.cr8);

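    /* Recompute hflags: every bit cleared by HFLAG_COPY_MASK is derived
     * from the segment and control register state fetched above, so it is
     * rebuilt from scratch rather than carried over. */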
#define HFLAG_COPY_MASK ~( \
    HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
    HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
    HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
    HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
              (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
                      HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}

static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star(env))
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;
#ifdef TARGET_X86_64
    /* FIXME lm_capable_kernel */
    msrs[n++].index = MSR_CSTAR;
    msrs[n++].index = MSR_KERNELGSBASE;
    msrs[n++].index = MSR_FMASK;
    msrs[n++].index = MSR_LSTAR;
#endif
    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0)
        return ret;

    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        }
    }

    return 0;
}

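/* Push the complete vcpu state into the kernel; each group is written with
 * its own ioctl and the first failure aborts the sequence. */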
int kvm_arch_put_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 1);
    if (ret < 0)
        return ret;

    ret = kvm_put_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_msrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_mp_state(env);
    if (ret < 0)
        return ret;

    /* refresh env->mp_state from the kernel after setting it */
    ret = kvm_get_mp_state(env);
    if (ret < 0)
        return ret;

    return 0;
}

int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 0);
    if (ret < 0)
        return ret;

    ret = kvm_get_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_msrs(env);
    if (ret < 0)
        return ret;

    return 0;
}

int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    /* Try to inject an interrupt if the guest can accept it */
    if (run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        int irq;

        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            struct kvm_interrupt intr;

            intr.irq = irq;
            /* FIXME: errors */
            dprintf("injected interrupt %d\n", irq);
            kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
        }
    }

    /* If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts. */
    if ((env->interrupt_request & CPU_INTERRUPT_HARD))
        run->request_interrupt_window = 1;
    else
        run->request_interrupt_window = 0;

    dprintf("setting tpr\n");
    run->cr8 = cpu_get_apic_tpr(env);

    return 0;
}

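/* After KVM_RUN returns, fold back the state the kernel may have changed:
 * the IF flag from the shared kvm_run area, plus TPR and APIC base. */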
int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag)
        env->eflags |= IF_MASK;
    else
        env->eflags &= ~IF_MASK;

    cpu_set_apic_tpr(env, run->cr8);
    cpu_set_apic_base(env, run->apic_base);

    return 0;
}

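/* A HLT exit only sleeps the vcpu if nothing is deliverable: with a pending
 * unmasked interrupt or NMI, return 1 so emulation simply continues. */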
static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        return 0;
    }

    return 1;
}

int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        dprintf("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    }

    return ret;
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1))
        return -EINVAL;
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
        return -EINVAL;
    return 0;
}

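/* Hardware breakpoints map onto the four x86 debug address registers; the
 * shadow array below is replayed into DR0-DR7 by
 * kvm_arch_update_guest_debug(). */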
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++)
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1))
            return n;
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1))
                return -EINVAL;
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4)
        return -ENOBUFS;

    if (find_hw_breakpoint(addr, len, type) >= 0)
        return -EEXIST;

    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0)
        return -ENOENT;

    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

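/* Decode a guest #DB/#BP exit: DR6 bit 14 signals a single-step trap, bits
 * 0-3 identify the hardware breakpoint that fired, and DR7's type field
 * distinguishes breakpoints from watchpoints.  Exits we do not recognize
 * are reinjected into the guest. */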
static CPUWatchpoint hw_watchpoint;

int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
{
    int handle = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled)
                handle = 1;
        } else {
            for (n = 0; n < 4; n++)
                if (arch_info->dr6 & (1 << n))
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        handle = 1;
                        break;
                    case 0x1:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
        handle = 1;

    if (!handle)
        kvm_update_guest_debug(cpu_single_env,
                               (arch_info->exception == 1) ?
                               KVM_GUESTDBG_INJECT_DB : KVM_GUESTDBG_INJECT_BP);

    return handle;
}

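/* Program debugreg[0-3] with the breakpoint addresses and encode the
 * enable, type and length bits into debugreg[7] in the layout the hardware
 * expects (2 enable bits per slot, 4 control bits per slot). */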
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env))
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;

    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                (len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
#endif /* KVM_CAP_SET_GUEST_DEBUG */