+int kvm_sync_vcpus(void)
+{
+ CPUState *env;
+
+ for (env = first_cpu; env != NULL; env = env->next_cpu) {
+ int ret;
+
+ ret = kvm_arch_put_registers(env);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * dirty pages logging control
+ */
+static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
+ ram_addr_t size, unsigned flags,
+ unsigned mask)
+{
+ KVMState *s = kvm_state;
+ KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
+ if (mem == NULL) {
+ fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
+ TARGET_FMT_plx "\n", __func__, phys_addr,
+ phys_addr + size - 1);
+ return -EINVAL;
+ }
+
+ flags = (mem->flags & ~mask) | flags;
+ /* Nothing changed, no need to issue ioctl */
+ if (flags == mem->flags)
+ return 0;
+
+ mem->flags = flags;
+
+ return kvm_set_user_memory_region(s, mem);
+}
+
+int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
+{
+ return kvm_dirty_pages_log_change(phys_addr, size,
+ KVM_MEM_LOG_DIRTY_PAGES,
+ KVM_MEM_LOG_DIRTY_PAGES);
+}
+
+int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
+{
+ return kvm_dirty_pages_log_change(phys_addr, size,
+ 0,
+ KVM_MEM_LOG_DIRTY_PAGES);
+}
+
/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 *
 * Fetches the kernel's dirty-page log for the slot covering the range and
 * marks each page the kernel reported dirty in QEMU's bitmap via
 * cpu_physical_memory_set_dirty().
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
+void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
+ target_phys_addr_t end_addr)
+{
+ KVMState *s = kvm_state;
+ KVMDirtyLog d;
+ KVMSlot *mem = kvm_lookup_matching_slot(s, start_addr, end_addr);
+ unsigned long alloc_size;
+ ram_addr_t addr;
+ target_phys_addr_t phys_addr = start_addr;
+
+ dprintf("sync addr: " TARGET_FMT_lx " into %lx\n", start_addr,
+ mem->phys_offset);
+ if (mem == NULL) {
+ fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
+ TARGET_FMT_plx "\n", __func__, phys_addr, end_addr - 1);
+ return;
+ }
+
+ alloc_size = mem->memory_size >> TARGET_PAGE_BITS / sizeof(d.dirty_bitmap);
+ d.dirty_bitmap = qemu_mallocz(alloc_size);
+
+ d.slot = mem->slot;
+ dprintf("slot %d, phys_addr %llx, uaddr: %llx\n",
+ d.slot, mem->start_addr, mem->phys_offset);
+
+ if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
+ dprintf("ioctl failed %d\n", errno);
+ goto out;
+ }
+
+ phys_addr = start_addr;
+ for (addr = mem->phys_offset; phys_addr < end_addr; phys_addr+= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
+ unsigned long *bitmap = (unsigned long *)d.dirty_bitmap;
+ unsigned nr = (phys_addr - start_addr) >> TARGET_PAGE_BITS;
+ unsigned word = nr / (sizeof(*bitmap) * 8);
+ unsigned bit = nr % (sizeof(*bitmap) * 8);
+ if ((bitmap[word] >> bit) & 1)
+ cpu_physical_memory_set_dirty(addr);
+ }
+out:
+ qemu_free(d.dirty_bitmap);
+}
+
+int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
+{
+ int ret = -ENOSYS;
+#ifdef KVM_CAP_COALESCED_MMIO
+ KVMState *s = kvm_state;
+
+ if (s->coalesced_mmio) {
+ struct kvm_coalesced_mmio_zone zone;
+
+ zone.addr = start;
+ zone.size = size;
+
+ ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
+ }
+#endif
+
+ return ret;
+}
+
+int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
+{
+ int ret = -ENOSYS;
+#ifdef KVM_CAP_COALESCED_MMIO
+ KVMState *s = kvm_state;
+
+ if (s->coalesced_mmio) {
+ struct kvm_coalesced_mmio_zone zone;
+
+ zone.addr = start;
+ zone.size = size;
+
+ ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
+ }
+#endif
+
+ return ret;
+}
+
+int kvm_check_extension(KVMState *s, unsigned int extension)
+{
+ int ret;
+
+ ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
+ if (ret < 0) {
+ ret = 0;
+ }
+
+ return ret;
+}
+