+int kvm_put_mp_state(CPUState *env)
+{
+ struct kvm_mp_state mp_state = { .mp_state = env->mp_state };
+
+ return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
+}
+
+int kvm_get_mp_state(CPUState *env)
+{
+ struct kvm_mp_state mp_state;
+ int ret;
+
+ ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
+ if (ret < 0) {
+ return ret;
+ }
+ env->mp_state = mp_state.mp_state;
+ return 0;
+}
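+
+/*
+ * Illustrative usage (not part of this patch): architecture code is
+ * expected to mirror env->mp_state around vcpu state transfers, e.g.
+ *
+ *     if (kvm_get_mp_state(env) == 0) {
+ *         ... inspect or modify env->mp_state ...
+ *         kvm_put_mp_state(env);
+ *     }
+ */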
+
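+/*
+ * Write the register state of all vcpus back into the kernel, for when
+ * the whole machine state must be made visible to KVM at once (see
+ * kvm_reset_vcpus() below).
+ */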
+int kvm_sync_vcpus(void)
+{
+ CPUState *env;
+
+ for (env = first_cpu; env != NULL; env = env->next_cpu) {
+ int ret;
+
+ ret = kvm_arch_put_registers(env);
+        if (ret) {
+            return ret;
+        }
+ }
+
+ return 0;
+}
+
+/*
+ * dirty pages logging control
+ */
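+
+/*
+ * Set the flag bits selected by @mask to the values given in @flags for
+ * the slot covering [phys_addr, phys_addr + size); all other flag bits
+ * of the slot are preserved.
+ */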
+static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
+ ram_addr_t size, int flags, int mask)
+{
+ KVMState *s = kvm_state;
+ KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
+ int old_flags;
+
+ if (mem == NULL) {
+ fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
+ TARGET_FMT_plx "\n", __func__, phys_addr,
+ phys_addr + size - 1);
+ return -EINVAL;
+ }
+
+ old_flags = mem->flags;
+
+ flags = (mem->flags & ~mask) | flags;
+ mem->flags = flags;
+
+    /* Migration keeps dirty logging enabled on all slots, so fold the
+     * global setting into the effective flags first. */
+    if (s->migration_log) {
+        flags |= KVM_MEM_LOG_DIRTY_PAGES;
+    }
+
+    /* If the effective flags did not change, there is no need to issue
+     * the ioctl. */
+    if (flags == old_flags) {
+        return 0;
+    }
+
+ return kvm_set_user_memory_region(s, mem);
+}
+
+int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
+{
+ return kvm_dirty_pages_log_change(phys_addr, size,
+ KVM_MEM_LOG_DIRTY_PAGES,
+ KVM_MEM_LOG_DIRTY_PAGES);
+}
+
+int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
+{
+ return kvm_dirty_pages_log_change(phys_addr, size,
+ 0,
+ KVM_MEM_LOG_DIRTY_PAGES);
+}
+
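+/*
+ * Globally enable or disable dirty logging for migration.  While
+ * s->migration_log is set, slots are registered with
+ * KVM_MEM_LOG_DIRTY_PAGES regardless of their per-slot flag, so only
+ * slots whose effective logging state changes are re-registered here.
+ */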
+int kvm_set_migration_log(int enable)
+{
+ KVMState *s = kvm_state;
+ KVMSlot *mem;
+ int i, err;
+
+ s->migration_log = enable;
+
+ for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
+ mem = &s->slots[i];
+
+ if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
+ continue;
+ }
+ err = kvm_set_user_memory_region(s, mem);
+ if (err) {
+ return err;
+ }
+ }
+ return 0;
+}
+
+/**
+ * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
+ *
+ * This function updates qemu's dirty bitmap using
+ * cpu_physical_memory_set_dirty(), which sets all of a page's dirty
+ * tracking bits at once for every page the kernel reports as dirty.
+ *
+ * @start_addr: start of logged region.
+ * @end_addr: end of logged region.
+ */
+int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
+ target_phys_addr_t end_addr)
+{
+ KVMState *s = kvm_state;
+ unsigned long size, allocated_size = 0;
+ target_phys_addr_t phys_addr;
+ ram_addr_t addr;
+ KVMDirtyLog d;
+ KVMSlot *mem;
+ int ret = 0;
+
+ d.dirty_bitmap = NULL;
+ while (start_addr < end_addr) {
+ mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
+ if (mem == NULL) {
+ break;
+ }
+
+        size = ((mem->memory_size >> TARGET_PAGE_BITS) + 7) / 8;
+        /* The kernel copies the bitmap out in long-sized chunks; round
+         * the buffer up so it cannot write past the end. */
+        size = (size + sizeof(long) - 1) & ~(sizeof(long) - 1);
+ if (!d.dirty_bitmap) {
+ d.dirty_bitmap = qemu_malloc(size);
+ } else if (size > allocated_size) {
+ d.dirty_bitmap = qemu_realloc(d.dirty_bitmap, size);
+ }
+ allocated_size = size;
+ memset(d.dirty_bitmap, 0, allocated_size);
+
+ d.slot = mem->slot;
+
+        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) < 0) {
+ dprintf("ioctl failed %d\n", errno);
+ ret = -1;
+ break;
+ }
+
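+        /* The kernel reports one bit per target page, LSB-first within
+         * each long of the bitmap: bit N corresponds to the Nth page of
+         * the slot.  Mark every page whose bit is set as dirty in qemu's
+         * bitmap as well. */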
+ for (phys_addr = mem->start_addr, addr = mem->phys_offset;
+ phys_addr < mem->start_addr + mem->memory_size;
+ phys_addr += TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
+ unsigned long *bitmap = (unsigned long *)d.dirty_bitmap;
+ unsigned nr = (phys_addr - mem->start_addr) >> TARGET_PAGE_BITS;
+ unsigned word = nr / (sizeof(*bitmap) * 8);
+ unsigned bit = nr % (sizeof(*bitmap) * 8);
+
+ if ((bitmap[word] >> bit) & 1) {
+ cpu_physical_memory_set_dirty(addr);
+ }
+ }
+ start_addr = phys_addr;
+ }
+ qemu_free(d.dirty_bitmap);
+
+ return ret;
+}
+
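+/*
+ * With KVM_CAP_COALESCED_MMIO, writes to a registered zone are batched
+ * by the kernel into a ring buffer instead of causing an exit to
+ * userspace on every access.  Without the capability these helpers
+ * report -ENOSYS.
+ */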
+int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
+{
+ int ret = -ENOSYS;
+#ifdef KVM_CAP_COALESCED_MMIO
+ KVMState *s = kvm_state;
+
+ if (s->coalesced_mmio) {
+ struct kvm_coalesced_mmio_zone zone;
+
+ zone.addr = start;
+ zone.size = size;
+
+ ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
+ }
+#endif
+
+ return ret;
+}
+
+int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
+{
+ int ret = -ENOSYS;
+#ifdef KVM_CAP_COALESCED_MMIO
+ KVMState *s = kvm_state;
+
+ if (s->coalesced_mmio) {
+ struct kvm_coalesced_mmio_zone zone;
+
+ zone.addr = start;
+ zone.size = size;
+
+ ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
+ }
+#endif
+
+ return ret;
+}
+
+int kvm_check_extension(KVMState *s, unsigned int extension)
+{
+ int ret;
+
+ ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
+ if (ret < 0) {
+ ret = 0;
+ }
+
+ return ret;
+}
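+
+/*
+ * Illustrative usage (not part of this patch): capability checks gate
+ * optional features at init time, e.g.
+ *
+ *     s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
+ */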
+
+static void kvm_reset_vcpus(void *opaque)
+{
+ kvm_sync_vcpus();
+}
+