Sync with latest wip/qemu/patch-nvmm-support. - pkgsrc-localpatches - leot's pkgsrc LOCALPATCHES
 (HTM) hg clone https://bitbucket.org/iamleot/pkgsrc-localpatches
       ---
 (DIR) changeset e7d999a1d9e14399607b123f66b5e65e9b63bb80
 (DIR) parent 795b62f7041eae427303880518cb7d1d27d6abab
 (HTM) Author: Leonardo Taccari <iamleot@gmail.com>
        Date:   Thu, 13 Jun 2019 21:17:58 +0200
       
       Sync with latest wip/qemu/patch-nvmm-support.
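
        The update tracks the newer libnvmm calling convention used by the
        patch: each virtual CPU is now described by a struct nvmm_vcpu that
        carries its own state, exit and event buffers (vcpu->state,
        vcpu->exit, vcpu->event) set up by nvmm_vcpu_create(), while the
        QEMU-side bookkeeping (tpr, stop, window-exit flags) moves into a
        separate struct qemu_vcpu wrapper. A minimal sketch of that calling
        convention, using only the libnvmm calls visible in the patch (the
        cpuid and RIP values are placeholders, and guest memory mapping is
        omitted):

            #include <nvmm.h>

            static struct nvmm_machine mach;
            static struct nvmm_vcpu vcpu;

            static int
            run_one_vcpu(void)
            {
                if (nvmm_machine_create(&mach) == -1)
                    return -1;
                /* cpuid 0 is a placeholder; QEMU passes cpu->cpu_index. */
                if (nvmm_vcpu_create(&mach, 0, &vcpu) == -1)
                    return -1;

                /* State is edited in place and pushed with a bitmap. */
                vcpu.state->gprs[NVMM_X64_GPR_RIP] = 0x1000;
                if (nvmm_vcpu_setstate(&mach, &vcpu, NVMM_X64_STATE_GPRS) == -1)
                    return -1;

                /* Exit information comes back through vcpu.exit. */
                if (nvmm_vcpu_run(&mach, &vcpu) == -1)
                    return -1;
                if (vcpu.exit->reason == NVMM_EXIT_HALTED) {
                    /* Guest executed hlt; nothing else to do here. */
                }

                nvmm_vcpu_destroy(&mach, &vcpu);
                return 0;
            }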
       
       Diffstat:
        emulators/qemu/patch-nvmm-support |  468 ++++++++++++++++++-------------------
        1 files changed, 233 insertions(+), 235 deletions(-)
       ---
       diff -r 795b62f7041e -r e7d999a1d9e1 emulators/qemu/patch-nvmm-support
       --- a/emulators/qemu/patch-nvmm-support Tue Jun 11 15:38:52 2019 +0200
       +++ b/emulators/qemu/patch-nvmm-support Thu Jun 13 21:17:58 2019 +0200
       @@ -373,7 +373,7 @@
         
                 cpu_interrupt(cs, CPU_INTERRUPT_TPR);
        --- target/i386/Makefile.objs  2018-12-11 18:44:34.000000000 +0100
       -+++ target/i386/Makefile.objs  2019-03-21 20:48:31.775998305 +0100
       ++++ target/i386/Makefile.objs  2019-05-01 11:53:33.094579452 +0200
        @@ -17,6 +17,7 @@
         endif
         obj-$(CONFIG_HVF) += hvf/
       @@ -383,8 +383,8 @@
         obj-$(CONFIG_SEV) += sev.o
         obj-$(call lnot,$(CONFIG_SEV)) += sev-stub.o
        --- target/i386/nvmm-all.c     1970-01-01 01:00:00.000000000 +0100
       -+++ target/i386/nvmm-all.c     2019-05-11 09:04:16.041517843 +0200
       -@@ -0,0 +1,1170 @@
       ++++ target/i386/nvmm-all.c     2019-06-10 11:57:43.612013641 +0200
       +@@ -0,0 +1,1168 @@
        +/*
        + * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
        + *
       @@ -413,10 +413,8 @@
        +
        +#include <nvmm.h>
        +
       -+static bool nvmm_allowed = false;
       -+
       -+struct nvmm_vcpu {
       -+    nvmm_cpuid_t cpuid;
       ++struct qemu_vcpu {
       ++    struct nvmm_vcpu vcpu;
        +    uint8_t tpr;
        +    bool stop;
        +
       @@ -428,20 +426,25 @@
        +    bool int_shadow;
        +};
        +
       -+static struct {
       ++struct qemu_machine {
        +    struct nvmm_machine mach;
       -+} nvmm_global;
       ++};
       ++
       ++/* -------------------------------------------------------------------------- */
        +
       -+static struct nvmm_vcpu *
       -+get_nvmm_vcpu(CPUState *cpu)
       ++static bool nvmm_allowed = false;
       ++static struct qemu_machine qemu_mach;
       ++
       ++static struct qemu_vcpu *
       ++get_qemu_vcpu(CPUState *cpu)
        +{
       -+    return (struct nvmm_vcpu *)cpu->hax_vcpu;
       ++    return (struct qemu_vcpu *)cpu->hax_vcpu;
        +}
        +
        +static struct nvmm_machine *
        +get_nvmm_mach(void)
        +{
       -+    return &nvmm_global.mach;
       ++    return &qemu_mach.mach;
        +}
        +
        +/* -------------------------------------------------------------------------- */
       @@ -469,8 +472,9 @@
        +{
        +    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
        +    struct nvmm_machine *mach = get_nvmm_mach();
       -+    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
       -+    struct nvmm_x64_state state;
       ++    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
       ++    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
       ++    struct nvmm_x64_state *state = vcpu->state;
        +    uint64_t bitmap;
        +    size_t i;
        +    int ret;
       @@ -478,92 +482,92 @@
        +    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
        +
        +    /* GPRs. */
       -+    state.gprs[NVMM_X64_GPR_RAX] = env->regs[R_EAX];
       -+    state.gprs[NVMM_X64_GPR_RCX] = env->regs[R_ECX];
       -+    state.gprs[NVMM_X64_GPR_RDX] = env->regs[R_EDX];
       -+    state.gprs[NVMM_X64_GPR_RBX] = env->regs[R_EBX];
       -+    state.gprs[NVMM_X64_GPR_RSP] = env->regs[R_ESP];
       -+    state.gprs[NVMM_X64_GPR_RBP] = env->regs[R_EBP];
       -+    state.gprs[NVMM_X64_GPR_RSI] = env->regs[R_ESI];
       -+    state.gprs[NVMM_X64_GPR_RDI] = env->regs[R_EDI];
       -+    state.gprs[NVMM_X64_GPR_R8]  = env->regs[R_R8];
       -+    state.gprs[NVMM_X64_GPR_R9]  = env->regs[R_R9];
       -+    state.gprs[NVMM_X64_GPR_R10] = env->regs[R_R10];
       -+    state.gprs[NVMM_X64_GPR_R11] = env->regs[R_R11];
       -+    state.gprs[NVMM_X64_GPR_R12] = env->regs[R_R12];
       -+    state.gprs[NVMM_X64_GPR_R13] = env->regs[R_R13];
       -+    state.gprs[NVMM_X64_GPR_R14] = env->regs[R_R14];
       -+    state.gprs[NVMM_X64_GPR_R15] = env->regs[R_R15];
       ++    state->gprs[NVMM_X64_GPR_RAX] = env->regs[R_EAX];
       ++    state->gprs[NVMM_X64_GPR_RCX] = env->regs[R_ECX];
       ++    state->gprs[NVMM_X64_GPR_RDX] = env->regs[R_EDX];
       ++    state->gprs[NVMM_X64_GPR_RBX] = env->regs[R_EBX];
       ++    state->gprs[NVMM_X64_GPR_RSP] = env->regs[R_ESP];
       ++    state->gprs[NVMM_X64_GPR_RBP] = env->regs[R_EBP];
       ++    state->gprs[NVMM_X64_GPR_RSI] = env->regs[R_ESI];
       ++    state->gprs[NVMM_X64_GPR_RDI] = env->regs[R_EDI];
       ++    state->gprs[NVMM_X64_GPR_R8]  = env->regs[R_R8];
       ++    state->gprs[NVMM_X64_GPR_R9]  = env->regs[R_R9];
       ++    state->gprs[NVMM_X64_GPR_R10] = env->regs[R_R10];
       ++    state->gprs[NVMM_X64_GPR_R11] = env->regs[R_R11];
       ++    state->gprs[NVMM_X64_GPR_R12] = env->regs[R_R12];
       ++    state->gprs[NVMM_X64_GPR_R13] = env->regs[R_R13];
       ++    state->gprs[NVMM_X64_GPR_R14] = env->regs[R_R14];
       ++    state->gprs[NVMM_X64_GPR_R15] = env->regs[R_R15];
        +
        +    /* RIP and RFLAGS. */
       -+    state.gprs[NVMM_X64_GPR_RIP] = env->eip;
       -+    state.gprs[NVMM_X64_GPR_RFLAGS] = env->eflags;
       ++    state->gprs[NVMM_X64_GPR_RIP] = env->eip;
       ++    state->gprs[NVMM_X64_GPR_RFLAGS] = env->eflags;
        +
        +    /* Segments. */
       -+    nvmm_set_segment(&state.segs[NVMM_X64_SEG_CS], &env->segs[R_CS]);
       -+    nvmm_set_segment(&state.segs[NVMM_X64_SEG_DS], &env->segs[R_DS]);
       -+    nvmm_set_segment(&state.segs[NVMM_X64_SEG_ES], &env->segs[R_ES]);
       -+    nvmm_set_segment(&state.segs[NVMM_X64_SEG_FS], &env->segs[R_FS]);
       -+    nvmm_set_segment(&state.segs[NVMM_X64_SEG_GS], &env->segs[R_GS]);
       -+    nvmm_set_segment(&state.segs[NVMM_X64_SEG_SS], &env->segs[R_SS]);
       ++    nvmm_set_segment(&state->segs[NVMM_X64_SEG_CS], &env->segs[R_CS]);
       ++    nvmm_set_segment(&state->segs[NVMM_X64_SEG_DS], &env->segs[R_DS]);
       ++    nvmm_set_segment(&state->segs[NVMM_X64_SEG_ES], &env->segs[R_ES]);
       ++    nvmm_set_segment(&state->segs[NVMM_X64_SEG_FS], &env->segs[R_FS]);
       ++    nvmm_set_segment(&state->segs[NVMM_X64_SEG_GS], &env->segs[R_GS]);
       ++    nvmm_set_segment(&state->segs[NVMM_X64_SEG_SS], &env->segs[R_SS]);
        +
        +    /* Special segments. */
       -+    nvmm_set_segment(&state.segs[NVMM_X64_SEG_GDT], &env->gdt);
       -+    nvmm_set_segment(&state.segs[NVMM_X64_SEG_LDT], &env->ldt);
       -+    nvmm_set_segment(&state.segs[NVMM_X64_SEG_TR], &env->tr);
       -+    nvmm_set_segment(&state.segs[NVMM_X64_SEG_IDT], &env->idt);
       ++    nvmm_set_segment(&state->segs[NVMM_X64_SEG_GDT], &env->gdt);
       ++    nvmm_set_segment(&state->segs[NVMM_X64_SEG_LDT], &env->ldt);
       ++    nvmm_set_segment(&state->segs[NVMM_X64_SEG_TR], &env->tr);
       ++    nvmm_set_segment(&state->segs[NVMM_X64_SEG_IDT], &env->idt);
        +
        +    /* Control registers. */
       -+    state.crs[NVMM_X64_CR_CR0] = env->cr[0];
       -+    state.crs[NVMM_X64_CR_CR2] = env->cr[2];
       -+    state.crs[NVMM_X64_CR_CR3] = env->cr[3];
       -+    state.crs[NVMM_X64_CR_CR4] = env->cr[4];
       -+    state.crs[NVMM_X64_CR_CR8] = vcpu->tpr;
       -+    state.crs[NVMM_X64_CR_XCR0] = env->xcr0;
       ++    state->crs[NVMM_X64_CR_CR0] = env->cr[0];
       ++    state->crs[NVMM_X64_CR_CR2] = env->cr[2];
       ++    state->crs[NVMM_X64_CR_CR3] = env->cr[3];
       ++    state->crs[NVMM_X64_CR_CR4] = env->cr[4];
       ++    state->crs[NVMM_X64_CR_CR8] = qcpu->tpr;
       ++    state->crs[NVMM_X64_CR_XCR0] = env->xcr0;
        +
        +    /* Debug registers. */
       -+    state.drs[NVMM_X64_DR_DR0] = env->dr[0];
       -+    state.drs[NVMM_X64_DR_DR1] = env->dr[1];
       -+    state.drs[NVMM_X64_DR_DR2] = env->dr[2];
       -+    state.drs[NVMM_X64_DR_DR3] = env->dr[3];
       -+    state.drs[NVMM_X64_DR_DR6] = env->dr[6];
       -+    state.drs[NVMM_X64_DR_DR7] = env->dr[7];
       ++    state->drs[NVMM_X64_DR_DR0] = env->dr[0];
       ++    state->drs[NVMM_X64_DR_DR1] = env->dr[1];
       ++    state->drs[NVMM_X64_DR_DR2] = env->dr[2];
       ++    state->drs[NVMM_X64_DR_DR3] = env->dr[3];
       ++    state->drs[NVMM_X64_DR_DR6] = env->dr[6];
       ++    state->drs[NVMM_X64_DR_DR7] = env->dr[7];
        +
        +    /* FPU. */
       -+    state.fpu.fx_cw = env->fpuc;
       -+    state.fpu.fx_sw = (env->fpus & ~0x3800) | ((env->fpstt & 0x7) << 11);
       -+    state.fpu.fx_tw = 0;
       ++    state->fpu.fx_cw = env->fpuc;
       ++    state->fpu.fx_sw = (env->fpus & ~0x3800) | ((env->fpstt & 0x7) << 11);
       ++    state->fpu.fx_tw = 0;
        +    for (i = 0; i < 8; i++) {
       -+        state.fpu.fx_tw |= (!env->fptags[i]) << i;
       ++        state->fpu.fx_tw |= (!env->fptags[i]) << i;
        +    }
       -+    state.fpu.fx_opcode = env->fpop;
       -+    state.fpu.fx_ip.fa_64 = env->fpip;
       -+    state.fpu.fx_dp.fa_64 = env->fpdp;
       -+    state.fpu.fx_mxcsr = env->mxcsr;
       -+    state.fpu.fx_mxcsr_mask = 0x0000FFFF;
       -+    assert(sizeof(state.fpu.fx_87_ac) == sizeof(env->fpregs));
       -+    memcpy(state.fpu.fx_87_ac, env->fpregs, sizeof(env->fpregs));
       ++    state->fpu.fx_opcode = env->fpop;
       ++    state->fpu.fx_ip.fa_64 = env->fpip;
       ++    state->fpu.fx_dp.fa_64 = env->fpdp;
       ++    state->fpu.fx_mxcsr = env->mxcsr;
       ++    state->fpu.fx_mxcsr_mask = 0x0000FFFF;
       ++    assert(sizeof(state->fpu.fx_87_ac) == sizeof(env->fpregs));
       ++    memcpy(state->fpu.fx_87_ac, env->fpregs, sizeof(env->fpregs));
        +    for (i = 0; i < 16; i++) {
       -+        memcpy(&state.fpu.fx_xmm[i].xmm_bytes[0],
       ++        memcpy(&state->fpu.fx_xmm[i].xmm_bytes[0],
        +            &env->xmm_regs[i].ZMM_Q(0), 8);
       -+        memcpy(&state.fpu.fx_xmm[i].xmm_bytes[8],
       ++        memcpy(&state->fpu.fx_xmm[i].xmm_bytes[8],
        +            &env->xmm_regs[i].ZMM_Q(1), 8);
        +    }
        +
        +    /* MSRs. */
       -+    state.msrs[NVMM_X64_MSR_EFER] = env->efer;
       -+    state.msrs[NVMM_X64_MSR_STAR] = env->star;
       ++    state->msrs[NVMM_X64_MSR_EFER] = env->efer;
       ++    state->msrs[NVMM_X64_MSR_STAR] = env->star;
        +#ifdef TARGET_X86_64
       -+    state.msrs[NVMM_X64_MSR_LSTAR] = env->lstar;
       -+    state.msrs[NVMM_X64_MSR_CSTAR] = env->cstar;
       -+    state.msrs[NVMM_X64_MSR_SFMASK] = env->fmask;
       -+    state.msrs[NVMM_X64_MSR_KERNELGSBASE] = env->kernelgsbase;
       ++    state->msrs[NVMM_X64_MSR_LSTAR] = env->lstar;
       ++    state->msrs[NVMM_X64_MSR_CSTAR] = env->cstar;
       ++    state->msrs[NVMM_X64_MSR_SFMASK] = env->fmask;
       ++    state->msrs[NVMM_X64_MSR_KERNELGSBASE] = env->kernelgsbase;
        +#endif
       -+    state.msrs[NVMM_X64_MSR_SYSENTER_CS]  = env->sysenter_cs;
       -+    state.msrs[NVMM_X64_MSR_SYSENTER_ESP] = env->sysenter_esp;
       -+    state.msrs[NVMM_X64_MSR_SYSENTER_EIP] = env->sysenter_eip;
       -+    state.msrs[NVMM_X64_MSR_PAT] = env->pat;
       -+    state.msrs[NVMM_X64_MSR_TSC] = env->tsc;
       ++    state->msrs[NVMM_X64_MSR_SYSENTER_CS]  = env->sysenter_cs;
       ++    state->msrs[NVMM_X64_MSR_SYSENTER_ESP] = env->sysenter_esp;
       ++    state->msrs[NVMM_X64_MSR_SYSENTER_EIP] = env->sysenter_eip;
       ++    state->msrs[NVMM_X64_MSR_PAT] = env->pat;
       ++    state->msrs[NVMM_X64_MSR_TSC] = env->tsc;
        +
        +    bitmap =
        +        NVMM_X64_STATE_SEGS |
       @@ -573,7 +577,7 @@
        +        NVMM_X64_STATE_MSRS |
        +        NVMM_X64_STATE_FPU;
        +
       -+    ret = nvmm_vcpu_setstate(mach, vcpu->cpuid, &state, bitmap);
       ++    ret = nvmm_vcpu_setstate(mach, vcpu, bitmap);
        +    if (ret == -1) {
        +        error_report("NVMM: Failed to set virtual processor context,"
        +            " error=%d", errno);
       @@ -603,9 +607,10 @@
        +{
        +    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
        +    struct nvmm_machine *mach = get_nvmm_mach();
       -+    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
       ++    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
       ++    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
        +    X86CPU *x86_cpu = X86_CPU(cpu);
       -+    struct nvmm_x64_state state;
       ++    struct nvmm_x64_state *state = vcpu->state;
        +    uint64_t bitmap, tpr;
        +    size_t i;
        +    int ret;
       @@ -620,102 +625,102 @@
        +        NVMM_X64_STATE_MSRS |
        +        NVMM_X64_STATE_FPU;
        +
       -+    ret = nvmm_vcpu_getstate(mach, vcpu->cpuid, &state, bitmap);
       ++    ret = nvmm_vcpu_getstate(mach, vcpu, bitmap);
        +    if (ret == -1) {
        +        error_report("NVMM: Failed to get virtual processor context,"
        +            " error=%d", errno);
        +    }
        +
        +    /* GPRs. */
       -+    env->regs[R_EAX] = state.gprs[NVMM_X64_GPR_RAX];
       -+    env->regs[R_ECX] = state.gprs[NVMM_X64_GPR_RCX];
       -+    env->regs[R_EDX] = state.gprs[NVMM_X64_GPR_RDX];
       -+    env->regs[R_EBX] = state.gprs[NVMM_X64_GPR_RBX];
       -+    env->regs[R_ESP] = state.gprs[NVMM_X64_GPR_RSP];
       -+    env->regs[R_EBP] = state.gprs[NVMM_X64_GPR_RBP];
       -+    env->regs[R_ESI] = state.gprs[NVMM_X64_GPR_RSI];
       -+    env->regs[R_EDI] = state.gprs[NVMM_X64_GPR_RDI];
       -+    env->regs[R_R8]  = state.gprs[NVMM_X64_GPR_R8];
       -+    env->regs[R_R9]  = state.gprs[NVMM_X64_GPR_R9];
       -+    env->regs[R_R10] = state.gprs[NVMM_X64_GPR_R10];
       -+    env->regs[R_R11] = state.gprs[NVMM_X64_GPR_R11];
       -+    env->regs[R_R12] = state.gprs[NVMM_X64_GPR_R12];
       -+    env->regs[R_R13] = state.gprs[NVMM_X64_GPR_R13];
       -+    env->regs[R_R14] = state.gprs[NVMM_X64_GPR_R14];
       -+    env->regs[R_R15] = state.gprs[NVMM_X64_GPR_R15];
       ++    env->regs[R_EAX] = state->gprs[NVMM_X64_GPR_RAX];
       ++    env->regs[R_ECX] = state->gprs[NVMM_X64_GPR_RCX];
       ++    env->regs[R_EDX] = state->gprs[NVMM_X64_GPR_RDX];
       ++    env->regs[R_EBX] = state->gprs[NVMM_X64_GPR_RBX];
       ++    env->regs[R_ESP] = state->gprs[NVMM_X64_GPR_RSP];
       ++    env->regs[R_EBP] = state->gprs[NVMM_X64_GPR_RBP];
       ++    env->regs[R_ESI] = state->gprs[NVMM_X64_GPR_RSI];
       ++    env->regs[R_EDI] = state->gprs[NVMM_X64_GPR_RDI];
       ++    env->regs[R_R8]  = state->gprs[NVMM_X64_GPR_R8];
       ++    env->regs[R_R9]  = state->gprs[NVMM_X64_GPR_R9];
       ++    env->regs[R_R10] = state->gprs[NVMM_X64_GPR_R10];
       ++    env->regs[R_R11] = state->gprs[NVMM_X64_GPR_R11];
       ++    env->regs[R_R12] = state->gprs[NVMM_X64_GPR_R12];
       ++    env->regs[R_R13] = state->gprs[NVMM_X64_GPR_R13];
       ++    env->regs[R_R14] = state->gprs[NVMM_X64_GPR_R14];
       ++    env->regs[R_R15] = state->gprs[NVMM_X64_GPR_R15];
        +
        +    /* RIP and RFLAGS. */
       -+    env->eip = state.gprs[NVMM_X64_GPR_RIP];
       -+    env->eflags = state.gprs[NVMM_X64_GPR_RFLAGS];
       ++    env->eip = state->gprs[NVMM_X64_GPR_RIP];
       ++    env->eflags = state->gprs[NVMM_X64_GPR_RFLAGS];
        +
        +    /* Segments. */
       -+    nvmm_get_segment(&env->segs[R_ES], &state.segs[NVMM_X64_SEG_ES]);
       -+    nvmm_get_segment(&env->segs[R_CS], &state.segs[NVMM_X64_SEG_CS]);
       -+    nvmm_get_segment(&env->segs[R_SS], &state.segs[NVMM_X64_SEG_SS]);
       -+    nvmm_get_segment(&env->segs[R_DS], &state.segs[NVMM_X64_SEG_DS]);
       -+    nvmm_get_segment(&env->segs[R_FS], &state.segs[NVMM_X64_SEG_FS]);
       -+    nvmm_get_segment(&env->segs[R_GS], &state.segs[NVMM_X64_SEG_GS]);
       ++    nvmm_get_segment(&env->segs[R_ES], &state->segs[NVMM_X64_SEG_ES]);
       ++    nvmm_get_segment(&env->segs[R_CS], &state->segs[NVMM_X64_SEG_CS]);
       ++    nvmm_get_segment(&env->segs[R_SS], &state->segs[NVMM_X64_SEG_SS]);
       ++    nvmm_get_segment(&env->segs[R_DS], &state->segs[NVMM_X64_SEG_DS]);
       ++    nvmm_get_segment(&env->segs[R_FS], &state->segs[NVMM_X64_SEG_FS]);
       ++    nvmm_get_segment(&env->segs[R_GS], &state->segs[NVMM_X64_SEG_GS]);
        +
        +    /* Special segments. */
       -+    nvmm_get_segment(&env->gdt, &state.segs[NVMM_X64_SEG_GDT]);
       -+    nvmm_get_segment(&env->ldt, &state.segs[NVMM_X64_SEG_LDT]);
       -+    nvmm_get_segment(&env->tr, &state.segs[NVMM_X64_SEG_TR]);
       -+    nvmm_get_segment(&env->idt, &state.segs[NVMM_X64_SEG_IDT]);
       ++    nvmm_get_segment(&env->gdt, &state->segs[NVMM_X64_SEG_GDT]);
       ++    nvmm_get_segment(&env->ldt, &state->segs[NVMM_X64_SEG_LDT]);
       ++    nvmm_get_segment(&env->tr, &state->segs[NVMM_X64_SEG_TR]);
       ++    nvmm_get_segment(&env->idt, &state->segs[NVMM_X64_SEG_IDT]);
        +
        +    /* Control registers. */
       -+    env->cr[0] = state.crs[NVMM_X64_CR_CR0];
       -+    env->cr[2] = state.crs[NVMM_X64_CR_CR2];
       -+    env->cr[3] = state.crs[NVMM_X64_CR_CR3];
       -+    env->cr[4] = state.crs[NVMM_X64_CR_CR4];
       -+    tpr = state.crs[NVMM_X64_CR_CR8];
       -+    if (tpr != vcpu->tpr) {
       -+        vcpu->tpr = tpr;
       ++    env->cr[0] = state->crs[NVMM_X64_CR_CR0];
       ++    env->cr[2] = state->crs[NVMM_X64_CR_CR2];
       ++    env->cr[3] = state->crs[NVMM_X64_CR_CR3];
       ++    env->cr[4] = state->crs[NVMM_X64_CR_CR4];
       ++    tpr = state->crs[NVMM_X64_CR_CR8];
       ++    if (tpr != qcpu->tpr) {
       ++        qcpu->tpr = tpr;
        +        cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
        +    }
       -+    env->xcr0 = state.crs[NVMM_X64_CR_XCR0];
       ++    env->xcr0 = state->crs[NVMM_X64_CR_XCR0];
        +
        +    /* Debug registers. */
       -+    env->dr[0] = state.drs[NVMM_X64_DR_DR0];
       -+    env->dr[1] = state.drs[NVMM_X64_DR_DR1];
       -+    env->dr[2] = state.drs[NVMM_X64_DR_DR2];
       -+    env->dr[3] = state.drs[NVMM_X64_DR_DR3];
       -+    env->dr[6] = state.drs[NVMM_X64_DR_DR6];
       -+    env->dr[7] = state.drs[NVMM_X64_DR_DR7];
       ++    env->dr[0] = state->drs[NVMM_X64_DR_DR0];
       ++    env->dr[1] = state->drs[NVMM_X64_DR_DR1];
       ++    env->dr[2] = state->drs[NVMM_X64_DR_DR2];
       ++    env->dr[3] = state->drs[NVMM_X64_DR_DR3];
       ++    env->dr[6] = state->drs[NVMM_X64_DR_DR6];
       ++    env->dr[7] = state->drs[NVMM_X64_DR_DR7];
        +
        +    /* FPU. */
       -+    env->fpuc = state.fpu.fx_cw;
       -+    env->fpstt = (state.fpu.fx_sw >> 11) & 0x7;
       -+    env->fpus = state.fpu.fx_sw & ~0x3800;
       ++    env->fpuc = state->fpu.fx_cw;
       ++    env->fpstt = (state->fpu.fx_sw >> 11) & 0x7;
       ++    env->fpus = state->fpu.fx_sw & ~0x3800;
        +    for (i = 0; i < 8; i++) {
       -+        env->fptags[i] = !((state.fpu.fx_tw >> i) & 1);
       ++        env->fptags[i] = !((state->fpu.fx_tw >> i) & 1);
        +    }
       -+    env->fpop = state.fpu.fx_opcode;
       -+    env->fpip = state.fpu.fx_ip.fa_64;
       -+    env->fpdp = state.fpu.fx_dp.fa_64;
       -+    env->mxcsr = state.fpu.fx_mxcsr;
       -+    assert(sizeof(state.fpu.fx_87_ac) == sizeof(env->fpregs));
       -+    memcpy(env->fpregs, state.fpu.fx_87_ac, sizeof(env->fpregs));
       ++    env->fpop = state->fpu.fx_opcode;
       ++    env->fpip = state->fpu.fx_ip.fa_64;
       ++    env->fpdp = state->fpu.fx_dp.fa_64;
       ++    env->mxcsr = state->fpu.fx_mxcsr;
       ++    assert(sizeof(state->fpu.fx_87_ac) == sizeof(env->fpregs));
       ++    memcpy(env->fpregs, state->fpu.fx_87_ac, sizeof(env->fpregs));
        +    for (i = 0; i < 16; i++) {
        +        memcpy(&env->xmm_regs[i].ZMM_Q(0),
       -+            &state.fpu.fx_xmm[i].xmm_bytes[0], 8);
       ++            &state->fpu.fx_xmm[i].xmm_bytes[0], 8);
        +        memcpy(&env->xmm_regs[i].ZMM_Q(1),
       -+            &state.fpu.fx_xmm[i].xmm_bytes[8], 8);
       ++            &state->fpu.fx_xmm[i].xmm_bytes[8], 8);
        +    }
        +
        +    /* MSRs. */
       -+    env->efer = state.msrs[NVMM_X64_MSR_EFER];
       -+    env->star = state.msrs[NVMM_X64_MSR_STAR];
       ++    env->efer = state->msrs[NVMM_X64_MSR_EFER];
       ++    env->star = state->msrs[NVMM_X64_MSR_STAR];
        +#ifdef TARGET_X86_64
       -+    env->lstar = state.msrs[NVMM_X64_MSR_LSTAR];
       -+    env->cstar = state.msrs[NVMM_X64_MSR_CSTAR];
       -+    env->fmask = state.msrs[NVMM_X64_MSR_SFMASK];
       -+    env->kernelgsbase = state.msrs[NVMM_X64_MSR_KERNELGSBASE];
       ++    env->lstar = state->msrs[NVMM_X64_MSR_LSTAR];
       ++    env->cstar = state->msrs[NVMM_X64_MSR_CSTAR];
       ++    env->fmask = state->msrs[NVMM_X64_MSR_SFMASK];
       ++    env->kernelgsbase = state->msrs[NVMM_X64_MSR_KERNELGSBASE];
        +#endif
       -+    env->sysenter_cs  = state.msrs[NVMM_X64_MSR_SYSENTER_CS];
       -+    env->sysenter_esp = state.msrs[NVMM_X64_MSR_SYSENTER_ESP];
       -+    env->sysenter_eip = state.msrs[NVMM_X64_MSR_SYSENTER_EIP];
       -+    env->pat = state.msrs[NVMM_X64_MSR_PAT];
       -+    env->tsc = state.msrs[NVMM_X64_MSR_TSC];
       ++    env->sysenter_cs  = state->msrs[NVMM_X64_MSR_SYSENTER_CS];
       ++    env->sysenter_esp = state->msrs[NVMM_X64_MSR_SYSENTER_ESP];
       ++    env->sysenter_eip = state->msrs[NVMM_X64_MSR_SYSENTER_EIP];
       ++    env->pat = state->msrs[NVMM_X64_MSR_PAT];
       ++    env->tsc = state->msrs[NVMM_X64_MSR_TSC];
        +
        +    x86_update_hflags(env);
        +}
       @@ -724,22 +729,21 @@
        +nvmm_can_take_int(CPUState *cpu)
        +{
        +    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
       -+    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
       ++    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
       ++    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
        +    struct nvmm_machine *mach = get_nvmm_mach();
        +
       -+    if (vcpu->int_window_exit) {
       ++    if (qcpu->int_window_exit) {
        +        return false;
        +    }
        +
       -+    if (vcpu->int_shadow || (!(env->eflags & IF_MASK))) {
       -+        struct nvmm_x64_state state;
       ++    if (qcpu->int_shadow || (!(env->eflags & IF_MASK))) {
       ++        struct nvmm_x64_state *state = vcpu->state;
        +
        +        /* Exit on interrupt window. */
       -+        nvmm_vcpu_getstate(mach, vcpu->cpuid, &state,
       -+            NVMM_X64_STATE_INTR);
       -+        state.intr.int_window_exiting = 1;
       -+        nvmm_vcpu_setstate(mach, vcpu->cpuid, &state,
       -+            NVMM_X64_STATE_INTR);
       ++        nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_INTR);
       ++        state->intr.int_window_exiting = 1;
       ++        nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_INTR);
        +
        +        return false;
        +    }
       @@ -750,14 +754,14 @@
        +static bool
        +nvmm_can_take_nmi(CPUState *cpu)
        +{
       -+    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
       ++    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
        +
        +    /*
        +     * Contrary to INTs, NMIs always schedule an exit when they are
        +     * completed. Therefore, if window-exiting is enabled, it means
        +     * NMIs are blocked.
        +     */
       -+    if (vcpu->nmi_window_exit) {
       ++    if (qcpu->nmi_window_exit) {
        +        return false;
        +    }
        +
       @@ -773,22 +777,21 @@
        +{
        +    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
        +    struct nvmm_machine *mach = get_nvmm_mach();
       -+    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
       ++    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
       ++    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
        +    X86CPU *x86_cpu = X86_CPU(cpu);
       -+    struct nvmm_x64_state state;
       -+    struct nvmm_event event;
       ++    struct nvmm_x64_state *state = vcpu->state;
       ++    struct nvmm_event *event = vcpu->event;
        +    bool has_event = false;
        +    bool sync_tpr = false;
        +    uint8_t tpr;
        +    int ret;
        +
       -+    memset(&event, 0, sizeof(event));
       -+
        +    qemu_mutex_lock_iothread();
        +
        +    tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
       -+    if (tpr != vcpu->tpr) {
       -+        vcpu->tpr = tpr;
       ++    if (tpr != qcpu->tpr) {
       ++        qcpu->tpr = tpr;
        +        sync_tpr = true;
        +    }
        +
       @@ -803,8 +806,8 @@
        +    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        +        if (nvmm_can_take_nmi(cpu)) {
        +            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
       -+            event.type = NVMM_EVENT_INTERRUPT_HW;
       -+            event.vector = 2;
       ++            event->type = NVMM_EVENT_INTERRUPT_HW;
       ++            event->vector = 2;
        +            has_event = true;
        +        }
        +    }
       @@ -812,8 +815,8 @@
        +    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
        +        if (nvmm_can_take_int(cpu)) {
        +            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
       -+            event.type = NVMM_EVENT_INTERRUPT_HW;
       -+            event.vector = cpu_get_pic_interrupt(env);
       ++            event->type = NVMM_EVENT_INTERRUPT_HW;
       ++            event->vector = cpu_get_pic_interrupt(env);
        +            has_event = true;
        +        }
        +    }
       @@ -824,17 +827,15 @@
        +    }
        +
        +    if (sync_tpr) {
       -+        ret = nvmm_vcpu_getstate(mach, vcpu->cpuid, &state,
       -+            NVMM_X64_STATE_CRS);
       ++        ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_CRS);
        +        if (ret == -1) {
        +            error_report("NVMM: Failed to get CPU state,"
        +                " error=%d", errno);
        +        }
        +
       -+        state.crs[NVMM_X64_CR_CR8] = vcpu->tpr;
       ++        state->crs[NVMM_X64_CR_CR8] = qcpu->tpr;
        +
       -+        ret = nvmm_vcpu_setstate(mach, vcpu->cpuid, &state,
       -+            NVMM_X64_STATE_CRS);
       ++        ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_CRS);
        +        if (ret == -1) {
        +            error_report("NVMM: Failed to set CPU state,"
        +                " error=%d", errno);
       @@ -842,7 +843,7 @@
        +    }
        +
        +    if (has_event) {
       -+        ret = nvmm_vcpu_inject(mach, vcpu->cpuid, &event);
       ++        ret = nvmm_vcpu_inject(mach, vcpu);
        +        if (ret == -1) {
        +            error_report("NVMM: Failed to inject event,"
        +                " error=%d", errno);
       @@ -859,25 +860,25 @@
        +static void
        +nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_exit *exit)
        +{
       -+    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
       ++    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
        +    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
        +    X86CPU *x86_cpu = X86_CPU(cpu);
        +    uint64_t tpr;
        +
        +    env->eflags = exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS];
        +
       -+    vcpu->int_shadow =
       ++    qcpu->int_shadow =
        +        exit->exitstate[NVMM_X64_EXITSTATE_INT_SHADOW];
       -+    vcpu->int_window_exit =
       ++    qcpu->int_window_exit =
        +        exit->exitstate[NVMM_X64_EXITSTATE_INT_WINDOW_EXIT];
       -+    vcpu->nmi_window_exit =
       ++    qcpu->nmi_window_exit =
        +        exit->exitstate[NVMM_X64_EXITSTATE_NMI_WINDOW_EXIT];
        +
        +    tpr = exit->exitstate[NVMM_X64_EXITSTATE_CR8];
       -+    if (vcpu->tpr != tpr) {
       -+        vcpu->tpr = tpr;
       ++    if (qcpu->tpr != tpr) {
       ++        qcpu->tpr = tpr;
        +        qemu_mutex_lock_iothread();
       -+        cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr);
       ++        cpu_set_apic_tpr(x86_cpu->apic_state, qcpu->tpr);
        +        qemu_mutex_unlock_iothread();
        +    }
        +}
       @@ -919,30 +920,28 @@
        +/* -------------------------------------------------------------------------- */
        +
        +static int
       -+nvmm_handle_mem(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
       -+    struct nvmm_exit *exit)
       ++nvmm_handle_mem(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
        +{
        +    int ret;
        +
       -+    ret = nvmm_assist_mem(mach, vcpu->cpuid, exit);
       ++    ret = nvmm_assist_mem(mach, vcpu);
        +    if (ret == -1) {
        +        error_report("NVMM: Mem Assist Failed [gpa=%p]",
       -+            (void *)exit->u.mem.gpa);
       ++            (void *)vcpu->exit->u.mem.gpa);
        +    }
        +
        +    return ret;
        +}
        +
        +static int
       -+nvmm_handle_io(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
       -+    struct nvmm_exit *exit)
       ++nvmm_handle_io(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
        +{
        +    int ret;
        +
       -+    ret = nvmm_assist_io(mach, vcpu->cpuid, exit);
       ++    ret = nvmm_assist_io(mach, vcpu);
        +    if (ret == -1) {
        +        error_report("NVMM: I/O Assist Failed [port=%d]",
       -+            (int)exit->u.io.port);
       ++            (int)vcpu->exit->u.io.port);
        +    }
        +
        +    return ret;
       @@ -952,9 +951,10 @@
        +nvmm_handle_msr(struct nvmm_machine *mach, CPUState *cpu,
        +    struct nvmm_exit *exit)
        +{
       -+    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
       ++    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
       ++    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
        +    X86CPU *x86_cpu = X86_CPU(cpu);
       -+    struct nvmm_x64_state state;
       ++    struct nvmm_x64_state *state = vcpu->state;
        +    uint64_t val;
        +    int ret;
        +
       @@ -970,29 +970,27 @@
        +        break;
        +    default:
        +        // TODO: more MSRs to add?
       -+        error_report("NVMM: Unexpected MSR 0x%lx, ignored",
       -+            exit->u.msr.msr);
        +        if (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) {
        +            val = 0;
        +        }
       ++        error_report("NVMM: Unexpected %sMSR 0x%lx [val=%lx], ignored",
       ++            (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) ? "RD" : "WR",
       ++            exit->u.msr.msr, val);
        +        break;
        +    }
        +
       -+    ret = nvmm_vcpu_getstate(mach, vcpu->cpuid, &state,
       -+        NVMM_X64_STATE_GPRS);
       ++    ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_GPRS);
        +    if (ret == -1) {
        +        return -1;
        +    }
        +
        +    if (exit->u.msr.type == NVMM_EXIT_MSR_RDMSR) {
       -+        state.gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
       -+        state.gprs[NVMM_X64_GPR_RDX] = (val >> 32);
       ++        state->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
       ++        state->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
        +    }
       ++    state->gprs[NVMM_X64_GPR_RIP] = exit->u.msr.npc;
        +
       -+    state.gprs[NVMM_X64_GPR_RIP] = exit->u.msr.npc;
       -+
       -+    ret = nvmm_vcpu_setstate(mach, vcpu->cpuid, &state,
       -+        NVMM_X64_STATE_GPRS);
       ++    ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS);
        +    if (ret == -1) {
        +        return -1;
        +    }
       @@ -1025,13 +1023,13 @@
        +static int
        +nvmm_inject_ud(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
        +{
       -+    struct nvmm_event event;
       ++    struct nvmm_event *event = vcpu->event;
        +
       -+    event.type = NVMM_EVENT_EXCEPTION;
       -+    event.vector = 6;
       -+    event.u.error = 0;
       ++    event->type = NVMM_EVENT_EXCEPTION;
       ++    event->vector = 6;
       ++    event->u.error = 0;
        +
       -+    return nvmm_vcpu_inject(mach, vcpu->cpuid, &event);
       ++    return nvmm_vcpu_inject(mach, vcpu);
        +}
        +
        +static int
       @@ -1039,9 +1037,10 @@
        +{
        +    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
        +    struct nvmm_machine *mach = get_nvmm_mach();
       -+    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
       ++    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
       ++    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
        +    X86CPU *x86_cpu = X86_CPU(cpu);
       -+    struct nvmm_exit exit;
       ++    struct nvmm_exit *exit = vcpu->exit;
        +    int ret;
        +
        +    /*
       @@ -1091,9 +1090,9 @@
        +            cpu->vcpu_dirty = false;
        +        }
        +
       -+        if (vcpu->stop) {
       ++        if (qcpu->stop) {
        +            cpu->exception_index = EXCP_INTERRUPT;
       -+            vcpu->stop = false;
       ++            qcpu->stop = false;
        +            ret = 1;
        +            break;
        +        }
       @@ -1104,26 +1103,26 @@
        +            qemu_cpu_kick_self();
        +        }
        +
       -+        ret = nvmm_vcpu_run(mach, vcpu->cpuid, &exit);
       ++        ret = nvmm_vcpu_run(mach, vcpu);
        +        if (ret == -1) {
        +            error_report("NVMM: Failed to exec a virtual processor,"
        +                " error=%d", errno);
        +            break;
        +        }
        +
       -+        nvmm_vcpu_post_run(cpu, &exit);
       ++        nvmm_vcpu_post_run(cpu, exit);
        +
       -+        switch (exit.reason) {
       ++        switch (exit->reason) {
        +        case NVMM_EXIT_NONE:
        +            break;
        +        case NVMM_EXIT_MEMORY:
       -+            ret = nvmm_handle_mem(mach, vcpu, &exit);
       ++            ret = nvmm_handle_mem(mach, vcpu);
        +            break;
        +        case NVMM_EXIT_IO:
       -+            ret = nvmm_handle_io(mach, vcpu, &exit);
       ++            ret = nvmm_handle_io(mach, vcpu);
        +            break;
        +        case NVMM_EXIT_MSR:
       -+            ret = nvmm_handle_msr(mach, cpu, &exit);
       ++            ret = nvmm_handle_msr(mach, cpu, exit);
        +            break;
        +        case NVMM_EXIT_INT_READY:
        +        case NVMM_EXIT_NMI_READY:
       @@ -1134,7 +1133,7 @@
        +            ret = nvmm_inject_ud(mach, vcpu);
        +            break;
        +        case NVMM_EXIT_HALTED:
       -+            ret = nvmm_handle_halted(mach, cpu, &exit);
       ++            ret = nvmm_handle_halted(mach, cpu, exit);
        +            break;
        +        case NVMM_EXIT_SHUTDOWN:
        +            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
       @@ -1142,8 +1141,8 @@
        +            ret = 1;
        +            break;
        +        default:
       -+            error_report("NVMM: Unexpected VM exit code %lx",
       -+                exit.reason);
       ++            error_report("NVMM: Unexpected VM exit code %lx [hw=%lx]",
       ++                exit->reason, exit->u.inv.hwcode);
        +            nvmm_get_registers(cpu);
        +            qemu_mutex_lock_iothread();
        +            qemu_system_guest_panicked(cpu_get_crash_info(cpu));
       @@ -1220,11 +1219,11 @@
        +static void
        +nvmm_ipi_signal(int sigcpu)
        +{
       -+    struct nvmm_vcpu *vcpu;
       ++    struct qemu_vcpu *qcpu;
        +
        +    if (current_cpu) {
       -+        vcpu = get_nvmm_vcpu(current_cpu);
       -+        vcpu->stop = true;
       ++        qcpu = get_qemu_vcpu(current_cpu);
       ++        qcpu->stop = true;
        +    }
        +}
        +
       @@ -1250,7 +1249,7 @@
        +{
        +    struct nvmm_machine *mach = get_nvmm_mach();
        +    Error *local_error = NULL;
       -+    struct nvmm_vcpu *vcpu;
       ++    struct qemu_vcpu *qcpu;
        +    int ret;
        +
        +    nvmm_init_cpu_signals();
       @@ -1268,23 +1267,22 @@
        +        }
        +    }
        +
       -+    vcpu = g_malloc0(sizeof(struct nvmm_vcpu));
       -+    if (vcpu == NULL) {
       ++    qcpu = g_malloc0(sizeof(*qcpu));
       ++    if (qcpu == NULL) {
        +        error_report("NVMM: Failed to allocate VCPU context.");
        +        return -ENOMEM;
        +    }
       -+    vcpu->cpuid = cpu->cpu_index;
        +
       -+    ret = nvmm_vcpu_create(mach, vcpu->cpuid);
       ++    ret = nvmm_vcpu_create(mach, cpu->cpu_index, &qcpu->vcpu);
        +    if (ret == -1) {
        +        error_report("NVMM: Failed to create a virtual processor,"
        +            " error=%d", errno);
       -+        g_free(vcpu);
       ++        g_free(qcpu);
        +        return -EINVAL;
        +    }
        +
        +    cpu->vcpu_dirty = true;
       -+    cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;
       ++    cpu->hax_vcpu = (struct hax_vcpu_state *)qcpu;
        +
        +    return 0;
        +}
       @@ -1316,9 +1314,9 @@
        +nvmm_destroy_vcpu(CPUState *cpu)
        +{
        +    struct nvmm_machine *mach = get_nvmm_mach();
       -+    struct nvmm_vcpu *vcpu = get_nvmm_vcpu(cpu);
       ++    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
        +
       -+    nvmm_vcpu_destroy(mach, vcpu->cpuid);
       ++    nvmm_vcpu_destroy(mach, &qcpu->vcpu);
        +    g_free(cpu->hax_vcpu);
        +}
        +
       @@ -1505,13 +1503,13 @@
        +        return -ENOSPC;
        +    }
        +
       -+    ret = nvmm_machine_create(&nvmm_global.mach);
       ++    ret = nvmm_machine_create(&qemu_mach.mach);
        +    if (ret == -1) {
        +        error_report("NVMM: Machine creation failed, error=%d", errno);
        +        return -ENOSPC;
        +    }
        +
       -+    ret = nvmm_accel_configure(&nvmm_global.mach);
       ++    ret = nvmm_accel_configure(&qemu_mach.mach);
        +    if (ret == -1) {
        +        error_report("NVMM: Machine configuration failed, error=%d",
        +            errno);