drakvuf_t

 
/* Opaque public handle: the API exposes drakvuf_t, a pointer to the
 * internal per-instance state defined below. */
typedef struct drakvuf* drakvuf_t;
 
/* Central DRAKVUF instance state: guest identity, Xen/LibVMI handles,
 * altp2m view ids, trap bookkeeping tables and event-loop plumbing. */
struct drakvuf
{
    char* dom_name;    // guest domain name
    domid_t domID;     // Xen domain id
    os_t os;           // guest OS type
 
    char* json_kernel_path;  // path to the kernel profile (JSON)
    char* json_wow_path;     // presumably the WoW64 profile path — TODO confirm
    json_object* json_wow;   // parsed profile loaded from json_wow_path
    bool libvmi_conf;
    bool get_userid;
 
    xen_interface_t* xen;    // Xen driver interface
    os_interface_t osi;      // OS-specific operations table
    // altp2m view ids. altp2m_idr is the shadow view used for remapped
    // gfns (guard2 traps switch to it, see _pre_mem_cb); altp2m_idrx is
    // handled by a dedicated branch in _pre_mem_cb.
    uint16_t altp2m_idx, altp2m_idr, altp2m_idrx;
    bool vcpu_monitor[MAX_DRAKVUF_VCPU];
 
    xen_pfn_t sink_page_gfn;  // "empty" page; accesses to it are emulated away
 
    event_response_t int3_response_flags;
 
    // VMI
    unsigned long flush_counter;
    GRecMutex vmi_lock;   // guards vmi (drakvuf_lock_and_get_vmi / drakvuf_release_vmi)
    vmi_instance_t vmi;
 
    // One LibVMI event registration per event class.
    vmi_event_t cr3_event;
    vmi_event_t interrupt_event;
    vmi_event_t mem_event;
    vmi_event_t debug_event;
    vmi_event_t cpuid_event;
    vmi_event_t msr_event;
    vmi_event_t* step_event[MAX_DRAKVUF_VCPU];  // per-vCPU singlestep events
 
    // Profile-derived layout information.
    size_t* offsets;
    size_t* sizes;
    bitfield_t bitfields;
 
    size_t* wow_offsets;
 
    // Processing trap removals in trap callbacks
    // is problematic so we save all such requests
    // in a list to be processed after all callbacks
    // are finished.
    bool in_callback;
    GHashTable* remove_traps;
 
    int interrupted;       // nonzero stops the loop; set to -1 on fatal errors
    page_mode_t pm;        // guest paging mode
    unsigned int vcpus;    // number of guest vCPUs
    uint64_t init_memsize;
    xen_pfn_t max_gpfn;
    addr_t kernbase;
    addr_t kpgd;
 
    size_t address_width;  // guest pointer size in bytes
 
    GHashTable* remapped_gfns; // Key: gfn
    // val: remapped gfn
 
    GHashTable* breakpoint_lookup_pa;   // key: PA of trap
    // val: struct breakpoint
    GHashTable* breakpoint_lookup_gfn;  // key: gfn (size uint64_t)
    // val: GSList of addr_t* for trap locations
    GHashTable* breakpoint_lookup_trap; // key: trap pointer
    // val: struct breakpoint
 
    GHashTable* memaccess_lookup_gfn;  // key: gfn of trap
    // val: struct memaccess
    GHashTable* memaccess_lookup_trap; // key: trap pointer
    // val: struct memaccess
 
    // Per-category lists of registered traps.
    GSList* cr0, *cr3, *cr4, *debug, *cpuid, *catchall_breakpoint, *msr;
 
    // list of processes to be intercepted
    bool enable_cr3_based_interception;
    GSList* context_switch_intercept_processes;
 
    GSList* event_fd_info;     // the list of registered event FDs
    struct pollfd* event_fds;  // auto-generated pollfd for poll()
    int event_fd_cnt;          // auto-generated for poll()
    fd_info_t fd_info_lookup;  // auto-generated for fast drakvuf_loop lookups
    int poll_rc;
 
    uint64_t event_counter;    // incremental unique trap event ID
 
    ipt_state_t ipt_state[MAX_DRAKVUF_VCPU];
 
    int64_t limited_traps_ttl;
};
// Plugin constructor: caches basic guest facts, loads the optional
// filter / win32k profile inputs, then installs the OS-specific hooks.
syscalls::syscalls(drakvuf_t drakvuf, const syscalls_config* c, output_format_t output)
    : pluginex(drakvuf, output)
    , traps(NULL)
    , strings_to_free(NULL)
    , filter(NULL)
    , win32k_json(NULL)
    , format{output}
    , offsets(NULL)
{
    // Query the guest once up front so callbacks don't have to.
    os = drakvuf_get_os_type(drakvuf);
    kernel_base = drakvuf_get_kernel_base(drakvuf);
    reg_size = drakvuf_get_address_width(drakvuf); // register width in bytes: 4 or 8
    is32bit = (drakvuf_get_page_mode(drakvuf) != VMI_PM_IA32E);
    disable_sysret = c->disable_sysret;

    // Optional inputs from the plugin configuration.
    if (c->syscalls_filter_file)
        filter = read_syscalls_filter(c->syscalls_filter_file);
    if (c->win32k_profile)
        win32k_json = json_object_from_file(c->win32k_profile);

    // Hand off to the OS-specific hook installer.
    if (os != VMI_OS_WINDOWS)
        setup_linux(drakvuf, this);
    else
        setup_windows(drakvuf, this);
}

drakvuf_get_kernel_base

setup_linux

Inside setup_linux the drakvuf_add_trap function is called:

/*
 * Public trap-registration entry point.  Serializes access to the VMI
 * instance around the actual insertion logic in _drakvuf_add_trap().
 * Returns whether the trap is armed.
 */
bool drakvuf_add_trap(drakvuf_t drakvuf, drakvuf_trap_t* trap)
{
    drakvuf_lock_and_get_vmi(drakvuf);
    const bool added = _drakvuf_add_trap(drakvuf, trap);
    drakvuf_release_vmi(drakvuf);
    return added;
}
/*
 * Insert a trap while the VM is paused.  Caller must hold the VMI lock
 * (see drakvuf_add_trap()).
 *
 * Returns true when the trap is (or remains) armed, false on invalid
 * input or injection failure.
 */
static bool _drakvuf_add_trap(drakvuf_t drakvuf, drakvuf_trap_t* trap)
{
    bool ret;

    /* A trap without a callback is useless; reject it outright. */
    if (!trap || !trap->cb)
        return false;

    /* Provide a default add-hook callback when the caller supplied none. */
    if (!trap->ah_cb)
        trap->ah_cb = drakvuf_unhook_trap;

    /*
     * Removals requested from inside trap callbacks are deferred in
     * drakvuf->remove_traps.  If this exact trap is queued for removal,
     * cancel the pending removal instead of re-injecting it.
     */
    if (g_hash_table_lookup(drakvuf->remove_traps, trap))
    {
        g_hash_table_remove(drakvuf->remove_traps, trap);
        return true;
    }

    drakvuf_pause(drakvuf);

    /* Dispatch to the type-specific injection routine. */
    switch (trap->type)
    {
        case BREAKPOINT:
            ret = inject_trap_breakpoint(drakvuf, trap);
            break;
        case MEMACCESS:
            ret = inject_trap_mem(drakvuf, trap, 0);
            break;
        case REGISTER:
            ret = inject_trap_reg(drakvuf, trap);
            break;
        case DEBUG:
            ret = inject_trap_debug(drakvuf, trap);
            break;
        case CPUID:
            ret = inject_trap_cpuid(drakvuf, trap);
            break;
        case CATCHALL_BREAKPOINT:
            ret = inject_trap_catchall_breakpoint(drakvuf, trap);
            break;
        case __INVALID_TRAP_TYPE: /* fall-through */
        default:
            ret = false;
            break;
    }

    drakvuf_resume(drakvuf);
    return ret;
}

Before proceeding it’s important to understand that “Guard2” types are protecting remapped gfns, thus when hit these need to be swapped to the altp2m_idr view.

inject_trap_breakpoint


EVENT RESPONSE

The tracing of VMs is organized in three levels.

  • On the lowest level we use libvmi that provides basic access to the state of virtual machines. One layer above is the System Monitor (SM), which manages the usage of software breakpoints. This includes inserting and removing of breakpoints as well as calling functions whenever a breakpoint is reached.
  • To create a breakpoint we insert the instruction int 0x3 where the control flow should be interrupted, e.g., at the entry of a function. When this instruction is executed it causes a SIGTRAP signal which is handled by Xen, which sends an event on the event channel which is handled by libvmtrace.
  • Afterwards, libvmtrace calls the callback function which is registered for the breakpoint, which analyzes the process state. To continue the execution of the original code, the original instruction is restored when the callback returns. Then a single-step operation is performed and the software breakpoint is inserted again. Finally, execution continues normally.

While the event is processed in the monitoring virtual machine, the production virtual machine is paused.

During the initialization phase, in the init_vmi function, DRAKVUF calls SETUP_MEM_EVENT and vmi_register_event to register the callback.

/*
 * Convenience macro to setup a memory event.
 *
 * _event    - vmi_event_t* to populate
 * _gfn      - guest frame number to watch (ignored when _generic is set)
 * _access   - VMI_MEMACCESS_* mask to listen for
 * _callback - handler invoked on a matching access
 * _generic  - nonzero registers a catch-all event (gfn = ~0ULL)
 *
 * Every argument is parenthesized on expansion so expressions with
 * low-precedence operators expand safely.
 */
#define SETUP_MEM_EVENT(_event, _gfn, _access, _callback, _generic) \
        do { \
            (_event)->version = VMI_EVENTS_VERSION; \
            (_event)->type = VMI_EVENT_MEMORY; \
            (_event)->mem_event.gfn = (_generic) ? ~0ULL : (_gfn); \
            (_event)->mem_event.in_access = (_access); \
            (_event)->mem_event.generic = (_generic); \
            (_event)->callback = (_callback); \
        } while(0)
 
 
SETUP_MEM_EVENT(&drakvuf->mem_event, ~0ULL, VMI_MEMACCESS_RWX, pre_mem_cb, 1);
 
if (VMI_FAILURE == vmi_register_event(drakvuf->vmi, &drakvuf->mem_event))
{
      fprintf(stderr, "Failed to register generic mem event\n");
      return 0;
}

vmi_register_event registers the callback (event->callback) for memory accesses. The callback is defined as:

pre_mem_cb()

/*
 * LibVMI-facing memory-event entry point.  Recovers the drakvuf handle
 * stashed in event->data, takes the VMI lock, forwards to the real
 * handler and returns its response flags to LibVMI.
 */
static event_response_t pre_mem_cb(vmi_instance_t vmi, vmi_event_t* event)
{
    UNUSED(vmi);
    drakvuf_t drakvuf = (drakvuf_t)event->data;

    drakvuf_lock_and_get_vmi(drakvuf);
    const event_response_t rsp = _pre_mem_cb(drakvuf, event);
    drakvuf_release_vmi(drakvuf);

    return rsp;
}

_pre_mem_cb()

/* This hits on the first access on a page, so not in singlestep yet.
 *
 * Dispatches PRE-type memory-access traps registered for the faulting
 * gfn, simulates INT3 handlers on execute accesses, and — when traps
 * remain active on the page — switches the vCPU to an altp2m view and
 * arms a singlestep event so post_mem_cb can restore protection after
 * one instruction.
 */
static event_response_t _pre_mem_cb(drakvuf_t drakvuf, vmi_event_t* event)
{
    event_response_t rsp = 0;
    // Physical address of the access: gfn shifted to a frame base plus page offset.
    addr_t pa = (event->mem_event.gfn<<12) + event->mem_event.offset;
 
    flush_vmi(drakvuf);
 
    // Accesses to the sink ("empty") page are emulated away entirely.
    if (event->mem_event.gfn == drakvuf->sink_page_gfn)
    {
        PRINT_DEBUG("Somebody try to do something to the empty page, let's emulate it\n");
        return VMI_EVENT_RESPONSE_EMULATE_NOWRITE;
    }
 
    /*
     * Hit while in the IDRX view: drop back to the default view (slat 0)
     * for one singlestepped instruction; post_mem_idrx_cb gets the
     * collected context via the memcb_pass.
     */
    if (event->slat_id == drakvuf->altp2m_idrx)
    {
        PRINT_DEBUG("Pre mem cb with vCPU %u @ 0x%lx in the IDRX view %u: %c%c%c\n",
            event->vcpu_id, pa, event->slat_id,
            (event->mem_event.out_access & VMI_MEMACCESS_R) ? 'r' : '-',
            (event->mem_event.out_access & VMI_MEMACCESS_W) ? 'w' : '-',
            (event->mem_event.out_access & VMI_MEMACCESS_X) ? 'x' : '-'
        );
 
        struct memcb_pass* pass = (struct memcb_pass*)g_slice_alloc0(sizeof(struct memcb_pass));
        pass->drakvuf = drakvuf;
        pass->gfn = event->mem_event.gfn;
        pass->pa = pa;
        pass->traps = (GSList*)g_hash_table_lookup(drakvuf->breakpoint_lookup_gfn, &pass->gfn);
        pass->remapped_gfn = (struct remapped_gfn*)g_hash_table_lookup(drakvuf->remapped_gfns, &pass->gfn);
 
        event->slat_id = 0;
 
        drakvuf->step_event[event->vcpu_id]->callback = post_mem_idrx_cb;
        drakvuf->step_event[event->vcpu_id]->data = pass;
 
        return VMI_EVENT_RESPONSE_TOGGLE_SINGLESTEP | // Turn on singlestep
            VMI_EVENT_RESPONSE_SLAT_ID;
    }
 
    // Look up the memaccess trap wrapper registered for this gfn.
    struct wrapper* s = (struct wrapper*)g_hash_table_lookup(drakvuf->memaccess_lookup_gfn, &event->mem_event.gfn);
    if (!s)
    {
        PRINT_DEBUG("Event has been cleared for GFN 0x%lx but we are still in view %u\n",
            event->mem_event.gfn, event->slat_id);
        return 0;
    }
 
    PRINT_DEBUG("Pre mem cb with vCPU %u @ 0x%lx in view %u: %c%c%c\n",
        event->vcpu_id, pa, event->slat_id,
        (event->mem_event.out_access & VMI_MEMACCESS_R) ? 'r' : '-',
        (event->mem_event.out_access & VMI_MEMACCESS_W) ? 'w' : '-',
        (event->mem_event.out_access & VMI_MEMACCESS_X) ? 'x' : '-'
    );
 
    // Build the trap_info passed to every callback for this event.
    drakvuf_trap_info_t trap_info;
    proc_data_priv_t proc_data;
    proc_data_priv_t attached_proc_data;
    fill_common_event_trap_info(drakvuf, &trap_info, &proc_data, &attached_proc_data, event);
    trap_info.trap_pa = pa;
 
    // One unique event id is shared by all callbacks fired for this access.
    if (s->traps)
        trap_info.event_uid = ++drakvuf->event_counter;
 
    // Fire all PRE-type memaccess callbacks whose access mask matches.
    GSList* loop = s->traps;
    drakvuf->in_callback = 1;
    while (loop)
    {
        drakvuf_trap_t* trap = (drakvuf_trap_t*)loop->data;
 
        if (trap->cb && trap->memaccess.type == PRE &&
            (trap->memaccess.access & event->mem_event.out_access))
        {
            trap_info.trap = trap;
            rsp |= trap->cb(drakvuf, &trap_info);
        }
 
        loop = loop->next;
    }
 
    /* We need to call breakpoint handlers registered for this physical address */
    if (event->mem_event.out_access & VMI_MEMACCESS_X)
    {
        struct wrapper* sbp = (struct wrapper*)g_hash_table_lookup(drakvuf->breakpoint_lookup_pa, &pa);
        if (sbp)
        {
            PRINT_DEBUG("Simulated INT3 event vCPU %u altp2m:%u CR3: 0x%"PRIx64" PA=0x%"PRIx64" RIP=0x%"PRIx64"\n",
                event->vcpu_id, event->slat_id, event->x86_regs->cr3, pa, event->x86_regs->rip);
 
            loop = sbp->traps;
            while (loop)
            {
                trap_info.trap = (drakvuf_trap_t*)loop->data;
 
                // Advance before the callback: the callback may remove the trap.
                loop = loop->next;
                rsp |= trap_info.trap->cb(drakvuf, &trap_info);
            }
        }
    }
    drakvuf->in_callback = 0;
 
    /*
     * We don't need to pause the VM here because mem events
     * are safely cleared by LibVMI.
     */
    process_free_requests(drakvuf);
 
    // Check if we have traps still active on this page
    s = (struct wrapper*)g_hash_table_lookup(drakvuf->memaccess_lookup_gfn, &event->mem_event.gfn);
    if (s)
    {
        /*
         * There seems to be another trap still active
         * but it may already have another event queued that will clear it.
         */
        // Context handed to post_mem_cb via the singlestep event.
        struct memcb_pass* pass = (struct memcb_pass*)g_slice_alloc0(sizeof(struct memcb_pass));
        pass->drakvuf = drakvuf;
        pass->gfn = event->mem_event.gfn;
        pass->pa = pa;
        pass->access = event->mem_event.out_access;
        pass->proc_data.base_addr = proc_data.base_addr;
        pass->proc_data.name      = proc_data.name;
        pass->proc_data.pid       = proc_data.pid;
        pass->proc_data.ppid      = proc_data.ppid;
        pass->proc_data.userid    = proc_data.userid;
        pass->proc_data.tid       = proc_data.tid;
        pass->attached_proc_data.base_addr = attached_proc_data.base_addr;
        pass->attached_proc_data.name      = attached_proc_data.name;
        pass->attached_proc_data.pid       = attached_proc_data.pid;
        pass->attached_proc_data.ppid      = attached_proc_data.ppid;
        pass->attached_proc_data.userid    = attached_proc_data.userid;
        pass->attached_proc_data.tid       = attached_proc_data.tid;
 
        if (!s->memaccess.guard2)
        {
            // Regular guard: step through in the default view (slat 0).
            event->slat_id = 0;
 
            /*
             * If this is a remapped gfn and the page is getting written, the remapped copy needs to be updated
             */
            if ( event->mem_event.out_access & VMI_MEMACCESS_W )
            {
                pass->traps = (GSList*)g_hash_table_lookup(drakvuf->breakpoint_lookup_gfn, &pass->gfn);
                if ( pass->traps )
                    pass->remapped_gfn = (struct remapped_gfn*)g_hash_table_lookup(drakvuf->remapped_gfns, &pass->gfn);
            }
        }
        else
        {
            // Guard2 protects remapped gfns: step through in the IDR (shadow) view.
            event->slat_id = drakvuf->altp2m_idr;
            if (event->mem_event.out_access & VMI_MEMACCESS_W)
            {
                g_slice_free(struct memcb_pass, pass);
                free_proc_data_priv_2(&proc_data, &attached_proc_data);
                PRINT_DEBUG("Somebody try to write to the shadow page, let's emulate it instead\n");
                return rsp | VMI_EVENT_RESPONSE_EMULATE_NOWRITE;
            }
        }
 
        // A still-armed post_mem_cb means the previous singlestep never fired: fatal.
        if ( drakvuf->step_event[event->vcpu_id]->callback == post_mem_cb )
        {
            fprintf(stderr, "Error, post_mem_cb wasn't called when expected!\n");
            drakvuf->interrupted = -1;
            g_slice_free(struct memcb_pass, pass);
            free_proc_data_priv_2(&proc_data, &attached_proc_data);
            return 0;
        }
 
        PRINT_DEBUG("Switching to altp2m view %u on vCPU %u and waiting for post_mem cb\n",
            event->slat_id, event->vcpu_id);
 
        drakvuf->step_event[event->vcpu_id]->callback = post_mem_cb;
        drakvuf->step_event[event->vcpu_id]->data = pass;
        return rsp |
            VMI_EVENT_RESPONSE_TOGGLE_SINGLESTEP | // Turn on singlestep
            VMI_EVENT_RESPONSE_SLAT_ID;
    }
 
    // No traps left on the page: clean up and return the accumulated flags.
    free_proc_data_priv_2(&proc_data, &attached_proc_data);
    return rsp;
}

The following flow is used to register handlers, xen event channels and callbacks:

process_mem → process_response / issue_mem_cb → event->callback

process_mem

/*
 * LibVMI handler for VM_EVENT_REASON_MEM_ACCESS requests.  Looks up a
 * vmi_event_t registered for the faulting gfn (or any matching generic
 * event), fills in the event context, dispatches the callback via
 * issue_mem_cb() and converts its response flags with process_response().
 */
static
status_t process_mem(vmi_instance_t vmi, vm_event_compat_t *vmec)
{
    vmi_event_t *event;
    vmi_mem_access_t out_access = VMI_MEMACCESS_INVALID;
 
    // Translate Xen's access flags into LibVMI's access mask.
    if (vmec->mem_access.flags & MEM_ACCESS_R) out_access |= VMI_MEMACCESS_R;
    if (vmec->mem_access.flags & MEM_ACCESS_W) out_access |= VMI_MEMACCESS_W;
    if (vmec->mem_access.flags & MEM_ACCESS_X) out_access |= VMI_MEMACCESS_X;
 
    // First preference: an event registered on this specific gfn.
    if ( g_hash_table_size(vmi->mem_events_on_gfn) ) {
        event = g_hash_table_lookup(vmi->mem_events_on_gfn, &vmec->mem_access.gfn);
 
        if (event && (event->mem_event.in_access & out_access) ) {
            event->x86_regs = &vmec->data.regs.x86;
            event->slat_id = vmec->altp2m_idx;
            event->vcpu_id = vmec->vcpu_id;
            event->page_mode = vmec->pm;
 
            vmi->event_callback = 1;
            process_response( issue_mem_cb(vmi, event, vmec, out_access), event, vmec );
            vmi->event_callback = 0;
 
            return VMI_SUCCESS;
        }
    }
 
    // Otherwise: generic (catch-all) events keyed by access mask; every
    // matching generic event gets the callback.
    if ( g_hash_table_size(vmi->mem_events_generic) ) {
        GHashTableIter i;
        vmi_mem_access_t *key = NULL;
        bool cb_issued = 0;
 
        ghashtable_foreach(vmi->mem_events_generic, i, &key, &event) {
            if ( (*key) & out_access ) {
                event->x86_regs = &vmec->data.regs.x86;
                event->slat_id = vmec->altp2m_idx;
                event->vcpu_id = vmec->vcpu_id;
                event->page_mode = vmec->pm;
 
                vmi->event_callback = 1;
                process_response( issue_mem_cb(vmi, event, vmec, out_access), event, vmec );
                vmi->event_callback = 0;
 
                cb_issued = 1;
            }
        }
 
        if ( cb_issued )
            return VMI_SUCCESS;
    }
 
    /*
     * TODO: Could this happen when using multi-vCPU VMs where multiple vCPU's trigger
     *       the same violation and the event is already being passed to vmi_step_event?
     *       The event in that case would be already removed from the GHashTable so
     *       the second violation on the other vCPU would not get delivered..
     */
    errprint("Caught a memory event that had no handler registered in LibVMI @ GFN 0x%" PRIx64 " (0x%" PRIx64 "), access: %u\n",
             vmec->mem_access.gfn, (vmec->mem_access.gfn<<12) + vmec->mem_access.offset, out_access);
    return VMI_FAILURE;
}

process_response

/*
 * Here we check for response flags placed on the event in the callback
 * that allows triggering Xen vm_event response flags.
 *
 * Walks every possible VMI_EVENT_RESPONSE_* bit; for each bit set in
 * `response`, copies any associated payload (altp2m view id, emulation
 * data, fast-singlestep view) into the raw response and ORs in the
 * corresponding raw flag from event_response_conversion[].
 */
static
void process_response ( event_response_t response, vmi_event_t *event, vm_event_compat_t *rsp )
{
    /*
     * The only flag we keep from the request
     */
    rsp->flags = (rsp->flags & VM_EVENT_FLAG_VCPU_PAUSED);
 
    if ( response && event ) {
        uint32_t i = VMI_EVENT_RESPONSE_NONE+1;
 
        for (; i<=__VMI_EVENT_RESPONSE_MAX; i++) {
            event_response_t er = 1u << i;
 
            if ( response & er ) {
                switch ( er ) {
                    case VMI_EVENT_RESPONSE_VMM_PAGETABLE_ID:
                        // Resume the vCPU in the altp2m view the callback chose.
                        rsp->altp2m_idx = event->slat_id;
                        break;
                    case VMI_EVENT_RESPONSE_SET_EMUL_READ_DATA:
                        if ( event->emul_read ) {
                            rsp->flags |= event_response_conversion[VMI_EVENT_RESPONSE_EMULATE];
 
                            // Clamp the size to the response buffer capacity.
                            if ( event->emul_read->size < sizeof(event->emul_read->data) )
                                rsp->data.emul.read.size = event->emul_read->size;
                            else
                                rsp->data.emul.read.size = sizeof(event->emul_read->data);
 
                            memcpy(&rsp->data.emul.read.data,
                                   &event->emul_read->data,
                                   rsp->data.emul.read.size);
 
                            // Ownership: free unless the caller asked to keep it.
                            if ( !event->emul_read->dont_free ) {
                                free(event->emul_read);
                                event->emul_read = NULL;
                            }
                        }
                        break;
                    case VMI_EVENT_RESPONSE_SET_EMUL_INSN:
                        if ( event->emul_insn ) {
                            rsp->flags |= event_response_conversion[VMI_EVENT_RESPONSE_EMULATE];
 
                            memcpy(&rsp->data.emul.insn.data,
                                   &event->emul_insn->data,
                                   sizeof(rsp->data.emul.insn.data));
 
                            // Ownership: free unless the caller asked to keep it.
                            if ( !event->emul_insn->dont_free ) {
                                free(event->emul_insn);
                                event->emul_insn = NULL;
                            }
                        }
                        break;
                    case VMI_EVENT_RESPONSE_NEXT_SLAT_ID:
                        rsp->fast_singlestep.p2midx = event->next_slat_id;
                        break;
                };
 
                // Map the VMI response bit onto the raw VM_EVENT_FLAG_* bit.
                rsp->flags |= event_response_conversion[er];
            }
        }
    }
}

issue_mem_cb

/*
 * Populate the event's mem_event fields from the raw Xen request and
 * dispatch the registered user callback, returning its response flags.
 */
static inline
event_response_t issue_mem_cb(vmi_instance_t vmi,
                              vmi_event_t *event,
                              vm_event_compat_t *vmec,
                              vmi_mem_access_t out_access)
{
    const int gla_valid = !!(vmec->mem_access.flags & MEM_ACCESS_GLA_VALID);

    /* Default to "no linear address", then overwrite when Xen reports one. */
    event->mem_event.gla = 0ull;
    if ( gla_valid ) {
        event->mem_event.gptw = !!(vmec->mem_access.flags & MEM_ACCESS_FAULT_IN_GPT);
        event->mem_event.gla_valid = 1;
        event->mem_event.gla = vmec->mem_access.gla;
    }

    event->mem_event.gfn = vmec->mem_access.gfn;
    event->mem_event.offset = vmec->mem_access.offset;
    event->mem_event.out_access = out_access;
    event->vcpu_id = vmec->vcpu_id;

    return event->callback(vmi, event);
}

The xen_init_events will establish the events channel

drakvuf_init → init_vmi → vmi_init → driver_init_vmi → xen_init_vmi → xen_init_events

xen_init_events sets process_event[VM_EVENT_REASON_MEM_ACCESS] = &process_mem

xen_init_events → init_events_n → process_requests_n → process_request → process_event

xen_events_listen → process_request

init_events_n

/*
 * Prepare handling for vm_event interface version 7: install the
 * version-specific request processor and pending-check, then initialize
 * the shared ring used to exchange requests/responses with Xen.
 */
status_t init_events_7(vmi_instance_t vmi)
{
    xen_events_t *xe = xen_get_events(vmi);
 
    xe->process_requests = &process_requests_7;
    vmi->driver.are_events_pending_ptr = &xen_are_events_pending_7;
 
    // The shared ring must be reset before the back ring is attached to it.
    SHARED_RING_INIT((vm_event_7_sring_t *)xe->ring_page);
    BACK_RING_INIT(&xe->back_ring_7,
                   (vm_event_7_sring_t *)xe->ring_page,
                   XC_PAGE_SIZE);
 
    return VMI_SUCCESS;
}

process_requests_n

/*
 * Drain all unconsumed vm_event (interface v7) requests from the shared
 * ring: convert each raw request into the version-independent
 * vm_event_compat_t, hand it to process_request(), then build the
 * response and push it back onto the ring.  The number of requests
 * handled is returned through *requests_processed.
 *
 * NOTE(review): this excerpt elides the x86 register copy blocks with
 * "(...)"; the full implementation lives in LibVMI's Xen driver.
 */
status_t process_requests_7(vmi_instance_t vmi, uint32_t *requests_processed)
{
    vm_event_7_request_t *req;
    vm_event_7_response_t *rsp;
    vm_event_compat_t vmec = { 0 };
    xen_events_t *xe = xen_get_events(vmi);
    xen_instance_t *xen = xen_get_instance(vmi);
    int rc;
    status_t vrc = VMI_SUCCESS;
    uint32_t processed = 0;
 
    while ( RING_HAS_UNCONSUMED_REQUESTS(&xe->back_ring_7) ) {
 
        ring_get_request_and_response_7(xe, &req, &rsp);
 
        // Refuse to parse a request whose ABI version we don't speak.
        if ( req->version != 0x00000007 ) {
            errprint("Error, Xen reports a VM_EVENT_INTERFACE_VERSION that is different then what we expect (0x%x != 0x%x)!\n",
                     req->version, 0x00000007);
            return VMI_FAILURE;
        }
 
        // Common header fields shared by every event reason.
        vmec.version = req->version;
        vmec.flags = req->flags;
        vmec.reason = req->reason;
        vmec.vcpu_id = req->vcpu_id;
        vmec.altp2m_idx = req->altp2m_idx;
 
#if defined(ARM32) || defined(ARM64)
        memcpy(&vmec.data.regs.arm, &req->data.regs.arm, sizeof(vmec.data.regs.arm));
#elif defined(I386) || defined(X86_64)
	
(...)
 
        if ( !(vmec.flags & VM_EVENT_FLAG_NESTED_P2M) )
            vmec.data.regs.x86.npt_base = 0;
        else
            vmec.data.regs.x86.npt_base = req->data.regs.x86.npt_base;
#endif
 
        // Copy the reason-specific payload into the compat struct.
        switch ( vmec.reason ) {
            case VM_EVENT_REASON_MEM_ACCESS:
                memcpy(&vmec.mem_access, &req->u.mem_access, sizeof(vmec.mem_access));
                break;
 
            case VM_EVENT_REASON_WRITE_CTRLREG:
                memcpy(&vmec.write_ctrlreg, &req->u.write_ctrlreg, sizeof(vmec.write_ctrlreg));
                break;
 
            case VM_EVENT_REASON_MOV_TO_MSR:
                memcpy(&vmec.mov_to_msr, &req->u.mov_to_msr, sizeof(vmec.mov_to_msr));
                break;
 
            case VM_EVENT_REASON_SINGLESTEP:
                memcpy(&vmec.singlestep, &req->u.singlestep, sizeof(vmec.singlestep));
                break;
 
            case VM_EVENT_REASON_SOFTWARE_BREAKPOINT:
                vmec.software_breakpoint.gfn = req->u.software_breakpoint.gfn;
                vmec.software_breakpoint.insn_length = req->u.software_breakpoint.insn_length;
                break;
 
            case VM_EVENT_REASON_INTERRUPT:
                memcpy(&vmec.x86_interrupt, &req->u.interrupt.x86, sizeof(vmec.x86_interrupt));
                break;
 
            case VM_EVENT_REASON_DEBUG_EXCEPTION:
                vmec.debug_exception.gfn = req->u.debug_exception.gfn;
                vmec.debug_exception.insn_length = req->u.debug_exception.insn_length;
                vmec.debug_exception.type = req->u.debug_exception.type;
                break;
 
            case VM_EVENT_REASON_CPUID:
                memcpy(&vmec.cpuid, &req->u.cpuid, sizeof(vmec.cpuid));
                break;
 
            case VM_EVENT_REASON_DESCRIPTOR_ACCESS:
                memcpy(&vmec.desc_access, &req->u.desc_access, sizeof(vmec.desc_access));
                break;
        }
 
        // Dispatch to the reason-specific handler (e.g. process_mem).
        vrc = process_request(vmi, &vmec);
#ifdef ENABLE_SAFETY_CHECKS
        if ( VMI_FAILURE == vrc )
            break;
#endif
 
        // Build the response from the (possibly modified) compat struct.
        rsp->version = vmec.version;
        rsp->vcpu_id = vmec.vcpu_id;
        rsp->flags = vmec.flags;
        rsp->reason = vmec.reason;
        rsp->altp2m_idx = vmec.altp2m_idx;
 
        if ( rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA ) {
            rsp->data.emul.read.size = vmec.data.emul.read.size;
            memcpy(&rsp->data.emul.read.data, &vmec.data.emul.read.data, vmec.data.emul.read.size);
        }
 
        if ( rsp->flags & VM_EVENT_FLAG_SET_EMUL_INSN_DATA )
            memcpy(&rsp->data.emul.insn, &vmec.data.emul.insn, sizeof(rsp->data.emul.insn));
 
        if ( rsp->flags & VM_EVENT_FLAG_FAST_SINGLESTEP )
            rsp->u.fast_singlestep.p2midx = vmec.fast_singlestep.p2midx;
 
        if ( rsp->flags & VM_EVENT_FLAG_SET_REGISTERS ) {
#if defined(ARM32) || defined(ARM64)
            memcpy(&rsp->data.regs.arm, &vmec.data.regs.arm, sizeof(rsp->data.regs.arm));
#elif defined(I386) || defined(X86_64)
 
           (...)
 
#endif
        }
 
        processed++;
        RING_PUSH_RESPONSES(&xe->back_ring_7);
 
        /*
         * Send notification to Xen that response(s) were placed on the ring
         *
         * Note: it is more performant to send notification after each event if
         * there are a lot of vCPUs assigned to the VM.
         */
        if (vmi->num_vcpus >= 7) {
            rc = xen->libxcw.xc_evtchn_notify(xe->xce_handle, xe->port);
 
#ifdef ENABLE_SAFETY_CHECKS
            if ( rc ) {
                errprint("Error sending event channel notification.\n");
                return VMI_FAILURE;
            }
#endif
        }
    }
 
    *requests_processed = processed;
    return vrc;
}

process_request

/*
 * Common vm_event dispatch: normalizes the altp2m view id, derives the
 * x86 paging mode from the request registers, then routes the event to
 * the reason-specific handler table (e.g. process_mem).
 */
static
status_t process_request(vmi_instance_t vmi, vm_event_compat_t *vmec)
{
    xen_events_t *events = xen_get_events(vmi);

#ifdef ENABLE_SAFETY_CHECKS
    /* Bail out when no handler is registered for this event reason. */
    if ( !events->process_event[vmec->reason] )
        return VMI_FAILURE;
#endif

    /* Events raised outside an alternate p2m view report view 0. */
    if ( !(vmec->flags & VM_EVENT_FLAG_ALTERNATE_P2M) )
        vmec->altp2m_idx = 0;

#if defined(I386) || defined(X86_64)
    vmec->pm = get_page_mode_x86(vmec->data.regs.x86.cr0, vmec->data.regs.x86.cr4, vmec->data.regs.x86.msr_efer);
#endif

    return events->process_event[vmec->reason](vmi, vmec);
}

xen_events_listen

The callback to handle events (interrupts in Xen terminology) is registered. DRAKVUF will then loop:


🌱 Back to Garden

3 items under this folder.