/* init_vmi is the DRAKVUF wrapper around the LibVMI vmi_init() function. */
/**
 * Initialize LibVMI for the target domain and set up all DRAKVUF runtime
 * machinery: breakpoint/memaccess lookup tables, per-vCPU singlestep events,
 * the all-0xFF "sink" page, the altp2m views (idx/idr/idrx), and the
 * interrupt + generic memory event handlers.
 *
 * @param drakvuf          DRAKVUF instance; xen handle, domID and dom_name
 *                         must already be populated by the caller.
 * @param fast_singlestep  When true (and Xen >= 14), use SLAT-id switching
 *                         instead of MTF singlestep for int3 responses.
 * @return true (1) on success, false (0) on any failure.
 *
 * NOTE(review): on failure after vmi_init() succeeded, this function returns
 * without tearing down what it built (vmi handle, hash tables, registered
 * events) — presumably the caller's close/cleanup path handles that; verify
 * against the caller.
 */
bool init_vmi(drakvuf_t drakvuf, bool fast_singlestep)
{
    int rc;

    /* One init-data entry: hand LibVMI the already-open Xen event channel
     * so it reuses it instead of opening its own. */
    vmi_init_data_t* init_data = (vmi_init_data_t*)g_try_malloc0(sizeof(vmi_init_data_t) + sizeof(vmi_init_data_entry_t));
    if ( !init_data )
        return 0;

    init_data->count = 1;
    init_data->entry[0].type = VMI_INIT_DATA_XEN_EVTCHN;
    init_data->entry[0].data = (void*) drakvuf->xen->evtchn;

    PRINT_DEBUG("init_vmi on domID %u -> %s\n", drakvuf->domID, drakvuf->dom_name);

    /* initialize the libvmi library (by domain ID, with event support) */
    status_t status = vmi_init(&drakvuf->vmi,
            VMI_XEN,
            &drakvuf->domID,
            VMI_INIT_DOMAINID | VMI_INIT_EVENTS,
            init_data,
            NULL);
    g_free(init_data); /* LibVMI copies what it needs; safe to free either way */
    if ( VMI_FAILURE == status )
    {
        fprintf(stderr, "Failed to init LibVMI library.\n");
        return 0;
    }
    PRINT_DEBUG("init_vmi: initializing vmi done\n");

    drakvuf->vcpus = vmi_get_num_vcpus(drakvuf->vmi);
    drakvuf->init_memsize = xen_get_maxmemkb(drakvuf->xen, drakvuf->domID);

    /* Highest guest pfn; used below to pick a fresh gfn for the sink page. */
    if ( xc_domain_maximum_gpfn(drakvuf->xen->xc, drakvuf->domID, &drakvuf->max_gpfn) < 0 )
        return 0;

    PRINT_DEBUG("Max GPFN: 0x%lx\n", drakvuf->max_gpfn);

    // Create tables to lookup breakpoints
    drakvuf->breakpoint_lookup_pa = g_hash_table_new_full(g_int64_hash, g_int64_equal, free, free_wrapper);
    drakvuf->breakpoint_lookup_gfn = g_hash_table_new_full(g_int64_hash, g_int64_equal, free, NULL);
    drakvuf->breakpoint_lookup_trap = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, NULL);
    drakvuf->memaccess_lookup_gfn = g_hash_table_new_full(g_int64_hash, g_int64_equal, free, free_wrapper);
    drakvuf->memaccess_lookup_trap = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, NULL);
    drakvuf->remapped_gfns = g_hash_table_new_full(g_int64_hash, g_int64_equal, NULL, free_remapped_gfn);
    drakvuf->remove_traps = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, NULL);

    unsigned int i;
    /*
     * Setup singlestep event handlers but don't turn on MTF.
     * Max MAX_DRAKVUF_VCPU CPUs!
     */
    for (i = 0; i < drakvuf->vcpus && i < MAX_DRAKVUF_VCPU; i++)
    {
        drakvuf->step_event[i] = (vmi_event_t*)g_try_malloc0(sizeof(vmi_event_t));
        if ( !drakvuf->step_event[i] )
        {
            fprintf(stderr, "Out of memory during initialization\n");
            return 0;
        }

        /* 1u << i: per-vCPU bitmask so each event fires only on its vCPU;
         * final 0 leaves MTF disabled until a trap actually requests it. */
        SETUP_SINGLESTEP_EVENT(drakvuf->step_event[i], 1u << i, vmi_reset_trap, 0);
        drakvuf->step_event[i]->data = drakvuf;
        if (VMI_FAILURE == vmi_register_event(drakvuf->vmi, drakvuf->step_event[i]))
        {
            fprintf(stderr, "Failed to register singlestep for vCPU %u\n", i);
            return 0;
        }
    }

    /* domain->max_pages is mostly just an annoyance that we can safely ignore */
    rc = xc_domain_setmaxmem(drakvuf->xen->xc, drakvuf->domID, ~0);
    PRINT_DEBUG("Max mem set? %i\n", rc);
    if (rc < 0)
        return 0;

    /* Allocate the sink page one pfn past the current maximum, so it is
     * guaranteed not to collide with any existing guest page. */
    drakvuf->sink_page_gfn = ++(drakvuf->max_gpfn);

    rc = xc_domain_populate_physmap_exact(drakvuf->xen->xc, drakvuf->domID, 1, 0, 0, &drakvuf->sink_page_gfn);
    PRINT_DEBUG("Physmap populated? %i\n", rc);
    if (rc < 0)
        return 0;

    /* Fill the sink page with 0xFF so anything reading it sees no real data. */
    uint8_t fmask[VMI_PS_4KB] = {[0 ... VMI_PS_4KB-1] = 0xFF};
    if (VMI_FAILURE == vmi_write_pa(drakvuf->vmi, drakvuf->sink_page_gfn<<12, VMI_PS_4KB, &fmask, NULL))
    {
        PRINT_DEBUG("Failed to mask FF to the empty page\n");
        return 0;
    }

    bool altp2m = xen_enable_altp2m(drakvuf->xen, drakvuf->domID);
    PRINT_DEBUG("Altp2m enabled? %i\n", altp2m);
    if (!altp2m)
        return 0;

    /*
     * Create altp2m view
     *
     * The idx view is used primarily during DRAKVUF execution. In this view all breakpointed
     * pages will have their shadow copies activated.
     */
    status = vmi_slat_create(drakvuf->vmi, &drakvuf->altp2m_idx);
    if (VMI_FAILURE == status)
    {
        PRINT_DEBUG("Altp2m view X creation failed\n");
        return 0;
    }
    PRINT_DEBUG("Altp2m view X created with ID %u\n", drakvuf->altp2m_idx);

    /*
     * We will use the idr view to map all shadow pages to the sink page in case
     * something is trying to check the contents of the shadow pages.
     */
    status = vmi_slat_create(drakvuf->vmi, &drakvuf->altp2m_idr);
    if (VMI_FAILURE == status)
    {
        PRINT_DEBUG("Altp2m view R creation failed\n");
        return 0;
    }
    PRINT_DEBUG("Altp2m view R created with ID %u\n", drakvuf->altp2m_idr);

    /*
     * IDRX View is used for context based interception, in order to protect
     * pages that has breakpoints during execution of unmonitored contexts.
     */
    drakvuf->context_switch_intercept_processes = NULL;
    status = vmi_slat_create(drakvuf->vmi, &drakvuf->altp2m_idrx);
    if (VMI_FAILURE == status)
    {
        PRINT_DEBUG("Altp2m view RW creation failed\n");
        return 0;
    }
    PRINT_DEBUG("Altp2m view RW created with ID %u\n", drakvuf->altp2m_idrx);

    /* int3 (breakpoint) interception — the core DRAKVUF trap mechanism. */
    SETUP_INTERRUPT_EVENT(&drakvuf->interrupt_event, int3_cb);
    drakvuf->interrupt_event.data = drakvuf;

    if (VMI_FAILURE == vmi_register_event(drakvuf->vmi, &drakvuf->interrupt_event))
    {
        fprintf(stderr, "Failed to register interrupt event\n");
        return 0;
    }

    /* Generic RWX mem event over the whole address space (~0ULL = any gfn). */
    SETUP_MEM_EVENT(&drakvuf->mem_event, ~0ULL, VMI_MEMACCESS_RWX, pre_mem_cb, 1);
    drakvuf->mem_event.data = drakvuf;

    if (VMI_FAILURE == vmi_register_event(drakvuf->vmi, &drakvuf->mem_event))
    {
        fprintf(stderr, "Failed to register generic mem event\n");
        return 0;
    }

    /* Trap any access to the sink page in the idx and idrx views. */
    if (VMI_FAILURE == vmi_set_mem_event(drakvuf->vmi, drakvuf->sink_page_gfn, VMI_MEMACCESS_RWX, drakvuf->altp2m_idx))
    {
        PRINT_DEBUG("Sink page protection failed in IDX view\n");
        return 0;
    }

    if (VMI_FAILURE == vmi_set_mem_event(drakvuf->vmi, drakvuf->sink_page_gfn, VMI_MEMACCESS_RWX, drakvuf->altp2m_idrx))
    {
        PRINT_DEBUG("Sink page protection failed in IDRX view\n");
        return 0;
    }

    /* Make the idx (execution) view active. */
    status = vmi_slat_switch(drakvuf->vmi, drakvuf->altp2m_idx);
    if (VMI_FAILURE == status)
    {
        PRINT_DEBUG("Failed to switch Altp2m view to X\n");
        return 0;
    }

    // TODO: Fast singlestep is disabled by default for now while a bug is being fixed upstream in Xen
    if ( fast_singlestep && xen_version() >= 14 )
        drakvuf->int3_response_flags = VMI_EVENT_RESPONSE_SLAT_ID |     // Switch to this ID immediately
            VMI_EVENT_RESPONSE_NEXT_SLAT_ID; // Switch to next ID after singlestepping a single instruction
    else
        drakvuf->int3_response_flags = VMI_EVENT_RESPONSE_SLAT_ID |     // Switch to this ID immediately
            VMI_EVENT_RESPONSE_TOGGLE_SINGLESTEP; // Turn on singlestep

    PRINT_DEBUG("init_vmi finished\n");
    return 1;
}
/**
 * Create and initialize a LibVMI instance.
 *
 * @param vmi         Out: receives the new instance on success, NULL on failure.
 * @param mode        Driver to use (e.g. VMI_XEN); sanity-checked first.
 * @param domain      Domain identifier; interpretation depends on init_flags
 *                    (domain ID vs. name — resolved by set_id_and_name()).
 * @param init_flags  VMI_INIT_* bitmask controlling which subsystems start.
 * @param init_data   Optional extra configuration entries (e.g. a memory map,
 *                    driver-specific data); ownership stays with the caller.
 * @param error       Optional out parameter for a more specific error code;
 *                    only written on failure.
 * @return VMI_SUCCESS or VMI_FAILURE.
 *
 * Cleanup discipline: all failure paths after allocation jump to error_exit,
 * which destroys the partially built instance. The success path deliberately
 * falls through the error_exit label as well — status distinguishes the cases.
 */
status_t vmi_init(
    vmi_instance_t *vmi,
    vmi_mode_t mode,
    const void *domain,
    uint64_t init_flags,
    vmi_init_data_t *init_data,
    vmi_init_error_t *error)
{
    /* Fail early if the requested driver was not compiled in / unavailable. */
    if ( VMI_FAILURE == driver_sanity_check(mode) ) {
        errprint("The selected LibVMI mode is not available!\n");
        return VMI_FAILURE;
    }

    /* Pessimistic default: any goto error_exit before success tears down. */
    status_t status = VMI_FAILURE;

    /* allocate memory for instance structure (zeroed) */
    vmi_instance_t _vmi = (vmi_instance_t) g_try_malloc0(sizeof(struct vmi_instance));
    if ( !_vmi )
        return VMI_FAILURE;

    /* initialize instance struct to default values */
    dbprint(VMI_DEBUG_CORE, "LibVMI Version %s\n", PACKAGE_VERSION);

    _vmi->mode = mode;
    dbprint(VMI_DEBUG_CORE, "LibVMI Driver Mode %d\n", _vmi->mode);

    _vmi->init_flags = init_flags;
    _vmi->page_mode = VMI_PM_UNKNOWN;

    arch_init_lookup_tables(_vmi);

    /* Consume caller-supplied init-data entries we understand; unknown entry
     * types are silently skipped here (drivers may consume them later). */
    if ( init_data && init_data->count ) {
        uint64_t i;
        for (i=0; i < init_data->count; i++) {
            switch (init_data->entry[i].type) {
                case VMI_INIT_DATA_MEMMAP:
                    /* Deep-copy so the instance owns its memmap independently
                     * of the caller's init_data lifetime. */
                    _vmi->memmap = (memory_map_t*)g_memdup_compat(init_data->entry[i].data, sizeof(memory_map_t));
                    if ( !_vmi->memmap )
                        goto error_exit;
                    break;
                default:
                    break;
            };
        }
    }

    /* setup the page offset size */
    if (VMI_FAILURE == init_page_offset(_vmi)) {
        if ( error )
            *error = VMI_INIT_ERROR_DRIVER;

        goto error_exit;
    }

    /* driver-specific initilization */
    if (VMI_FAILURE == driver_init(_vmi, init_flags, init_data)) {
        if ( error )
            *error = VMI_INIT_ERROR_DRIVER;

        goto error_exit;
    }
    dbprint(VMI_DEBUG_CORE, "--completed driver init.\n");

    if (init_flags & VMI_INIT_DOMAINWATCH) {
        if ( VMI_FAILURE == driver_domainwatch_init(_vmi, init_flags) ) {
            if ( error )
                *error = VMI_INIT_ERROR_DRIVER;
            goto error_exit;
        }

        /* Exact-equality check: domain-watch-ONLY callers get a fast path
         * that skips domain resolution, memsize query and cache setup. */
        if ( init_flags == VMI_INIT_DOMAINWATCH ) {
            /* we have all we need to wait for domains. Return if there is nothing else */
            *vmi = _vmi;
            return VMI_SUCCESS;
        }
    }

    /* resolve the id and name */
    if (VMI_FAILURE == set_id_and_name(_vmi, domain)) {
        if ( error )
            *error = VMI_INIT_ERROR_VM_NOT_FOUND;

        goto error_exit;
    }

    /* init vmi for specific file/domain through the driver */
    if (VMI_FAILURE == driver_init_vmi(_vmi, init_flags, init_data)) {
        if ( error )
            *error = VMI_INIT_ERROR_DRIVER;

        goto error_exit;
    }

    /* get the memory size */
    if (driver_get_memsize(_vmi, &_vmi->allocated_ram_size, &_vmi->max_physical_address) == VMI_FAILURE) {
        if ( error )
            *error = VMI_INIT_ERROR_DRIVER;

        goto error_exit;
    }

    /* setup the caches */
    pid_cache_init(_vmi);
    sym_cache_init(_vmi);
    rva_cache_init(_vmi);
    v2p_cache_init(_vmi);

    /* From here on we are on the success path unless events_init fails. */
    status = VMI_SUCCESS;

    dbprint(VMI_DEBUG_CORE, "**set allocated_ram_size = %"PRIx64", "
            "max_physical_address = 0x%"PRIx64"\n",
            _vmi->allocated_ram_size,
            _vmi->max_physical_address);

    if ( init_flags & VMI_INIT_EVENTS ) {
        status = events_init(_vmi);
        if ( error && VMI_FAILURE == status )
            *error = VMI_INIT_ERROR_EVENTS;
    }

error_exit:
    /* Shared exit: success falls through here too; only destroy on failure. */
    if ( VMI_FAILURE == status ) {
        vmi_destroy(_vmi);
        *vmi = NULL;
    } else
        *vmi = _vmi;

    return status;
}
