/*
 * Initialize the common Xen vm_event interface for this VMI instance.
 *
 * @vmi        the LibVMI instance being initialized
 * @init_flags VMI_INIT_* flags (consulted for optional features below)
 * @init_data  optional caller-supplied init entries (may be NULL)
 *
 * Returns VMI_FAILURE on any error; on success control reaches one of the
 * version-specific init_events_N() helpers further down, whose result is
 * returned to the caller.
 */
status_t xen_init_events(
    vmi_instance_t vmi,
    uint32_t init_flags,
    vmi_init_data_t *init_data)
{
    xen_events_t * xe = NULL;
    xen_instance_t *xen = xen_get_instance(vmi);
    xc_interface * xch = xen_get_xchandle(vmi);
    domid_t dom = xen_get_domainid(vmi);
    int rc;

    (void)init_flags; // maybe unused

#ifdef ENABLE_SAFETY_CHECKS
    // Validate all handles derived from vmi before touching them.
    if ( !xen ) {
        errprint("%s error: invalid xen_instance_t handle\n", __FUNCTION__);
        return VMI_FAILURE;
    }
    if ( !xch ) {
        errprint("%s error: invalid xc_interface handle\n", __FUNCTION__);
        return VMI_FAILURE;
    }
    if ( dom == (domid_t)VMI_INVALID_DOMID ) {
        errprint("%s error: invalid domid\n", __FUNCTION__);
        return VMI_FAILURE;
    }
#endif

    // This implementation requires Xen 4.x with minor version >= 6.
    if ( xen->major_version != 4 || xen->minor_version < 6 ) {
        errprint("%s error: version of Xen is not supported\n", __FUNCTION__);
        return VMI_FAILURE;
    }
    // Re-use a previously initialized events context if one is attached,
    // otherwise allocate a fresh zero-filled one.
    if ( xen->events )
        xe = xen->events;
    else {
        // Allocate memory
        xe = g_try_malloc0(sizeof(xen_events_t));
        if ( !xe ) {
            errprint("%s error: allocation for xen_events_t failed\n", __FUNCTION__);
            goto err;
        }
    }

At this point the function begins setting up the event channel that Xen uses to deliver vm_event notifications.

 // Enable the monitor ring page: Xen allocates a shared ring and returns
 // the event channel port used to signal new requests.
    xe->ring_page = xen->libxcw.xc_monitor_enable(xch, dom, &xe->evtchn_port);
    if ( !xe->ring_page ) {
        // libxc reports the reason for the failure via errno.
        switch ( errno ) {
            case EBUSY:
                errprint("vm_event is (or was) active on this domain\n");
                break;
            case ENODEV:errprint("vm_event is not supported for this guest\n");
                break;
            default:
                errprint("Error enabling vm_event\n");
                break;
        }
        goto err;
    }

    // The caller may hand in an already-open event channel handle
    // (VMI_INIT_DATA_XEN_EVTCHN); in that case polling is driven externally.
    if ( init_data && init_data->count ) {
        uint64_t i;
        for (i=0; i < init_data->count; i++) {
            if ( init_data->entry[i].type != VMI_INIT_DATA_XEN_EVTCHN )
                continue;

            xe->xce_handle = init_data->entry[i].data;
            xe->external_poll = 1;
            break;
        }
    }

The call above returns the event ring page mapped into our address space; its implementation follows:

xc_monitor_enable

void *xc_monitor_enable(xc_interface *xch, domid_t domain_id, uint32_t *port)
{
    /*
     * Convenience wrapper: enable the "monitor" flavour of vm_event for
     * domain_id and return the mapped ring page; *port receives the event
     * channel port used for notifications.
     */
    void *ring = xc_vm_event_enable(xch, domain_id,
                                    HVM_PARAM_MONITOR_RING_PFN, port);
    return ring;
}

xc_vm_event_enable

 
/*
 * Enables vm_event for domain_id and returns the mapped ring page indicated
 * by param.  param selects the vm_event subsystem and must be one of
 * HVM_PARAM_PAGING/MONITOR/SHARING_RING_PFN.  On success *port receives the
 * event channel port for ring notifications; on failure NULL is returned
 * with errno set.
 */

void *xc_vm_event_enable(xc_interface *xch, domid_t domain_id, int param,
                         uint32_t *port)
{
    void *ring_page = NULL;
    uint64_t pfn;
    xen_pfn_t ring_pfn, mmap_pfn;
    unsigned int op, mode;
    int rc1, rc2, saved_errno;

    // A location to report the notification port back is mandatory.
    if ( !port )
    {
        errno = EINVAL;
        return NULL;
    }

    /* Pause the domain for ring page setup */
    rc1 = xc_domain_pause(xch, domain_id);
    if ( rc1 != 0 )
    {
        PERROR("Unable to pause domain\n");
        return NULL;
    }

    /* Get the pfn of the ring page */
    rc1 = xc_hvm_param_get(xch, domain_id, param, &pfn);
    if ( rc1 != 0 )
    {
        PERROR("Failed to get pfn of ring page\n");
        goto out;
    }

    ring_pfn = pfn;
    mmap_pfn = pfn;
    rc1 = xc_get_pfn_type_batch(xch, domain_id, 1, &mmap_pfn);
    if ( rc1 || mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
    {
        /* Page not in the physmap, try to populate it */
        rc1 = xc_domain_populate_physmap_exact(xch, domain_id, 1, 0, 0,
                                              &ring_pfn);
        if ( rc1 != 0 )
        {
            PERROR("Failed to populate ring pfn\n");
            goto out;
        }
    }

    // Map the ring page into our address space read/write.
    mmap_pfn = ring_pfn;
    ring_page = xc_map_foreign_pages(xch, domain_id, PROT_READ | PROT_WRITE,
                                         &mmap_pfn, 1);
    if ( !ring_page )
    {
        PERROR("Could not map the ring page\n");
        goto out;
    }

    // Translate the HVM param into the matching vm_event domctl mode.
    switch ( param )
    {
    case HVM_PARAM_PAGING_RING_PFN:
        op = XEN_VM_EVENT_ENABLE;
        mode = XEN_DOMCTL_VM_EVENT_OP_PAGING;
        break;

    case HVM_PARAM_MONITOR_RING_PFN:
        op = XEN_VM_EVENT_ENABLE;
        mode = XEN_DOMCTL_VM_EVENT_OP_MONITOR;
        break;

    case HVM_PARAM_SHARING_RING_PFN:
        op = XEN_VM_EVENT_ENABLE;
        mode = XEN_DOMCTL_VM_EVENT_OP_SHARING;
        break;

    /*
     * This is for the outside chance that the HVM_PARAM is valid but is invalid
     * as far as vm_event goes.
     */
    default:
        errno = EINVAL;
        rc1 = -1;
        goto out;
    }

    rc1 = xc_vm_event_control(xch, domain_id, op, mode, port);
    if ( rc1 != 0 )
    {
        PERROR("Failed to enable vm_event\n");
        goto out;
    }

    /* Remove the ring_pfn from the guest's physmap */
    rc1 = xc_domain_decrease_reservation_exact(xch, domain_id, 1, 0, &ring_pfn);
    if ( rc1 != 0 )
        PERROR("Failed to remove ring page from guest physmap");

 out:
    // Preserve the errno of the first failure across the unpause below,
    // which may itself fail and clobber errno.
    saved_errno = errno;

    rc2 = xc_domain_unpause(xch, domain_id);
    if ( rc1 != 0 || rc2 != 0 )
    {
        if ( rc2 != 0 )
        {
            // Unpause failed: if setup had succeeded, its errno wins.
            if ( rc1 == 0 )
                saved_errno = errno;
            PERROR("Unable to unpause domain");
        }

        // On any failure, tear down the mapping and report NULL.
        if ( ring_page )
            xenforeignmemory_unmap(xch->fmem, ring_page, 1);
        ring_page = NULL;

        errno = saved_errno;
    }

    return ring_page;
}

 if ( !xe->xce_handle ) {
        // Open event channel (no handle was supplied via init_data).
        xe->xce_handle = xen->libxcw.xc_evtchn_open(NULL, 0);
        if ( !xe->xce_handle ) {
            errprint("Failed to open event channel\n");
            goto err;
        }
    }

    // Setup poll: slot 0 is the event channel fd.
    xe->fd[0].fd = xen->libxcw.xc_evtchn_fd(xe->xce_handle);
    xe->fd[0].events = POLLIN | POLLERR;

    // NOTE(review): the cast suggests fd_size is const-qualified in
    // xen_events_t; writing through a cast-away-const is UB if the object
    // itself is const -- confirm against the struct definition.
    *(uint16_t *)&xe->fd_size = 1;

#ifdef HAVE_LIBXENSTORE
    // Reserve a second poll slot for the xenstore domain-watch fd.
    if ( init_flags & VMI_INIT_DOMAINWATCH )
        *(uint16_t *)&xe->fd_size = 2;
#endif

xc_evtchn_open

xc_evtchn *xc_evtchn_open(xentoollog_logger *logger,
                          unsigned open_flags)
{
    /* Compatibility shim: delegate straight to libxenevtchn. */
    xc_evtchn *handle = xenevtchn_open(logger, open_flags);
    return handle;
}

xenevtchn_open

/*
 * Allocate and open an event channel handle.  If no logger is supplied,
 * a default stdio logger is created and owned (and later freed) by the
 * handle.  Returns NULL on failure with all partial state released.
 */
xenevtchn_handle *xenevtchn_open(xentoollog_logger *logger, unsigned open_flags)
{
    xenevtchn_handle *xce = malloc(sizeof(*xce));
    int rc;

    if (!xce) return NULL;

    // Initialize before any path that can reach the err label.
    xce->fd = -1;
    xce->logger = logger;
    xce->logger_tofree  = NULL;

    if (!xce->logger) {
        // No caller-provided logger: create one we own via logger_tofree.
        xce->logger = xce->logger_tofree =
            (xentoollog_logger*)
            xtl_createlogger_stdiostream(stderr, XTL_PROGRESS, 0);
        if (!xce->logger) goto err;
    }

    rc = osdep_evtchn_open(xce);
    if ( rc  < 0 ) goto err;

    return xce;

err:
    // NOTE(review): this runs even before osdep_evtchn_open succeeds, so
    // osdep_evtchn_close must tolerate xce->fd == -1 -- confirm.  Only the
    // logger we created ourselves (logger_tofree) is destroyed here.
    osdep_evtchn_close(xce);
    xtl_logger_destroy(xce->logger_tofree);
    free(xce);
    return NULL;
}

osdep_evtchn_open (linux)

int osdep_evtchn_open(xenevtchn_handle *xce)
{
    /*
     * Open the Linux event channel device node.  O_CLOEXEC keeps the
     * descriptor from leaking into child processes.  Returns 0 on success,
     * -1 on failure (errno set by open).
     */
    int fd = open("/dev/xen/evtchn", O_RDWR | O_CLOEXEC);

    if ( fd < 0 )
        return -1;

    xce->fd = fd;
    return 0;
}

xc_evtchn_fd

int xc_evtchn_fd(xc_evtchn *xce)
{
    /* Compatibility shim around the libxenevtchn accessor. */
    const int fd = xenevtchn_fd(xce);
    return fd;
}

xenevtchn_fd

int xenevtchn_fd(xenevtchn_handle *xce)
{
    /* Expose the handle's file descriptor so callers can poll/select on it. */
    int fd = xce->fd;
    return fd;
}

Next, the two domains are connected by binding our local event channel handle to the remote port Xen allocated when monitoring was enabled.

    // Bind our local event channel handle to the port Xen allocated for the
    // monitor ring; on success rc is the locally-bound port number.
    rc = xen->libxcw.xc_evtchn_bind_interdomain(xe->xce_handle, dom, xe->evtchn_port);
    if ( rc < 0 ) {
        errprint("Failed to bind event channel\n");
        goto err;
    }

xenevtchn_bind_interdomain (linux)

xenevtchn_port_or_error_t xenevtchn_bind_interdomain(xenevtchn_handle *xce,
                                                  uint32_t domid,
                                                  evtchn_port_t remote_port)
{
    /*
     * Ask the kernel to bind a new local port to (domid, remote_port).
     * The ioctl returns the local port on success, negative on error.
     */
    struct ioctl_evtchn_bind_interdomain bind = {
        .remote_domain = domid,
        .remote_port = remote_port,
    };

    return ioctl(xce->fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
}

// rc from the successful bind is the locally-bound event channel port.
xe->port = rc;
xe->monitor_mem_access_on = 1;
// Per-reason dispatch table mapping VM_EVENT_REASON_* to its handler.
xe->process_event[VM_EVENT_REASON_MEM_ACCESS] = &process_mem;
xe->process_event[VM_EVENT_REASON_WRITE_CTRLREG] = &process_register;
xe->process_event[VM_EVENT_REASON_MOV_TO_MSR] = &process_msr;
xe->process_event[VM_EVENT_REASON_SOFTWARE_BREAKPOINT] = &process_software_breakpoint;
xe->process_event[VM_EVENT_REASON_SINGLESTEP] = &process_singlestep;
xe->process_event[VM_EVENT_REASON_GUEST_REQUEST] = &process_guest_request;
xe->process_event[VM_EVENT_REASON_DEBUG_EXCEPTION] = &process_debug_exception;
xe->process_event[VM_EVENT_REASON_CPUID] = &process_cpuid;
xe->process_event[VM_EVENT_REASON_PRIVILEGED_CALL] = &process_privcall;
xe->process_event[VM_EVENT_REASON_INTERRUPT] = &process_interrupt;
xe->process_event[VM_EVENT_REASON_DESCRIPTOR_ACCESS] = &process_desc_access;
xe->process_event[VM_EVENT_REASON_EMUL_UNIMPLEMENTED] = &process_unimplemented_emul;

// Wire the driver entry points so the generic LibVMI layer dispatches
// event operations to the Xen-specific implementations.
vmi->driver.events_listen_ptr = &xen_events_listen;
vmi->driver.set_reg_access_ptr = &xen_set_reg_access;
vmi->driver.set_intr_access_ptr = &xen_set_intr_access;
vmi->driver.set_mem_access_ptr = &xen_set_mem_access;
vmi->driver.start_single_step_ptr = &xen_start_single_step;
vmi->driver.stop_single_step_ptr = &xen_stop_single_step;
vmi->driver.shutdown_single_step_ptr = &xen_shutdown_single_step;
vmi->driver.set_guest_requested_ptr = &xen_set_guest_requested_event;
vmi->driver.set_cpuid_event_ptr = &xen_set_cpuid_event;
vmi->driver.set_debug_event_ptr = &xen_set_debug_event;
vmi->driver.set_privcall_event_ptr = &xen_set_privcall_event;
vmi->driver.set_desc_access_event_ptr = &xen_set_desc_access_event;
vmi->driver.set_failed_emulation_event_ptr = &xen_set_failed_emulation_event;

Get a bitmap of supported monitor events in the form: (1 << XEN_DOMCTL_MONITOR_EVENT_*)

// Query which monitor events this Xen/domain supports, as a bitmask of
// (1 << XEN_DOMCTL_MONITOR_EVENT_*).
// NOTE(review): the return value is ignored; on failure the capabilities
// bitmask may be left unset -- consider checking the result.
xen->libxcw.xc_monitor_get_capabilities(xch, dom, &xe->monitor_capabilities);
 
#ifdef HAVE_LIBXENSTORE
/*
 * BUG FIX: the guard previously tested xe->process_event[] with an
 * XS_EVENT_REASON_* index, but the domain-watch handler lives in the
 * separate process_xs_event[] table (the one assigned below).  Indexing
 * process_event with an XS_EVENT_REASON_* constant conflates two distinct
 * dispatch tables; check the table we actually write to.
 */
if ( !xe->process_xs_event[XS_EVENT_REASON_DOMAIN_WATCH] )
    xe->process_xs_event[XS_EVENT_REASON_DOMAIN_WATCH] = &process_domain_watch;
if ( !vmi->driver.set_domain_watch_event_ptr )
    vmi->driver.set_domain_watch_event_ptr = &xen_set_domain_watch_event;
#endif
 
// Publish the (possibly newly allocated) events context on the instance.
xen->events = xe;

dbprint(VMI_DEBUG_XEN, "--Xen common events interface initialized\n");

/*
 * Starting with Xen 4.13 we have a new libxc API to get the real vm_event
 * version and we don't have to deduce it from the Xen minor version,
 * allowing vm_event versions to be backported to older Xen releases.
 */
if ( xen->libxcw.xc_vm_event_get_version ) {
    int vm_event_abi = xen->libxcw.xc_vm_event_get_version(xch);
    dbprint(VMI_DEBUG_XEN, "--Xen vm_event ABI version: %i\n", vm_event_abi);

    switch (vm_event_abi) {
        case 5:
            return init_events_5(vmi);
        case 6:
            return init_events_6(vmi);
        case 7:
            return init_events_7(vmi);
        default:
            errprint("Unsupported Xen vm_event ABI: %i\n", vm_event_abi);
            break;
    };
} else {
    // Pre-4.13: deduce the vm_event version from the Xen minor version.
    switch (xen->minor_version) {
        case 6 ... 7:
            return init_events_1(vmi);
        case 8 ... 10:
            return init_events_2(vmi);
        case 11:
            return init_events_3(vmi);
        case 12:
            return init_events_4(vmi);
        default:
            errprint("Unsupported Xen events version\n");
            break;
    };
}
// An unsupported ABI/version falls through into the error path below.

err:
// NOTE(review): on failure the mapped ring page and the event channel
// handle opened above are not torn down here, and if xe aliased a
// pre-existing xen->events this g_free leaves xen->events dangling --
// confirm cleanup is handled by the caller / xen_events_destroy.
g_free(xe);
return VMI_FAILURE;
}

🌱 Back to Garden