/*
 * Sanity-check wrapper around driver_set_mem_access().
 *
 * Before arming a non-clearing access type, verify that at least one
 * generic mem-event handler covering that access type is registered in
 * vmi->mem_events_generic; otherwise refuse, since a triggered event
 * would have no handler. On a successful driver call, keep
 * vmi->max_physical_address in sync with the highest GFN touched.
 *
 * Returns VMI_SUCCESS on success, VMI_FAILURE otherwise.
 */
status_t vmi_set_mem_event(vmi_instance_t vmi, addr_t gfn,
                           vmi_mem_access_t access, uint16_t slat_id)
{
#ifdef ENABLE_SAFETY_CHECKS
    if (!vmi)
        return VMI_FAILURE;
#endif

    /* VMI_MEMACCESS_N clears permissions and never needs a handler. */
    if ( VMI_MEMACCESS_N != access ) {
        GHashTableIter iter;
        vmi_mem_access_t *registered = NULL;
        vmi_event_t *ev = NULL;
        bool covered = 0;

        ghashtable_foreach(vmi->mem_events_generic, iter, &registered, &ev) {
            if ( (*registered) & access ) {
                covered = 1;
                break;
            }
        }

        if ( !covered ) {
            dbprint(VMI_DEBUG_EVENTS, "It is unsafe to set mem access without a handler being registered!\n");
            return VMI_FAILURE;
        }
    }

    if ( VMI_SUCCESS != driver_set_mem_access(vmi, gfn, access, slat_id) )
        return VMI_FAILURE;

    /* Remember the highest physical frame we have operated on. */
    if ( gfn > (vmi->max_physical_address >> vmi->page_shift) )
        vmi->max_physical_address = gfn << vmi->page_shift;

    return VMI_SUCCESS;
}
This is just a sanity-check wrapper that calls driver_set_mem_access(), which in turn invokes a function stored as a pointer in vmi->driver.set_mem_access_ptr. In the case of Xen, that function is:
/*
 * Xen backend for setting memory access permissions on a single guest
 * physical frame.
 *
 * Converts the LibVMI access flags to Xen's xenmem_access_t, then issues
 * the libxc call: against the host p2m when altp2m_idx is 0, or against
 * the given altp2m view otherwise.
 *
 * @vmi:              LibVMI instance (used to fetch the Xen handles).
 * @gpfn:             guest physical frame number to modify.
 * @page_access_flag: requested VMI access type.
 * @altp2m_idx:       altp2m view index; 0 selects the host p2m.
 *
 * Returns VMI_SUCCESS on success, VMI_FAILURE on any error.
 */
status_t xen_set_mem_access(vmi_instance_t vmi, addr_t gpfn,
                            vmi_mem_access_t page_access_flag,
                            uint16_t altp2m_idx)
{
    int rc;
    xenmem_access_t access;
    xen_instance_t *xen = xen_get_instance(vmi);
    xc_interface * xch = xen_get_xchandle(vmi);
    domid_t dom = xen_get_domainid(vmi);

#ifdef ENABLE_SAFETY_CHECKS
    if ( !xch ) {
        errprint("%s error: invalid xc_interface handle\n", __FUNCTION__);
        return VMI_FAILURE;
    }
    if ( dom == (domid_t)VMI_INVALID_DOMID ) {
        errprint("%s error: invalid domid\n", __FUNCTION__);
        return VMI_FAILURE;
    }
#endif

    /* Map the VMI flags onto Xen's xenmem_access_t encoding. */
    if ( VMI_FAILURE == convert_vmi_flags_to_xenmem(page_access_flag, &access) )
        return VMI_FAILURE;

    /* View 0 is the host p2m: plain set_mem_access; otherwise target the
     * requested altp2m view. */
    if ( !altp2m_idx )
        rc = xen->libxcw.xc_set_mem_access(xch, dom, access, gpfn, 1); // 1 page at a time
    else
        rc = xen->libxcw.xc_altp2m_set_mem_access(xch, dom, altp2m_idx, gpfn, access);

    if (rc) {
        errprint("xc_hvm_set_mem_access failed with code: %d\n", rc);
        return VMI_FAILURE;
    }

    dbprint(VMI_DEBUG_XEN, "--Done Setting memaccess on GPFN: %"PRIu64"\n", gpfn);
    return VMI_SUCCESS;
}
/*
 * Hand a prepared privcmd hypercall descriptor to the kernel.
 *
 * The hypercall number and arguments travel in *hypercall; the
 * IOCTL_PRIVCMD_HYPERCALL ioctl on the xencall file descriptor delivers
 * them to the privcmd driver. Returns the ioctl() result.
 */
long osdep_hypercall(xencall_handle *xcall, privcmd_hypercall_t *hypercall)
{
    long ret = ioctl(xcall->fd, IOCTL_PRIVCMD_HYPERCALL, hypercall);

    return ret;
}
linux/drivers/xen/privcmd.c
static long privcmd_ioctl(struct file *file, unsigned int cmd, unsigned long data){ int ret = -ENOTTY; void __user *udata = (void __user *) data; switch (cmd) { case IOCTL_PRIVCMD_HYPERCALL: ret = privcmd_ioctl_hypercall(file, udata); break; case IOCTL_PRIVCMD_MMAP: ret = privcmd_ioctl_mmap(file, udata); break; case IOCTL_PRIVCMD_MMAPBATCH: ret = privcmd_ioctl_mmap_batch(file, udata, 1); break; case IOCTL_PRIVCMD_MMAPBATCH_V2: ret = privcmd_ioctl_mmap_batch(file, udata, 2); break; case IOCTL_PRIVCMD_DM_OP: ret = privcmd_ioctl_dm_op(file, udata); break; case IOCTL_PRIVCMD_RESTRICT: ret = privcmd_ioctl_restrict(file, udata); break; case IOCTL_PRIVCMD_MMAP_RESOURCE: ret = privcmd_ioctl_mmap_resource(file, udata); break; default: break; } return ret;}
linux/drivers/xen/privcmd.c
/*
 * IOCTL_PRIVCMD_HYPERCALL handler: copy a struct privcmd_hypercall from
 * user space and issue it to the hypervisor via privcmd_call().
 *
 * If this file descriptor has been restricted to a specific domain
 * (data->domid != DOMID_INVALID), arbitrary hypercalls are refused with
 * -EPERM. The xen_preemptible_hcall_begin/end bracket surrounds the
 * hypercall itself; the call's return value is passed back unmodified.
 */
static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
    struct privcmd_data *data = file->private_data;
    struct privcmd_hypercall hypercall;
    long ret;

    /* Disallow arbitrary hypercalls if restricted */
    if (data->domid != DOMID_INVALID)
        return -EPERM;

    if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
        return -EFAULT;

    xen_preemptible_hcall_begin();
    /* Forward the operation number and all five argument slots. */
    ret = privcmd_call(hypercall.op,
                       hypercall.arg[0], hypercall.arg[1],
                       hypercall.arg[2], hypercall.arg[3],
                       hypercall.arg[4]);
    xen_preemptible_hcall_end();

    return ret;
}
linux/arch/arm64/xen/hypercall.S
/*
 * arm64 Xen hypercall stubs. Each stub loads the hypercall number into
 * x16 and traps to the hypervisor with "hvc XEN_IMM"; arguments remain
 * in x0-x4 as placed there by the C caller.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-uaccess.h>
#include <xen/interface/xen.h>

/* Immediate used with the hvc instruction to issue a Xen hypercall. */
#define XEN_IMM 0xEA1

/*
 * Emit HYPERVISOR_<name>: put the hypercall number in x16 and trap.
 * The argument registers are untouched, so the C calling convention's
 * x0-x4 are consumed directly by the hypervisor.
 */
#define HYPERCALL_SIMPLE(hypercall)		\
SYM_FUNC_START(HYPERVISOR_##hypercall)		\
	mov x16, #__HYPERVISOR_##hypercall;	\
	hvc XEN_IMM;				\
	ret;					\
SYM_FUNC_END(HYPERVISOR_##hypercall)

/* All arities map onto the same simple stub. */
#define HYPERCALL0 HYPERCALL_SIMPLE
#define HYPERCALL1 HYPERCALL_SIMPLE
#define HYPERCALL2 HYPERCALL_SIMPLE
#define HYPERCALL3 HYPERCALL_SIMPLE
#define HYPERCALL4 HYPERCALL_SIMPLE
#define HYPERCALL5 HYPERCALL_SIMPLE

	.text
HYPERCALL2(xen_version);
HYPERCALL3(console_io);
HYPERCALL3(grant_table_op);
HYPERCALL2(sched_op);
HYPERCALL2(event_channel_op);
HYPERCALL2(hvm_op);
HYPERCALL2(memory_op);
HYPERCALL2(physdev_op);
HYPERCALL3(vcpu_op);
HYPERCALL1(platform_op_raw);
HYPERCALL2(multicall);
HYPERCALL2(vm_assist);
HYPERCALL3(dm_op);

/*
 * long privcmd_call(unsigned op, a1, a2, a3, a4, a5) — the hypercall
 * number arrives as the first C argument (x0); move it to x16 and shift
 * the five hypercall arguments down into x0-x4 before trapping.
 */
SYM_FUNC_START(privcmd_call)
	mov x16, x0
	mov x0, x1
	mov x1, x2
	mov x2, x3
	mov x3, x4
	mov x4, x5
	/*
	 * Privcmd calls are issued by the userspace. The kernel needs to
	 * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
	 * translations to user memory via AT instructions. Since AT
	 * instructions are not affected by the PAN bit (ARMv8.1), we only
	 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
	 * is enabled (it implies that hardware UAO and PAN disabled).
	 */
	uaccess_ttbr0_enable x6, x7, x8

	hvc XEN_IMM

	/*
	 * Disable userspace access from kernel once the hyp call completed.
	 */
	uaccess_ttbr0_disable x6, x7

	ret
SYM_FUNC_END(privcmd_call);
/*
 * Xen hypervisor-side dispatcher for HVMOP_altp2m_* sub-operations.
 * NOTE(review): this is an excerpt — only the set_mem_access case body is
 * shown; the other case bodies are elided with "(...)" markers.
 *
 * Validates the guest-supplied xen_hvm_altp2m_op structure (version,
 * padding, known command), locks the target domain, checks that it is an
 * HVM domain with altp2m active (except for the get/set_domain_state
 * commands), consults the HVM_PARAM_ALTP2M mode and XSM policy, and then
 * dispatches the requested sub-operation.
 */
static int do_altp2m_op(
    XEN_GUEST_HANDLE_PARAM(void) arg)
{
    struct xen_hvm_altp2m_op a;
    struct domain *d = NULL;
    int rc = 0;
    uint64_t mode;

    if ( !hvm_altp2m_supported() )
        return -EOPNOTSUPP;

    if ( copy_from_guest(&a, arg, 1) )
        return -EFAULT;

    /* Reject unknown interface versions and non-zero padding. */
    if ( a.pad1 || a.pad2 || (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) )
        return -EINVAL;

    /* Only commands in this allow-list are dispatched below. */
    switch ( a.cmd )
    {
    case HVMOP_altp2m_get_domain_state:
    case HVMOP_altp2m_set_domain_state:
    case HVMOP_altp2m_vcpu_enable_notify:
    case HVMOP_altp2m_vcpu_disable_notify:
    case HVMOP_altp2m_create_p2m:
    case HVMOP_altp2m_destroy_p2m:
    case HVMOP_altp2m_switch_p2m:
    case HVMOP_altp2m_set_suppress_ve:
    case HVMOP_altp2m_set_suppress_ve_multi:
    case HVMOP_altp2m_get_suppress_ve:
    case HVMOP_altp2m_set_mem_access:
    case HVMOP_altp2m_set_mem_access_multi:
    case HVMOP_altp2m_get_mem_access:
    case HVMOP_altp2m_change_gfn:
    case HVMOP_altp2m_get_p2m_idx:
    case HVMOP_altp2m_set_visibility:
        break;

    default:
        return -EOPNOTSUPP;
    }

    d = rcu_lock_domain_by_any_id(a.domain);
    if ( d == NULL )
        return -ESRCH;

    if ( !is_hvm_domain(d) )
    {
        rc = -EOPNOTSUPP;
        goto out;
    }

    /* Apart from the domain-state queries, altp2m must already be active. */
    if ( (a.cmd != HVMOP_altp2m_get_domain_state) &&
         (a.cmd != HVMOP_altp2m_set_domain_state) &&
         !d->arch.altp2m_active )
    {
        rc = -EOPNOTSUPP;
        goto out;
    }

    mode = d->arch.hvm.params[HVM_PARAM_ALTP2M];

    if ( XEN_ALTP2M_disabled == mode )
    {
        rc = -EINVAL;
        goto out;
    }

    /* XSM policy check; mode determines which callers are permitted. */
    if ( (rc = xsm_hvm_altp2mhvm_op(XSM_OTHER, d, mode, a.cmd)) )
        goto out;

    switch ( a.cmd )
    {
    (...) /* other sub-operation cases elided in this excerpt */

    case HVMOP_altp2m_set_mem_access:
        if ( a.u.mem_access.pad )
            rc = -EINVAL;
        else
            /* Single gfn (count 1, start 0, mask 0) in the given view. */
            rc = p2m_set_mem_access(d, _gfn(a.u.mem_access.gfn), 1, 0, 0,
                                    a.u.mem_access.access,
                                    a.u.mem_access.view);
        break;

    (...) /* other sub-operation cases elided in this excerpt */

    default:
        ASSERT_UNREACHABLE();
    }

 out:
    rcu_unlock_domain(d);

    return rc;
}