long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg){ long rc = 0; /* * NB: hvm_op can be part of a restarted hypercall; but at the * moment the only hypercalls which do continuations don't need to * store any iteration information (since they're just re-trying * the acquisition of a lock). */ switch ( op ) {(...) case HVMOP_altp2m: rc = current->hcall_compat ? compat_altp2m_op(arg) : do_altp2m_op(arg); break;(...)
/*
 * Set access type for a region of pfns.
 * If gfn == INVALID_GFN, sets the default access type.
 *
 * @d:          Domain whose host p2m is modified.
 * @gfn:        First guest frame of the region (or INVALID_GFN for default).
 * @nr:         Total number of pages in the region.
 * @start:      Number of pages already processed (non-zero on a continued
 *              hypercall).
 * @mask:       Preemption-check granularity mask applied to 'start'.
 * @access:     Requested XENMEM_access_* type.
 * @altp2m_idx: Alternate-p2m index; not used in this implementation.
 *
 * Returns 0 on success, -EINVAL for an unknown access type, a negative
 * errno on mapping failure, or a positive page count ('start' progress)
 * when the operation was preempted and must be continued.
 */
long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
                        uint32_t start, uint32_t mask, xenmem_access_t access,
                        unsigned int altp2m_idx)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    p2m_access_t a;
    unsigned int order;
    long rc = 0;

    /* Translation table from XENMEM_access_* to the internal p2m_access_*. */
    static const p2m_access_t memaccess[] = {
#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
        ACCESS(n),
        ACCESS(r),
        ACCESS(w),
        ACCESS(rw),
        ACCESS(x),
        ACCESS(rx),
        ACCESS(wx),
        ACCESS(rwx),
        ACCESS(rx2rw),
        ACCESS(n2rwx),
#undef ACCESS
    };

    switch ( access )
    {
    case 0 ... ARRAY_SIZE(memaccess) - 1:
        a = memaccess[access];
        break;
    case XENMEM_access_default:
        a = p2m->default_access;
        break;
    default:
        /* Unknown access type requested by the caller. */
        return -EINVAL;
    }

    /*
     * Flip mem_access_enabled to true when a permission is set, as to prevent
     * allocating or inserting super-pages.
     */
    p2m->mem_access_enabled = true;

    /* If request to set default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        p2m->default_access = a;
        return 0;
    }

    p2m_write_lock(p2m);

    /*
     * Walk the region from the continuation point. 'order' is the mapping
     * order reported by p2m_get_entry(), so unmapped holes are skipped a
     * whole (super)page boundary at a time.
     */
    for ( gfn = gfn_add(gfn, start); nr > start;
          gfn = gfn_next_boundary(gfn, order) )
    {
        p2m_type_t t;
        mfn_t mfn = p2m_get_entry(p2m, gfn, &t, NULL, &order, NULL);

        if ( !mfn_eq(mfn, INVALID_MFN) )
        {
            /*
             * Re-write the mapped page as a single 4K entry carrying the
             * new access type (memaccess forbids superpages).
             */
            order = 0;
            rc = p2m_set_entry(p2m, gfn, 1, mfn, t, a);
            if ( rc )
                break;
        }

        /* Account for all pages up to the next mapping boundary. */
        start += gfn_x(gfn_next_boundary(gfn, order)) - gfn_x(gfn);

        /* Check for continuation if it is not the last iteration */
        if ( nr > start && !(start & mask) && hypercall_preempt_check() )
        {
            /* Positive return value = progress so far; caller re-issues. */
            rc = start;
            break;
        }
    }

    p2m_write_unlock(p2m);

    return rc;
}
/*
 * Map/remove a range of pages in the p2m, breaking the range into the
 * largest possible correctly-aligned (super)page mappings.
 *
 * @p2m:  The p2m to modify (write lock expected by __p2m_set_entry()).
 * @sgfn: First guest frame of the range.
 * @nr:   Number of pages to map.
 * @smfn: First machine frame, or INVALID_MFN to remove the mapping.
 * @t:    p2m type for the new entries.
 * @a:    Access permissions for the new entries.
 *
 * Returns 0 on success or the first non-zero error from __p2m_set_entry();
 * on error, entries already written in earlier iterations are left in place.
 */
int p2m_set_entry(struct p2m_domain *p2m, gfn_t sgfn, unsigned long nr,
                  mfn_t smfn, p2m_type_t t, p2m_access_t a)
{
    int rc = 0;

    while ( nr )
    {
        unsigned long mask;
        unsigned long order;

        /*
         * Don't take into account the MFN when removing mapping (i.e
         * MFN_INVALID) to calculate the correct target order.
         *
         * XXX: Support superpage mappings if nr is not aligned to a
         * superpage size.
         *
         * 'mask' combines the alignment of GFN, MFN and the remaining
         * count: any low bit set rules out the corresponding order.
         */
        mask = !mfn_eq(smfn, INVALID_MFN) ? mfn_x(smfn) : 0;
        mask |= gfn_x(sgfn) | nr;

        /* Always map 4k by 4k when memaccess is enabled */
        if ( unlikely(p2m->mem_access_enabled) )
            order = THIRD_ORDER;
        else if ( !(mask & ((1UL << FIRST_ORDER) - 1)) )
            order = FIRST_ORDER;
        else if ( !(mask & ((1UL << SECOND_ORDER) - 1)) )
            order = SECOND_ORDER;
        else
            order = THIRD_ORDER;

        rc = __p2m_set_entry(p2m, sgfn, order, smfn, t, a);
        if ( rc )
            break;

        /* Advance GFN (and MFN, when mapping) past the chunk just done. */
        sgfn = gfn_add(sgfn, (1 << order));
        if ( !mfn_eq(smfn, INVALID_MFN) )
            smfn = mfn_add(smfn, (1 << order));

        nr -= (1 << order);
    }

    return rc;
}
/*
 * Insert an entry in the p2m. This should be called with a mapping
 * equal to a page/superpage (4K, 2M, 1G).
 *
 * The p2m write lock must be held (asserted below). A mapping is removed
 * when smfn == INVALID_MFN. Splits an existing superpage when the target
 * level is deeper than the current mapping, following the architectural
 * break-before-make sequence throughout.
 *
 * Returns 0 on success, -EINVAL if the root table cannot be mapped,
 * -ENOENT if an intermediate table is missing while inserting, -ENOMEM
 * on superpage-split allocation failure, or the error from the radix
 * tree update / IOMMU flush.
 */
static int __p2m_set_entry(struct p2m_domain *p2m,
                           gfn_t sgfn,
                           unsigned int page_order,
                           mfn_t smfn,
                           p2m_type_t t,
                           p2m_access_t a)
{
    unsigned int level = 0;
    /* Leaf level for this order: 3 = 4K, 2 = 2M, 1 = 1G. */
    unsigned int target = 3 - (page_order / LPAE_SHIFT);
    lpae_t *entry, *table, orig_pte;
    int rc;
    /* A mapping is removed if the MFN is invalid. */
    bool removing_mapping = mfn_eq(smfn, INVALID_MFN);
    DECLARE_OFFSETS(offsets, gfn_to_gaddr(sgfn));

    ASSERT(p2m_is_write_locked(p2m));

    /*
     * Check if the level target is valid: we only support
     * 4K - 2M - 1G mapping.
     */
    ASSERT(target > 0 && target <= 3);

    table = p2m_get_root_pointer(p2m, sgfn);
    if ( !table )
        return -EINVAL;

    /* Walk (allocating tables as needed) down to the target level. */
    for ( level = P2M_ROOT_LEVEL; level < target; level++ )
    {
        /*
         * Don't try to allocate intermediate page table if the mapping
         * is about to be removed.
         */
        rc = p2m_next_level(p2m, removing_mapping,
                            level, &table, offsets[level]);
        if ( rc == GUEST_TABLE_MAP_FAILED )
        {
            /*
             * We are here because p2m_next_level has failed to map
             * the intermediate page table (e.g the table does not exist
             * and the p2m tree is read-only). It is a valid case
             * when removing a mapping as it may not exist in the
             * page table. In this case, just ignore it.
             */
            rc = removing_mapping ?  0 : -ENOENT;
            goto out;
        }
        else if ( rc != GUEST_TABLE_NORMAL_PAGE )
            /* Hit a superpage (or hole) above the target level. */
            break;
    }

    entry = table + offsets[level];

    /*
     * If we are here with level < target, we must be at a leaf node,
     * and we need to break up the superpage.
     */
    if ( level < target )
    {
        /* We need to split the original page. */
        lpae_t split_pte = *entry;

        ASSERT(p2m_is_superpage(*entry, level));

        if ( !p2m_split_superpage(p2m, &split_pte, level, target, offsets) )
        {
            /*
             * The current super-page is still in-place, so re-increment
             * the stats.
             */
            p2m->stats.mappings[level]++;

            /* Free the allocated sub-tree */
            p2m_free_entry(p2m, split_pte, level);

            rc = -ENOMEM;
            goto out;
        }

        /*
         * Follow the break-before-make sequence to update the entry.
         * For more details see (D4.7.1 in ARM DDI 0487A.j).
         */
        p2m_remove_pte(entry, p2m->clean_pte);
        p2m_force_tlb_flush_sync(p2m);

        p2m_write_pte(entry, split_pte, p2m->clean_pte);

        /* then move to the level we want to make real changes */
        for ( ; level < target; level++ )
        {
            rc = p2m_next_level(p2m, true, level, &table, offsets[level]);

            /*
             * The entry should be found and either be a table
             * or a superpage if level 3 is not targeted
             */
            ASSERT(rc == GUEST_TABLE_NORMAL_PAGE ||
                   (rc == GUEST_TABLE_SUPER_PAGE && target < 3));
        }

        entry = table + offsets[level];
    }

    /*
     * We should always be there with the correct level because
     * all the intermediate tables have been installed if necessary.
     */
    ASSERT(level == target);

    orig_pte = *entry;

    /*
     * The radix-tree can only work on 4KB. This is only used when
     * memaccess is enabled and during shutdown.
     */
    ASSERT(!p2m->mem_access_enabled || page_order == 0 ||
           p2m->domain->is_dying);
    /*
     * The access type should always be p2m_access_rwx when the mapping
     * is removed.
     */
    ASSERT(!mfn_eq(INVALID_MFN, smfn) || (a == p2m_access_rwx));

    /*
     * Update the mem access permission before updating the P2M. So we
     * don't have to revert the mapping if it has failed.
     */
    rc = p2m_mem_access_radix_set(p2m, sgfn, a);
    if ( rc )
        goto out;

    /*
     * Always remove the entry in order to follow the break-before-make
     * sequence when updating the translation table (D4.7.1 in ARM DDI
     * 0487A.j).
     */
    if ( lpae_is_valid(orig_pte) )
        p2m_remove_pte(entry, p2m->clean_pte);

    if ( removing_mapping )
        /* Flush can be deferred if the entry is removed */
        p2m->need_flush |= !!lpae_is_valid(orig_pte);
    else
    {
        lpae_t pte = mfn_to_p2m_entry(smfn, t, a);

        if ( level < 3 )
            pte.p2m.table = 0; /* Superpage entry */

        /*
         * It is necessary to flush the TLB before writing the new entry
         * to keep coherency when the previous entry was valid.
         *
         * Although, it could be deferred when only the permissions are
         * changed (e.g in case of memaccess).
         */
        if ( lpae_is_valid(orig_pte) )
        {
            if ( likely(!p2m->mem_access_enabled) ||
                 P2M_CLEAR_PERM(pte) != P2M_CLEAR_PERM(orig_pte) )
                p2m_force_tlb_flush_sync(p2m);
            else
                p2m->need_flush = true;
        }
        else if ( !p2m_is_valid(orig_pte) ) /* new mapping */
            p2m->stats.mappings[level]++;

        p2m_write_pte(entry, pte, p2m->clean_pte);

        /* Track the mapped GFN bounds for later p2m teardown/walks. */
        p2m->max_mapped_gfn = gfn_max(p2m->max_mapped_gfn,
                                      gfn_add(sgfn, (1UL << page_order) - 1));
        p2m->lowest_mapped_gfn = gfn_min(p2m->lowest_mapped_gfn, sgfn);
    }

    /* Keep the IOMMU TLB in sync when the domain shares the p2m with it. */
    if ( is_iommu_enabled(p2m->domain) &&
         (lpae_is_valid(orig_pte) || lpae_is_valid(*entry)) )
    {
        unsigned int flush_flags = 0;

        if ( lpae_is_valid(orig_pte) )
            flush_flags |= IOMMU_FLUSHF_modified;
        if ( lpae_is_valid(*entry) )
            flush_flags |= IOMMU_FLUSHF_added;

        rc = iommu_iotlb_flush(p2m->domain, _dfn(gfn_x(sgfn)),
                               1UL << page_order, flush_flags);
    }
    else
        rc = 0;

    /*
     * Free the entry only if the original pte was valid and the base
     * is different (to avoid freeing when permission is changed).
     */
    if ( p2m_is_valid(orig_pte) &&
         !mfn_eq(lpae_get_mfn(*entry), lpae_get_mfn(orig_pte)) )
        p2m_free_entry(p2m, orig_pte, level);

out:
    unmap_domain_page(table);

    return rc;
}
/* Write a pagetable entry.
 *
 * The DSB before the store orders all prior page-table/data writes ahead
 * of the entry update; STRD writes the full 64-bit LPAE entry in a single
 * access (atomic on CPUs that support LPAE), so no observer can see a
 * half-written entry; the trailing DSB makes the new entry visible before
 * any subsequent operation (e.g. a TLB flush) proceeds.
 *
 * If the table entry is changing a text mapping, it is responsibility
 * of the caller to issue an ISB after write_pte.
 *
 * @p:   Location of the entry to update.
 * @pte: New entry value (written by value; 'p' is the only memory output,
 *       hence the "memory" clobber).
 */
static inline void write_pte(lpae_t *p, lpae_t pte)
{
    asm volatile (
        /* Ensure any writes have completed with the old mappings. */
        "dsb;"
        /* Safely write the entry (STRD is atomic on CPUs that support LPAE) */
        "strd %0, %H0, [%1];"
        "dsb;"
        : : "r" (pte.bits), "r" (p) : "memory");
}