/xen-4.10.0-shim-comet/xen/arch/x86/mm/hap/

guest_walk.c
     72  return gfn_x(INVALID_GFN);  in hap_p2m_ga_to_gfn()
     79  return gfn_x(INVALID_GFN);  in hap_p2m_ga_to_gfn()
    112  p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));  in hap_p2m_ga_to_gfn()
    113  return gfn_x(INVALID_GFN);  in hap_p2m_ga_to_gfn()
    118  return gfn_x(INVALID_GFN);  in hap_p2m_ga_to_gfn()
    124  return gfn_x(gfn);  in hap_p2m_ga_to_gfn()
    137  return gfn_x(INVALID_GFN);  in hap_p2m_ga_to_gfn()
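Note: hap_p2m_ga_to_gfn() returns a raw unsigned long, so every failure path above unwraps the typesafe sentinel with gfn_x(INVALID_GFN) rather than returning a gfn_t; callers then compare against the same raw value. A minimal sketch of that calling convention (translate_ga() and its range check are hypothetical stand-ins, not Xen code):

    #include <stdio.h>

    typedef struct { unsigned long gfn; } gfn_t;
    static inline unsigned long gfn_x(gfn_t g) { return g.gfn; }
    #define INVALID_GFN ((gfn_t){ ~0UL })

    /* Hypothetical translator following the listed return pattern:
     * success hands back the raw frame number, failure the unwrapped
     * INVALID_GFN sentinel. */
    static unsigned long translate_ga(unsigned long ga)
    {
        if ( ga >> 52 )                   /* made-up "out of range" check */
            return gfn_x(INVALID_GFN);
        return ga >> 12;                  /* frame number of the address */
    }

    int main(void)
    {
        unsigned long gfn = translate_ga(0x1234000UL);

        if ( gfn == gfn_x(INVALID_GFN) )  /* callers test the raw sentinel */
            fprintf(stderr, "translation failed\n");
        else
            printf("gfn %#lx\n", gfn);
        return 0;
    }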
nested_ept.c
    191  start = _gfn((gfn_x(start) & ~gfn_lvl_mask) +  in nept_walk_tables()
    195  gw->lxe[0].epte = (gfn_x(start) << PAGE_SHIFT) | gflags;  in nept_walk_tables()
    239  *l1gfn = gfn_x(INVALID_GFN);  in nept_translate_l2ga()
/xen-4.10.0-shim-comet/xen/include/asm-x86/

guest_pt.h
     38  return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT;  in gfn_to_paddr()
     43  #define get_gfn(d, g, t) get_gfn_type((d), gfn_x(g), (t), P2M_ALLOC)
     93  { return (guest_l1e_t) { (gfn_x(gfn) << PAGE_SHIFT) | flags }; }  in guest_l1e_from_gfn()
     95  { return (guest_l2e_t) { (gfn_x(gfn) << PAGE_SHIFT) | flags }; }  in guest_l2e_from_gfn()
    180  { return l1e_from_pfn(gfn_x(gfn), flags); }  in guest_l1e_from_gfn()
    182  { return l2e_from_pfn(gfn_x(gfn), flags); }  in guest_l2e_from_gfn()
    184  { return l3e_from_pfn(gfn_x(gfn), flags); }  in guest_l3e_from_gfn()
    187  { return l4e_from_pfn(gfn_x(gfn), flags); }  in guest_l4e_from_gfn()
    383  return (gfn_x(gfn) << PAGE_SHIFT) | (gw->va & ~PAGE_MASK);  in guest_walk_to_gpa()
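Note: the guest_pt.h hits show both directions of the conversion: gfn_to_paddr() (line 38) widens the frame number to paddr_t before shifting by PAGE_SHIFT so the high bits survive, and the guest_lNe_from_gfn() helpers compose a page-table entry as (gfn << PAGE_SHIFT) | flags. A self-contained sketch assuming PAGE_SHIFT == 12 and a 64-bit paddr_t; guest_pte_from_gfn() is a hypothetical stand-in for the per-level helpers:

    #include <stdint.h>
    #include <assert.h>

    #define PAGE_SHIFT 12
    typedef uint64_t paddr_t;                 /* stand-in for Xen's paddr_t */
    typedef struct { unsigned long gfn; } gfn_t;
    static inline unsigned long gfn_x(gfn_t g) { return g.gfn; }
    static inline gfn_t _gfn(unsigned long g) { return (gfn_t){ g }; }

    /* Line 38: widen to paddr_t *before* shifting so a large frame
     * number cannot lose its high bits. */
    static inline paddr_t gfn_to_paddr(gfn_t gfn)
    {
        return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT;
    }

    /* Lines 93/95: a guest PTE is the page address with the low 12
     * bits reused for flags (hypothetical helper name). */
    static inline uint64_t guest_pte_from_gfn(gfn_t gfn, uint32_t flags)
    {
        return ((uint64_t)gfn_x(gfn) << PAGE_SHIFT) | flags;
    }

    int main(void)
    {
        assert(gfn_to_paddr(_gfn(0x1234)) == 0x1234000ULL);
        assert(guest_pte_from_gfn(_gfn(0x1234), 0x67) == 0x1234067ULL);
        return 0;
    }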
p2m.h
    331  #define POD_LAST_SUPERPAGE (gfn_x(INVALID_GFN) & ~(gfn_x(INVALID_GFN) >> 1))
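Note: since INVALID_GFN is _gfn(~0UL), POD_LAST_SUPERPAGE evaluates to just the most significant bit of the gfn space; the p2m-pod.c hits further down show it being tested and masked off as an in-band flag bit. A worked check of the bit arithmetic:

    #include <assert.h>

    int main(void)
    {
        /* INVALID_GFN is _gfn(~0UL), so its raw value is all-ones. */
        unsigned long invalid = ~0UL;
        unsigned long pod_last = invalid & ~(invalid >> 1);

        /* ~0UL >> 1 clears only the top bit, so the AND leaves exactly
         * the most significant bit of the gfn space. */
        assert(pod_last == 1UL << (sizeof(unsigned long) * 8 - 1));
        return 0;
    }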
paging.h
    364  return !(gfn_x(gfn) >> (d->arch.cpuid->extd.maxphysaddr - PAGE_SHIFT));  in gfn_valid()
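Note: gfn_valid() (line 364) treats a gfn as addressable exactly when shifting it right by (maxphysaddr - PAGE_SHIFT) leaves nothing, i.e. when the frame number uses no bits at or above the guest's physical-address width. A sketch with a hypothetical 40-bit maxphysaddr:

    #include <stdbool.h>
    #include <assert.h>

    #define PAGE_SHIFT 12

    /* Mirror of paging.h line 364: with a guest maxphysaddr of, say,
     * 40 bits, gfns must fit in 40 - 12 = 28 bits. */
    static bool gfn_valid(unsigned long gfn, unsigned int maxphysaddr)
    {
        return !(gfn >> (maxphysaddr - PAGE_SHIFT));
    }

    int main(void)
    {
        assert(gfn_valid((1UL << 28) - 1, 40));   /* last addressable gfn */
        assert(!gfn_valid(1UL << 28, 40));        /* first out-of-range gfn */
        return 0;
    }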
/xen-4.10.0-shim-comet/xen/arch/x86/mm/

p2m.c
    530  put_gfn(p2m->domain, gfn_x(gfn));  in p2m_get_page_from_gfn()
    552  fn_mask |= gfn_x(gfn) | todo;  in p2m_set_entry()
    922  gfn_x(gfn), mfn_x(mfn));  in guest_physmap_add_entry()
   1039  unsigned long gfn = gfn_x(first_gfn);  in p2m_finish_type_change()
   1994  if ( l2_gfn == gfn_x(INVALID_GFN) )  in paging_gva_to_gfn()
   1995  return gfn_x(INVALID_GFN);  in paging_gva_to_gfn()
   2004  return gfn_x(INVALID_GFN);  in paging_gva_to_gfn()
   2214  __put_gfn(*ap2m, gfn_x(gfn));  in p2m_altp2m_lazy_copy()
   2221  __put_gfn(hp2m, gfn_x(gfn));  in p2m_altp2m_lazy_copy()
   2234  gfn = _gfn(gfn_x(gfn) & mask);  in p2m_altp2m_lazy_copy()
    [all …]
guest_walk.c
    202  if ( !(gfn_x(start) & 1) )  in guest_walk_tables()
    207  start = _gfn((gfn_x(start) & ~GUEST_L3_GFN_MASK) +  in guest_walk_tables()
    307  if ( !(gfn_x(start) & 1) )  in guest_walk_tables()
    312  start = _gfn((gfn_x(start) & ~GUEST_L2_GFN_MASK) +  in guest_walk_tables()
    316  gw->el1e = (gfn_x(start) << PAGE_SHIFT) | flags;  in guest_walk_tables()
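Note: the pairs at lines 207/312 splinter a superpage mapping: mask the walked gfn down to the superpage's base frame, add the 4K index selected by the virtual address, and (line 316) synthesize an effective l1 entry from the result. A sketch of the 2M case; the 0x1ff mask (512 4K frames per 2M superpage) is assumed here rather than taken from the headers:

    #include <stdint.h>
    #include <assert.h>

    #define PAGE_SHIFT          12
    #define GUEST_L2_GFN_MASK   0x1ffUL   /* assumed: 512 4K frames / 2M */

    /* Sketch of the splintering at lines 312/316: take the superpage's
     * base gfn, add the 4K index the address selects, and build a
     * synthetic l1 entry from the result. */
    static uint64_t synthesize_l1e(unsigned long superpage_gfn,
                                   unsigned long l1_index, uint32_t flags)
    {
        unsigned long gfn = (superpage_gfn & ~GUEST_L2_GFN_MASK) + l1_index;
        return ((uint64_t)gfn << PAGE_SHIFT) | flags;
    }

    int main(void)
    {
        /* gfn 0x40123 lives in the superpage based at 0x40000;
         * index 0x45 selects frame 0x40045. */
        assert(synthesize_l1e(0x40123, 0x45, 0x7) ==
               ((0x40045UL << PAGE_SHIFT) | 0x7));
        return 0;
    }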
p2m-pod.c
    718  if ( !superpage_aligned(gfn_x(gfn)) )  in p2m_pod_zero_check_superpage()
    842  t.gfn = gfn_x(gfn);  in p2m_pod_zero_check_superpage()
   1009  t.gfn = gfn_x(gfns[i]);  in p2m_pod_zero_check()
   1048  start = gfn_x(p2m->pod.reclaim_single);  in p2m_pod_emergency_sweep()
   1058  for ( i = gfn_x(p2m->pod.reclaim_single); i > 0 ; i-- )  in p2m_pod_emergency_sweep()
   1110  if ( gfn_x(gfn) & POD_LAST_SUPERPAGE )  in pod_eager_reclaim()
   1112  gfn = _gfn(gfn_x(gfn) & ~POD_LAST_SUPERPAGE);  in pod_eager_reclaim()
   1125  mrp->list[idx] = gfn_x(INVALID_GFN);  in pod_eager_reclaim()
   1149  gfn_t gfn_aligned = _gfn((gfn_x(gfn) >> order) << order);  in p2m_pod_demand_populate()
   1239  t.gfn = gfn_x(gfn);  in p2m_pod_demand_populate()
    [all …]
mem_sharing.c
    740  mfn = get_gfn_query(d, gfn_x(gfn), &p2mt);  in debug_gfn()
    743  d->domain_id, gfn_x(gfn));  in debug_gfn()
    745  put_gfn(d, gfn_x(gfn));  in debug_gfn()
    783  mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma, 0, NULL);  in nominate_page()
    797  gfn_x(gfn), mfn_x(mfn), d->domain_id);  in nominate_page()
    827  amfn = get_gfn_type_access(ap2m, gfn_x(gfn), &ap2mt, &ap2ma, 0, NULL);  in nominate_page()
    867  if ( mem_sharing_gfn_alloc(page, d, gfn_x(gfn)) == NULL )  in nominate_page()
    876  BUG_ON(p2m_change_type_one(d, gfn_x(gfn), p2mt, p2m_ram_shared));  in nominate_page()
    890  put_gfn(d, gfn_x(gfn));  in nominate_page()
    906  get_two_gfns(sd, gfn_x(sgfn), &smfn_type, NULL, &smfn,  in share_pages()
    [all …]
mem_access.c
    218  req->u.mem_access.gfn = gfn_x(gfn);  in p2m_mem_access_check()
    247  unsigned long gfn_l = gfn_x(gfn);  in p2m_set_altp2m_mem_access()
    377  for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )  in p2m_set_mem_access()
p2m-ept.c
    682  unsigned long gfn = gfn_x(gfn_);  in ept_set_entry()
    920  unsigned long gfn = gfn_x(gfn_);  in ept_get_entry()
   1376  p2m->min_remapped_gfn = gfn_x(INVALID_GFN);  in p2m_init_altp2m_ept()
/xen-4.10.0-shim-comet/xen/arch/arm/

p2m.c
    203  root_table = gfn_x(gfn) >> (level_orders[P2M_ROOT_LEVEL - 1]);  in p2m_get_root_pointer()
    224  ptr = radix_tree_lookup(&p2m->mem_access_settings, gfn_x(gfn));  in p2m_mem_access_radix_get()
    323  if ( gfn_x(gfn) > gfn_x(p2m->max_mapped_gfn) )  in p2m_get_entry()
    327  gfn_x(p2m->max_mapped_gfn) )  in p2m_get_entry()
    594  radix_tree_delete(&p2m->mem_access_settings, gfn_x(gfn));  in p2m_mem_access_radix_set()
    598  rc = radix_tree_insert(&p2m->mem_access_settings, gfn_x(gfn),  in p2m_mem_access_radix_set()
    605  &p2m->mem_access_settings, gfn_x(gfn)),  in p2m_mem_access_radix_set()
    998  mask |= gfn_x(sgfn) | nr;  in p2m_set_entry()
   1313  for ( ; gfn_x(start) < gfn_x(end);  in relinquish_p2m_mapping()
   1378  for ( ; gfn_x(start) < gfn_x(end); start = next_gfn )  in p2m_cache_flush()
    [all …]
mem_access.c
     65  i = radix_tree_lookup(&p2m->mem_access_settings, gfn_x(gfn));  in __p2m_get_mem_access()
    415  start += gfn_x(gfn_next_boundary(gfn, order)) - gfn_x(gfn);  in p2m_set_mem_access()
vpl011.c
    465  gfn_x(info->gfn),  in domain_vpl011_init()
/xen-4.10.0-shim-comet/xen/include/xen/

mm.h
    107  #define gfn_x  macro
    110  #undef gfn_x
    115  return _gfn(gfn_x(gfn) + i);  in gfn_add()
    120  return _gfn(max(gfn_x(x), gfn_x(y)));  in gfn_max()
    125  return _gfn(min(gfn_x(x), gfn_x(y)));  in gfn_min()
    130  return gfn_x(x) == gfn_x(y);  in gfn_eq()
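Note: this is the core of the interface. Lines 107/110 appear to be the grep-fodder #define/#undef pair that lets identifier searches land on gfn_x even though the real accessor is generated by a macro (TYPE_SAFE() in Xen), and lines 115-130 show the idiom used everywhere else in this listing: unwrap with gfn_x(), operate on the raw unsigned long, re-wrap with _gfn(). A hand-written sketch of the shape (illustrative, not Xen's actual generated code):

    #include <stdbool.h>
    #include <assert.h>

    /* Minimal sketch of the typesafe wrapper: the struct makes it a
     * compile error to mix gfn_t with a plain unsigned long. */
    typedef struct { unsigned long gfn; } gfn_t;

    static inline gfn_t _gfn(unsigned long g) { return (gfn_t){ g }; }
    static inline unsigned long gfn_x(gfn_t g) { return g.gfn; }

    /* Helpers as on lines 115 and 130: unwrap, operate, re-wrap. */
    static inline gfn_t gfn_add(gfn_t gfn, unsigned long i)
    {
        return _gfn(gfn_x(gfn) + i);
    }

    static inline bool gfn_eq(gfn_t x, gfn_t y)
    {
        return gfn_x(x) == gfn_x(y);
    }

    int main(void)
    {
        assert(gfn_eq(gfn_add(_gfn(41), 1), _gfn(42)));
        return 0;
    }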
/xen-4.10.0-shim-comet/xen/arch/x86/mm/shadow/

multi.c
   2781  d.gfn=gfn_x(gfn);  in trace_shadow_emulate_other()
   3101  put_gfn(d, gfn_x(gfn));  in sh_page_fault()
   3147  put_gfn(d, gfn_x(gfn));  in sh_page_fault()
   3156  put_gfn(d, gfn_x(gfn));  in sh_page_fault()
   3183  put_gfn(d, gfn_x(gfn));  in sh_page_fault()
   3297  put_gfn(d, gfn_x(gfn));  in sh_page_fault()
   3367  put_gfn(d, gfn_x(gfn));  in sh_page_fault()
   3546  put_gfn(d, gfn_x(gfn));  in sh_page_fault()
   3557  put_gfn(d, gfn_x(gfn));  in sh_page_fault()
   3731  return gfn_x(gfn);  in sh_gva_to_gfn()
    [all …]
none.c
     50  return gfn_x(INVALID_GFN);  in _gva_to_gfn()
types.h
    198  #define get_gfn_query(d, g, t) get_gfn_type((d), gfn_x(g), (t), 0)
    327  | (gfn_x(gfn) << SH_L1E_MMIO_GFN_SHIFT)  in sh_l1e_mmio()
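Note: sh_l1e_mmio() (line 327) stashes the gfn inside the payload bits of a magic shadow l1 entry so a later fault can recover which emulated MMIO frame was touched. A round-trip sketch; the marker bits, shift, and field mask below are invented for illustration and do not match Xen's real SH_L1E_MMIO_* values:

    #include <stdint.h>
    #include <assert.h>

    #define SH_L1E_MMIO_MAGIC      0xf000000000000000ULL  /* assumed marker */
    #define SH_L1E_MMIO_GFN_SHIFT  10                     /* assumed shift  */
    #define SH_L1E_MMIO_GFN_MASK   0x000ffffffffffc00ULL  /* assumed field  */

    /* Pack a gfn (plus flag bits) into a recognizably-magic entry. */
    static uint64_t sh_l1e_mmio(unsigned long gfn, uint32_t gflags)
    {
        return SH_L1E_MMIO_MAGIC
               | (((uint64_t)gfn << SH_L1E_MMIO_GFN_SHIFT)
                  & SH_L1E_MMIO_GFN_MASK)
               | gflags;
    }

    /* Recover the gfn from the magic entry's payload field. */
    static unsigned long sh_l1e_mmio_gfn(uint64_t sl1e)
    {
        return (sl1e & SH_L1E_MMIO_GFN_MASK) >> SH_L1E_MMIO_GFN_SHIFT;
    }

    int main(void)
    {
        assert(sh_l1e_mmio_gfn(sh_l1e_mmio(0x42, 0)) == 0x42);
        return 0;
    }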
/xen-4.10.0-shim-comet/xen/arch/x86/

debug.c
     61  mfn = get_gfn(dp, gfn_x(*gfn), &gfntype);  in dbg_hvm_va2mfn()
     73  put_gfn(dp, gfn_x(*gfn));  in dbg_hvm_va2mfn()
    192  put_gfn(dp, gfn_x(gfn));  in dbg_rw_guest_mem()
/xen-4.10.0-shim-comet/xen/include/asm-arm/

grant_table.h
     69  (i < (t)->max_grant_frames))? 0 : gfn_x((t)->arch.gfn[i]))
p2m.h
    344  gfn = _gfn(gfn_x(gfn) & ~((1UL << order) - 1));  in gfn_next_boundary()
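Note: line 344 is the align-down half of gfn_next_boundary(): (1UL << order) - 1 builds a low-bit mask and the AND snaps the gfn to the start of its 2^order block. A sketch of the whole helper; the final "+ (1UL << order)" step is inferred from the helper's name rather than shown in the hit:

    #include <assert.h>

    /* Align a gfn down to its 2^order boundary, then step to the next
     * boundary -- the shape suggested by p2m.h line 344. */
    static unsigned long gfn_next_boundary(unsigned long gfn,
                                           unsigned int order)
    {
        gfn &= ~((1UL << order) - 1);
        return gfn + (1UL << order);
    }

    int main(void)
    {
        /* order 9 = 512 frames = one 2M block of 4K pages. */
        assert(gfn_next_boundary(0x1234, 9) == 0x1400);
        return 0;
    }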
mm.h
    234  #define gfn_to_gaddr(gfn) pfn_to_paddr(gfn_x(gfn))
/xen-4.10.0-shim-comet/xen/drivers/passthrough/x86/

iommu.c
     64  if ( gfn != gfn_x(INVALID_GFN) )  in arch_iommu_populate_page_table()
/xen-4.10.0-shim-comet/xen/arch/x86/hvm/

mtrr.c
    550  if ( ((gfn_x(gfn) & mask) >= range->start) &&  in hvm_get_mem_pinned_cacheattr()
    551  ((gfn_x(gfn) | ~mask) <= range->end) )  in hvm_get_mem_pinned_cacheattr()
    556  if ( ((gfn_x(gfn) & mask) <= range->end) &&  in hvm_get_mem_pinned_cacheattr()
    557  ((gfn_x(gfn) | ~mask) >= range->start) )  in hvm_get_mem_pinned_cacheattr()
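Note: both tests use the same trick: with the low order bits of mask clear, gfn & mask and gfn | ~mask are the first and last frames of the aligned region containing gfn. Lines 550-551 ask whether that whole region lies inside [range->start, range->end]; lines 556-557 ask whether it merely overlaps the range. A sketch of the two predicates (helper names are hypothetical):

    #include <stdbool.h>
    #include <assert.h>

    /* mask has the low `order` bits clear, so gfn & mask / gfn | ~mask
     * bound the aligned region containing gfn. */
    static bool region_inside(unsigned long gfn, unsigned long mask,
                              unsigned long start, unsigned long end)
    {
        return ((gfn & mask) >= start) && ((gfn | ~mask) <= end);
    }

    static bool region_overlaps(unsigned long gfn, unsigned long mask,
                                unsigned long start, unsigned long end)
    {
        return ((gfn & mask) <= end) && ((gfn | ~mask) >= start);
    }

    int main(void)
    {
        unsigned long mask = ~0xfUL;    /* 16-frame aligned region */

        assert(region_inside(0x105, mask, 0x100, 0x10f));
        assert(!region_inside(0x105, mask, 0x100, 0x10e));
        assert(region_overlaps(0x105, mask, 0x10e, 0x1ff));
        return 0;
    }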
ioreq.c
    207  if ( gfn != gfn_x(INVALID_GFN) )  in hvm_free_ioreq_gfn()
    424  if ( bufioreq_gfn != gfn_x(INVALID_GFN) )  in hvm_ioreq_server_map_pages()
    438  unsigned long ioreq_gfn = gfn_x(INVALID_GFN);  in hvm_ioreq_server_setup_pages()
    439  unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);  in hvm_ioreq_server_setup_pages()