Lines Matching refs:sptep: every source line in the KVM x86 MMU (arch/x86/kvm/mmu/mmu.c) that references the symbol sptep, a pointer to a shadow page table entry (SPTE). Each entry below reads: <line number in the file> <source fragment> in <enclosing function>() <symbol kind: argument, local or member>.
145 u64 *sptep; member
164 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
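
Context for lines 145 and 164: sptep is the current-entry pointer of the shadow-walk iterator, and line 164 is the middle of the lockless walk macro that refreshes it on every step. A sketch of the iterator and macro as they appear in mmu.c, reconstructed from memory (field order and the macro name for_each_shadow_entry_lockless are not shown in the listing, so treat them as approximate):

struct kvm_shadow_walk_iterator {
        u64 addr;               /* address being translated by the walk */
        hpa_t shadow_addr;      /* physical address of the current shadow page table */
        u64 *sptep;             /* pointer to the SPTE for 'addr' at 'level' */
        int level;
        unsigned index;
};

/*
 * Lockless variant: every step re-reads the SPTE with
 * mmu_spte_get_lockless() so a racing zap is observed safely.
 */
#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)     \
        for (shadow_walk_init(&(_walker), _vcpu, _addr);                \
             shadow_walk_okay(&(_walker)) &&                            \
                ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });  \
             __shadow_walk_next(&(_walker), spte))
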
171 static void mmu_spte_set(u64 *sptep, u64 spte);
276 static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep) in kvm_flush_remote_tlbs_sptep() argument
278 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in kvm_flush_remote_tlbs_sptep()
279 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep)); in kvm_flush_remote_tlbs_sptep()
284 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
289 trace_mark_mmio_spte(sptep, gfn, spte); in mark_mmio_spte()
290 mmu_spte_set(sptep, spte); in mark_mmio_spte()
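
Lines 284-290 are mark_mmio_spte(), which caches an emulated-MMIO access by installing a special not-present SPTE that encodes the gfn and access rights. Assembled from the listed fragments; the make_mmio_spte() call in the middle is filled in from memory:

static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
                           unsigned int access)
{
        /* Encode gfn and access rights into a not-present MMIO SPTE. */
        u64 spte = make_mmio_spte(vcpu, gfn, access);

        trace_mark_mmio_spte(sptep, gfn, spte);
        mmu_spte_set(sptep, spte);
}
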
329 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
331 WRITE_ONCE(*sptep, spte); in __set_spte()
334 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
336 WRITE_ONCE(*sptep, spte); in __update_clear_spte_fast()
339 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
341 return xchg(sptep, spte); in __update_clear_spte_slow()
344 static u64 __get_spte_lockless(u64 *sptep) in __get_spte_lockless() argument
346 return READ_ONCE(*sptep); in __get_spte_lockless()
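
Lines 329-346 are the CONFIG_X86_64 low-level accessors. A 64-bit SPTE is one naturally aligned word, so a plain store or load is already atomic; xchg is only needed when the caller must observe Accessed/Dirty bits the CPU may set concurrently. Assembled from the listed fragments, with comments added here:

/* Plain atomic store: used when nothing can race with this SPTE. */
static void __set_spte(u64 *sptep, u64 spte)
{
        WRITE_ONCE(*sptep, spte);
}

/* Fast update: the old value has no volatile, hardware-set bits to preserve. */
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
        WRITE_ONCE(*sptep, spte);
}

/* Slow update: atomic swap so concurrently-set A/D bits come back to the caller. */
static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
        return xchg(sptep, spte);
}

/* Lockless read, e.g. for the walk_shadow_page_lockless paths. */
static u64 __get_spte_lockless(u64 *sptep)
{
        return READ_ONCE(*sptep);
}
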
357 static void count_spte_clear(u64 *sptep, u64 spte) in count_spte_clear() argument
359 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in count_spte_clear()
369 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
373 ssptep = (union split_spte *)sptep; in __set_spte()
388 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
392 ssptep = (union split_spte *)sptep; in __update_clear_spte_fast()
404 count_spte_clear(sptep, spte); in __update_clear_spte_fast()
407 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
411 ssptep = (union split_spte *)sptep; in __update_clear_spte_slow()
418 count_spte_clear(sptep, spte); in __update_clear_spte_slow()
441 static u64 __get_spte_lockless(u64 *sptep) in __get_spte_lockless() argument
443 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in __get_spte_lockless()
444 union split_spte spte, *orig = (union split_spte *)sptep; in __get_spte_lockless()
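
Lines 357-444 are the 32-bit PAE counterparts: a 64-bit SPTE cannot be stored atomically there, so it is handled as a union split_spte of two 32-bit halves, and lockless readers use the shadow page's clear_spte_count (bumped by count_spte_clear()) to detect torn reads. A sketch of the lockless read path, reconstructed from memory around the listed lines; barrier placement is illustrative rather than quoted:

union split_spte {
        struct {
                u32 spte_low;
                u32 spte_high;
        };
        u64 spte;
};

static u64 __get_spte_lockless(u64 *sptep)
{
        struct kvm_mmu_page *sp = sptep_to_sp(sptep);
        union split_spte spte, *orig = (union split_spte *)sptep;
        int count;

retry:
        count = sp->clear_spte_count;   /* bumped by count_spte_clear() on zap */
        smp_rmb();

        spte.spte_low = orig->spte_low;
        smp_rmb();

        spte.spte_high = orig->spte_high;
        smp_rmb();

        /* If a writer raced with us the read may be torn; retry. */
        if (unlikely(spte.spte_low != orig->spte_low ||
                     count != sp->clear_spte_count))
                goto retry;

        return spte.spte;
}
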
471 static void mmu_spte_set(u64 *sptep, u64 new_spte) in mmu_spte_set() argument
473 WARN_ON(is_shadow_present_pte(*sptep)); in mmu_spte_set()
474 __set_spte(sptep, new_spte); in mmu_spte_set()
481 static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte) in mmu_spte_update_no_track() argument
483 u64 old_spte = *sptep; in mmu_spte_update_no_track()
489 mmu_spte_set(sptep, new_spte); in mmu_spte_update_no_track()
494 __update_clear_spte_fast(sptep, new_spte); in mmu_spte_update_no_track()
496 old_spte = __update_clear_spte_slow(sptep, new_spte); in mmu_spte_update_no_track()
512 static bool mmu_spte_update(u64 *sptep, u64 new_spte) in mmu_spte_update() argument
515 u64 old_spte = mmu_spte_update_no_track(sptep, new_spte); in mmu_spte_update()
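
Lines 471-515 are the mid-level update helpers: mmu_spte_set() installs a not-yet-present SPTE, mmu_spte_update_no_track() picks the fast non-atomic write or the atomic xchg depending on whether the old SPTE carries bits the hardware could flip underneath us, and mmu_spte_update() converts lost write permission or Accessed/Dirty state into a TLB-flush request. A condensed sketch reconstructed from memory; helpers such as spte_has_volatile_bits() and kvm_set_pfn_accessed() are not part of the listing:

static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
{
        u64 old_spte = *sptep;

        WARN_ON(!is_shadow_present_pte(new_spte));

        if (!is_shadow_present_pte(old_spte)) {
                mmu_spte_set(sptep, new_spte);  /* nothing to race with yet */
                return old_spte;
        }

        if (!spte_has_volatile_bits(old_spte))
                __update_clear_spte_fast(sptep, new_spte);
        else
                old_spte = __update_clear_spte_slow(sptep, new_spte);

        WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
        return old_spte;
}

static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
        bool flush = false;
        u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);

        if (!is_shadow_present_pte(old_spte))
                return false;

        /* Losing write access or A/D information requires a remote TLB flush. */
        if (is_mmu_writable_spte(old_spte) && !is_writable_pte(new_spte))
                flush = true;

        if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
                flush = true;
                kvm_set_pfn_accessed(spte_to_pfn(old_spte));
        }

        if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
                flush = true;
                kvm_set_pfn_dirty(spte_to_pfn(old_spte));
        }

        return flush;
}
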
553 static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep) in mmu_spte_clear_track_bits() argument
556 u64 old_spte = *sptep; in mmu_spte_clear_track_bits()
557 int level = sptep_to_sp(sptep)->role.level; in mmu_spte_clear_track_bits()
562 __update_clear_spte_fast(sptep, 0ull); in mmu_spte_clear_track_bits()
564 old_spte = __update_clear_spte_slow(sptep, 0ull); in mmu_spte_clear_track_bits()
596 static void mmu_spte_clear_no_track(u64 *sptep) in mmu_spte_clear_no_track() argument
598 __update_clear_spte_fast(sptep, 0ull); in mmu_spte_clear_no_track()
601 static u64 mmu_spte_get_lockless(u64 *sptep) in mmu_spte_get_lockless() argument
603 return __get_spte_lockless(sptep); in mmu_spte_get_lockless()
607 static bool mmu_spte_age(u64 *sptep) in mmu_spte_age() argument
609 u64 spte = mmu_spte_get_lockless(sptep); in mmu_spte_age()
616 (unsigned long *)sptep); in mmu_spte_age()
626 mmu_spte_update_no_track(sptep, spte); in mmu_spte_age()
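
Lines 553-626 cover zapping and aging. mmu_spte_clear_track_bits() clears an SPTE while handing back the old value so Accessed/Dirty state is not lost, and mmu_spte_age() clears the accessed state for the MMU-notifier aging path. A sketch of the aging side, reconstructed around the listed fragments (mark_spte_for_access_track() and shadow_accessed_mask come from memory, not from the listing):

static bool mmu_spte_age(u64 *sptep)
{
        u64 spte = mmu_spte_get_lockless(sptep);

        if (!is_accessed_spte(spte))
                return false;

        if (spte_ad_enabled(spte)) {
                /* Hardware A/D bits: clearing the Accessed bit is enough. */
                clear_bit((ffs(shadow_accessed_mask) - 1),
                          (unsigned long *)sptep);
        } else {
                /*
                 * Access-tracked SPTE: capture the dirty state before the
                 * SPTE is marked for access tracking, then rewrite it.
                 */
                if (is_writable_pte(spte))
                        kvm_set_pfn_dirty(spte_to_pfn(spte));

                spte = mark_spte_for_access_track(spte);
                mmu_spte_update_no_track(sptep, spte);
        }

        return true;
}
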
1012 struct kvm_rmap_head *rmap_head, u64 *sptep) in kvm_zap_one_rmap_spte() argument
1014 mmu_spte_clear_track_bits(kvm, sptep); in kvm_zap_one_rmap_spte()
1015 pte_list_remove(sptep, rmap_head); in kvm_zap_one_rmap_spte()
1128 u64 *sptep; in rmap_get_first() local
1135 sptep = (u64 *)rmap_head->val; in rmap_get_first()
1141 sptep = iter->desc->sptes[iter->pos]; in rmap_get_first()
1143 BUG_ON(!is_shadow_present_pte(*sptep)); in rmap_get_first()
1144 return sptep; in rmap_get_first()
1154 u64 *sptep; in rmap_get_next() local
1159 sptep = iter->desc->sptes[iter->pos]; in rmap_get_next()
1160 if (sptep) in rmap_get_next()
1169 sptep = iter->desc->sptes[iter->pos]; in rmap_get_next()
1176 BUG_ON(!is_shadow_present_pte(*sptep)); in rmap_get_next()
1177 return sptep; in rmap_get_next()
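
Lines 1128-1177 are the rmap iterator behind every for_each_rmap_spte() loop below: an rmap head either stores a single sptep inline or points to a chain of pte_list_desc arrays. A sketch of the iterator state and the loop macro, with field names reconstructed from memory:

struct rmap_iterator {
        /* private fields, only touched by the iterator helpers */
        struct pte_list_desc *desc;     /* current descriptor, NULL for the inline case */
        int pos;                        /* index of the current sptep within the descriptor */
};

/*
 * Visit every SPTE that maps the gfn tracked by this rmap head.  Callers
 * hold mmu_lock; the BUG_ON()s in rmap_get_first/next assert that each
 * returned sptep still points at a shadow-present SPTE.
 */
#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)                 \
        for (_spte_ = rmap_get_first(_rmap_head_, _iter_);              \
             _spte_; _spte_ = rmap_get_next(_iter_))
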
1184 static void drop_spte(struct kvm *kvm, u64 *sptep) in drop_spte() argument
1186 u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep); in drop_spte()
1189 rmap_remove(kvm, sptep); in drop_spte()
1192 static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush) in drop_large_spte() argument
1196 sp = sptep_to_sp(sptep); in drop_large_spte()
1199 drop_spte(kvm, sptep); in drop_large_spte()
1202 kvm_flush_remote_tlbs_sptep(kvm, sptep); in drop_large_spte()
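
Lines 1184-1202 are the droppers: drop_spte() zaps one SPTE and removes it from the rmap, and drop_large_spte() is used when a huge mapping has to make room for a lower-level page table link. Assembled from the listed fragments; the level check is filled in from memory:

static void drop_spte(struct kvm *kvm, u64 *sptep)
{
        u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);

        if (is_shadow_present_pte(old_spte))
                rmap_remove(kvm, sptep);
}

static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
{
        struct kvm_mmu_page *sp;

        sp = sptep_to_sp(sptep);
        WARN_ON(sp->role.level == PG_LEVEL_4K); /* only huge SPTEs expected here */

        drop_spte(kvm, sptep);

        if (flush)
                kvm_flush_remote_tlbs_sptep(kvm, sptep);
}
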
1218 static bool spte_write_protect(u64 *sptep, bool pt_protect) in spte_write_protect() argument
1220 u64 spte = *sptep; in spte_write_protect()
1226 rmap_printk("spte %p %llx\n", sptep, *sptep); in spte_write_protect()
1232 return mmu_spte_update(sptep, spte); in spte_write_protect()
1238 u64 *sptep; in rmap_write_protect() local
1242 for_each_rmap_spte(rmap_head, &iter, sptep) in rmap_write_protect()
1243 flush |= spte_write_protect(sptep, pt_protect); in rmap_write_protect()
1248 static bool spte_clear_dirty(u64 *sptep) in spte_clear_dirty() argument
1250 u64 spte = *sptep; in spte_clear_dirty()
1252 rmap_printk("spte %p %llx\n", sptep, *sptep); in spte_clear_dirty()
1256 return mmu_spte_update(sptep, spte); in spte_clear_dirty()
1259 static bool spte_wrprot_for_clear_dirty(u64 *sptep) in spte_wrprot_for_clear_dirty() argument
1262 (unsigned long *)sptep); in spte_wrprot_for_clear_dirty()
1263 if (was_writable && !spte_ad_enabled(*sptep)) in spte_wrprot_for_clear_dirty()
1264 kvm_set_pfn_dirty(spte_to_pfn(*sptep)); in spte_wrprot_for_clear_dirty()
1278 u64 *sptep; in __rmap_clear_dirty() local
1282 for_each_rmap_spte(rmap_head, &iter, sptep) in __rmap_clear_dirty()
1283 if (spte_ad_need_write_protect(*sptep)) in __rmap_clear_dirty()
1284 flush |= spte_wrprot_for_clear_dirty(sptep); in __rmap_clear_dirty()
1286 flush |= spte_clear_dirty(sptep); in __rmap_clear_dirty()
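
Lines 1218-1286 do write protection and dirty-log clearing across an rmap chain. spte_write_protect() strips the writable bit (and, when protecting for shadow paging, the MMU-writable software bit) through mmu_spte_update() so the caller learns whether a TLB flush is needed; rmap_write_protect() folds that over every mapping of the gfn. A condensed sketch reconstructed from the listed lines; the rmap_printk() tracing is dropped:

static bool spte_write_protect(u64 *sptep, bool pt_protect)
{
        u64 spte = *sptep;

        if (!is_writable_pte(spte) &&
            !(pt_protect && is_mmu_writable_spte(spte)))
                return false;   /* already write-protected, nothing to do */

        if (pt_protect)
                spte &= ~shadow_mmu_writable_mask;
        spte &= ~PT_WRITABLE_MASK;

        return mmu_spte_update(sptep, spte);
}

static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
                               bool pt_protect)
{
        u64 *sptep;
        struct rmap_iterator iter;
        bool flush = false;

        for_each_rmap_spte(rmap_head, &iter, sptep)
                flush |= spte_write_protect(sptep, pt_protect);

        return flush;
}
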
1454 u64 *sptep; in kvm_set_pte_rmap() local
1464 for_each_rmap_spte(rmap_head, &iter, sptep) { in kvm_set_pte_rmap()
1466 sptep, *sptep, gfn, level); in kvm_set_pte_rmap()
1471 kvm_zap_one_rmap_spte(kvm, rmap_head, sptep); in kvm_set_pte_rmap()
1475 *sptep, new_pfn); in kvm_set_pte_rmap()
1477 mmu_spte_clear_track_bits(kvm, sptep); in kvm_set_pte_rmap()
1478 mmu_spte_set(sptep, new_spte); in kvm_set_pte_rmap()
1608 u64 *sptep; in kvm_age_rmap() local
1612 for_each_rmap_spte(rmap_head, &iter, sptep) in kvm_age_rmap()
1613 young |= mmu_spte_age(sptep); in kvm_age_rmap()
1622 u64 *sptep; in kvm_test_age_rmap() local
1625 for_each_rmap_spte(rmap_head, &iter, sptep) in kvm_test_age_rmap()
1626 if (is_accessed_spte(*sptep)) in kvm_test_age_rmap()
1772 u64 *sptep; in kvm_mmu_mark_parents_unsync() local
1775 for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) { in kvm_mmu_mark_parents_unsync()
1776 mark_unsync(sptep); in kvm_mmu_mark_parents_unsync()
2237 static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct, in kvm_mmu_child_role() argument
2240 struct kvm_mmu_page *parent_sp = sptep_to_sp(sptep); in kvm_mmu_child_role()
2277 role.quadrant = spte_index(sptep) & 1; in kvm_mmu_child_role()
2284 u64 *sptep, gfn_t gfn, in kvm_mmu_get_child_sp() argument
2289 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) in kvm_mmu_get_child_sp()
2292 role = kvm_mmu_child_role(sptep, direct, access); in kvm_mmu_get_child_sp()
2338 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; in shadow_walk_okay()
2356 __shadow_walk_next(iterator, *iterator->sptep); in shadow_walk_next()
2360 struct kvm_mmu_memory_cache *cache, u64 *sptep, in __link_shadow_page() argument
2372 if (is_shadow_present_pte(*sptep)) in __link_shadow_page()
2373 drop_large_spte(kvm, sptep, flush); in __link_shadow_page()
2377 mmu_spte_set(sptep, spte); in __link_shadow_page()
2379 mmu_page_add_parent_pte(cache, sp, sptep); in __link_shadow_page()
2391 mark_unsync(sptep); in __link_shadow_page()
2394 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep, in link_shadow_page() argument
2397 __link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp, true); in link_shadow_page()
2400 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, in validate_direct_spte() argument
2403 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) { in validate_direct_spte()
2413 child = spte_to_child_sp(*sptep); in validate_direct_spte()
2417 drop_parent_pte(child, sptep); in validate_direct_spte()
2418 kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep); in validate_direct_spte()
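
Lines 2400-2418 are validate_direct_spte(): when a direct shadow page is about to be linked at this sptep, an existing non-leaf link is kept only if its child page was created with the same access permissions; otherwise the parent pointer is dropped and the stale translation flushed so a correctly-permissioned child can be installed. A sketch reconstructed from the listed lines; the role.access comparison is filled in from memory:

static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                                 unsigned int direct_access)
{
        if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
                struct kvm_mmu_page *child;

                child = spte_to_child_sp(*sptep);
                if (child->role.access == direct_access)
                        return;         /* existing child is still a valid link */

                drop_parent_pte(child, sptep);
                kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep);
        }
}
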
2468 u64 *sptep; in kvm_mmu_unlink_parents() local
2471 while ((sptep = rmap_get_first(&sp->parent_ptes, &iter))) in kvm_mmu_unlink_parents()
2472 drop_parent_pte(sp, sptep); in kvm_mmu_unlink_parents()
2840 u64 *sptep, unsigned int pte_access, gfn_t gfn, in mmu_set_spte() argument
2843 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in mmu_set_spte()
2857 *sptep, write_fault, gfn); in mmu_set_spte()
2861 mark_mmio_spte(vcpu, sptep, gfn, pte_access); in mmu_set_spte()
2865 if (is_shadow_present_pte(*sptep)) { in mmu_set_spte()
2870 if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) { in mmu_set_spte()
2872 u64 pte = *sptep; in mmu_set_spte()
2875 drop_parent_pte(child, sptep); in mmu_set_spte()
2877 } else if (pfn != spte_to_pfn(*sptep)) { in mmu_set_spte()
2879 spte_to_pfn(*sptep), pfn); in mmu_set_spte()
2880 drop_spte(vcpu->kvm, sptep); in mmu_set_spte()
2886 wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch, in mmu_set_spte()
2889 if (*sptep == spte) { in mmu_set_spte()
2892 flush |= mmu_spte_update(sptep, spte); in mmu_set_spte()
2893 trace_kvm_mmu_set_spte(level, gfn, sptep); in mmu_set_spte()
2904 pgprintk("%s: setting spte %llx\n", __func__, *sptep); in mmu_set_spte()
2908 rmap_add(vcpu, slot, sptep, gfn, pte_access); in mmu_set_spte()
2911 kvm_mmu_page_set_access(sp, spte_index(sptep), pte_access); in mmu_set_spte()
2946 struct kvm_mmu_page *sp, u64 *sptep) in __direct_pte_prefetch() argument
2953 i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1); in __direct_pte_prefetch()
2957 if (is_shadow_present_pte(*spte) || spte == sptep) { in __direct_pte_prefetch()
2970 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) in direct_pte_prefetch() argument
2974 sp = sptep_to_sp(sptep); in direct_pte_prefetch()
2994 __direct_pte_prefetch(vcpu, sp, sptep); in direct_pte_prefetch()
3179 disallowed_hugepage_adjust(fault, *it.sptep, it.level); in direct_map()
3185 sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn, true, ACC_ALL); in direct_map()
3189 link_shadow_page(vcpu, it.sptep, sp); in direct_map()
3198 ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL, in direct_map()
3203 direct_pte_prefetch(vcpu, it.sptep); in direct_map()
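
Lines 3179-3203 are the core of direct_map(): walk the shadow page table toward the fault's goal level, creating and linking intermediate shadow pages as needed, then install the leaf SPTE and prefetch its neighbours. A condensed sketch of that loop built around the listed calls; declarations, error handling and the NX-huge-page accounting are trimmed, and the base_gfn rounding is written with KVM_PAGES_PER_HPAGE from memory:

/* Inside direct_map(); 'it', 'sp', 'base_gfn' and 'ret' are declared earlier. */
for_each_shadow_entry(vcpu, fault->addr, it) {
        /* Never overwrite an executable leaf with an NX huge page. */
        if (fault->nx_huge_page_workaround_enabled)
                disallowed_hugepage_adjust(fault, *it.sptep, it.level);

        base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
        if (it.level == fault->goal_level)
                break;

        /* Reuse an existing child page table or allocate a new one... */
        sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn, true, ACC_ALL);
        if (sp == ERR_PTR(-EEXIST))
                continue;

        /* ...and point the non-leaf SPTE at it. */
        link_shadow_page(vcpu, it.sptep, sp);
}

ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
                   base_gfn, fault->pfn, fault);
if (ret != RET_PF_SPURIOUS)
        direct_pte_prefetch(vcpu, it.sptep);
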
3309 u64 *sptep, u64 old_spte, u64 new_spte) in fast_pf_fix_direct_spte() argument
3323 if (!try_cmpxchg64(sptep, &old_spte, new_spte)) in fast_pf_fix_direct_spte()
3357 u64 *sptep = NULL; in fast_pf_get_last_sptep() local
3360 sptep = iterator.sptep; in fast_pf_get_last_sptep()
3364 return sptep; in fast_pf_get_last_sptep()
3375 u64 *sptep = NULL; in fast_page_fault() local
3387 sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->addr, &spte); in fast_page_fault()
3389 sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte); in fast_page_fault()
3394 sp = sptep_to_sp(sptep); in fast_page_fault()
3463 if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) { in fast_page_fault()
3475 trace_fast_page_fault(vcpu, fault, sptep, spte, ret); in fast_page_fault()
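
Lines 3309-3475 belong to fast_page_fault(), which runs without mmu_lock: it reads the last SPTE locklessly, computes a new value, and installs it with a compare-and-exchange so that a racing zap or update simply makes the fix fail and the fault falls back to the slow path. A sketch of that cmpxchg step, reconstructed around the try_cmpxchg64() shown at line 3323; mark_page_dirty_in_slot() is filled in from memory:

static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu,
                                    struct kvm_page_fault *fault,
                                    u64 *sptep, u64 old_spte, u64 new_spte)
{
        /*
         * No mmu_lock is held: only install new_spte if the SPTE still holds
         * exactly the value the fast-path decision was based on.
         */
        if (!try_cmpxchg64(sptep, &old_spte, new_spte))
                return false;

        /* The faulting write will be replayed by the guest; log the dirty page. */
        if (is_writable_pte(new_spte) && !is_writable_pte(old_spte))
                mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);

        return true;
}
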
4043 spte = mmu_spte_get_lockless(iterator.sptep); in get_walk()
4052 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) in get_mmio_spte() argument
4069 *sptep = 0ull; in get_mmio_spte()
4073 *sptep = sptes[leaf]; in get_mmio_spte()
4161 clear_sp_write_flooding_count(iterator.sptep); in shadow_page_table_clear_flood()
4612 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in sync_mmio_spte() argument
4615 if (unlikely(is_mmio_spte(*sptep))) { in sync_mmio_spte()
4616 if (gfn != get_mmio_spte_gfn(*sptep)) { in sync_mmio_spte()
4617 mmu_spte_clear_no_track(sptep); in sync_mmio_spte()
4621 mark_mmio_spte(vcpu, sptep, gfn, access); in sync_mmio_spte()
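
Lines 4612-4621 are sync_mmio_spte(), used while syncing an unsync shadow page: a cached MMIO SPTE is kept (and refreshed) only if it still encodes the same gfn, otherwise it is cleared without rmap bookkeeping; returning true tells the caller the entry was handled as MMIO. Assembled from the listed fragments:

static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
                           unsigned int access)
{
        if (unlikely(is_mmio_spte(*sptep))) {
                if (gfn != get_mmio_spte_gfn(*sptep)) {
                        mmu_spte_clear_no_track(sptep);
                        return true;    /* stale MMIO SPTE dropped */
                }

                /* Same gfn: refresh the MMIO SPTE with the current access bits. */
                mark_mmio_spte(vcpu, sptep, gfn, access);
                return true;
        }

        return false;
}
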
6315 u64 *sptep, spte; in shadow_mmu_split_huge_page() local
6322 sptep = &sp->spt[index]; in shadow_mmu_split_huge_page()
6339 if (is_shadow_present_pte(*sptep)) { in shadow_mmu_split_huge_page()
6340 flush |= !is_last_spte(*sptep, sp->role.level); in shadow_mmu_split_huge_page()
6345 mmu_spte_set(sptep, spte); in shadow_mmu_split_huge_page()
6346 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access); in shadow_mmu_split_huge_page()
6511 u64 *sptep; in kvm_mmu_zap_collapsible_spte() local
6517 for_each_rmap_spte(rmap_head, &iter, sptep) { in kvm_mmu_zap_collapsible_spte()
6518 sp = sptep_to_sp(sptep); in kvm_mmu_zap_collapsible_spte()
6530 kvm_zap_one_rmap_spte(kvm, rmap_head, sptep); in kvm_mmu_zap_collapsible_spte()
6533 kvm_flush_remote_tlbs_sptep(kvm, sptep); in kvm_mmu_zap_collapsible_spte()