Lines matching refs:gpc
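These matches are against KVM's gfn_to_pfn_cache ("gpc") implementation (virt/kvm/pfncache.c in the Linux tree, by all appearances), which caches a guest physical address's gpa -> userspace HVA -> host pfn translation, together with a kernel mapping, so KVM can access guest memory repeatedly without re-walking the memslots or re-faulting the page. For orientation, a condensed sketch of the cached state, assuming a field layout like the contemporary include/linux/kvm_types.h (comments and omissions are mine):

    struct gfn_to_pfn_cache {
            u64 generation;                  /* memslot generation at last refresh */
            gpa_t gpa;                       /* guest physical address being cached */
            unsigned long uhva;              /* userspace HVA backing that gpa */
            struct kvm_memory_slot *memslot;
            struct kvm *kvm;
            struct kvm_vcpu *vcpu;           /* required for KVM_GUEST_USES_PFN */
            struct list_head list;           /* entry on kvm->gpc_list */
            rwlock_t lock;                   /* protects the cached translation */
            struct mutex refresh_lock;       /* serializes refresh vs. deactivate */
            void *khva;                      /* kernel mapping of the cached pfn */
            kvm_pfn_t pfn;                   /* cached pfn, or KVM_PFN_ERR_FAULT */
            enum pfn_cache_usage usage;      /* KVM_HOST_USES_PFN / KVM_GUEST_USES_PFN */
            bool active;                     /* cache is registered and usable */
            bool valid;                      /* cached translation is current */
    };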
29 struct gfn_to_pfn_cache *gpc; in gfn_to_pfn_cache_invalidate_start() local
33 list_for_each_entry(gpc, &kvm->gpc_list, list) { in gfn_to_pfn_cache_invalidate_start()
34 write_lock_irq(&gpc->lock); in gfn_to_pfn_cache_invalidate_start()
37 if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) && in gfn_to_pfn_cache_invalidate_start()
38 gpc->uhva >= start && gpc->uhva < end) { in gfn_to_pfn_cache_invalidate_start()
39 gpc->valid = false; in gfn_to_pfn_cache_invalidate_start()
45 if (gpc->usage & KVM_GUEST_USES_PFN) { in gfn_to_pfn_cache_invalidate_start()
50 __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap); in gfn_to_pfn_cache_invalidate_start()
53 write_unlock_irq(&gpc->lock); in gfn_to_pfn_cache_invalidate_start()
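The matches above (source lines 29-53) are the MMU-notifier invalidation pass: when the host mm invalidates an HVA range, KVM walks every registered cache and, under each cache's write lock, marks invalid any cache whose uhva falls in [start, end); caches whose pfn is consumed directly by the guest additionally get their vCPU flagged for a kick so it leaves guest mode before the page goes away. A condensed reconstruction of that control flow (lines elided by the match output are paraphrased, not quoted):

    list_for_each_entry(gpc, &kvm->gpc_list, list) {
            write_lock_irq(&gpc->lock);

            /* Only caches with a live translation inside the range care. */
            if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
                gpc->uhva >= start && gpc->uhva < end) {
                    gpc->valid = false;

                    /*
                     * If the guest itself consumes the pfn (not just the
                     * host), the owning vCPU must be kicked out of guest
                     * mode before the invalidation completes.
                     */
                    if (gpc->usage & KVM_GUEST_USES_PFN)
                            __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
            }
            write_unlock_irq(&gpc->lock);
    }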
79 bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len) in kvm_gpc_check() argument
81 struct kvm_memslots *slots = kvm_memslots(gpc->kvm); in kvm_gpc_check()
83 if (!gpc->active) in kvm_gpc_check()
86 if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE) in kvm_gpc_check()
89 if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva)) in kvm_gpc_check()
92 if (!gpc->valid) in kvm_gpc_check()
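kvm_gpc_check() (source lines 79-92) is the per-access validity gate: callers take gpc->lock for read, check, and only then dereference gpc->khva. It fails if the cache is inactive, if the gpa+len access would cross a page boundary, if the memslot generation moved since the last refresh, if the uhva is bad, or if an invalidation cleared gpc->valid. A hypothetical caller pattern (the function name, the u32 payload, and the error code are illustrative; the check/refresh loop itself is the standard idiom for this API):

    static int read_guest_u32(struct gfn_to_pfn_cache *gpc, u32 *val)
    {
            unsigned long flags;

            read_lock_irqsave(&gpc->lock, flags);
            while (!kvm_gpc_check(gpc, sizeof(*val))) {
                    read_unlock_irqrestore(&gpc->lock, flags);

                    /* Re-resolve the translation; fail hard if we can't. */
                    if (kvm_gpc_refresh(gpc, sizeof(*val)))
                            return -EFAULT;

                    read_lock_irqsave(&gpc->lock, flags);
            }

            *val = *(u32 *)gpc->khva;       /* safe: lock held, check passed */
            read_unlock_irqrestore(&gpc->lock, flags);
            return 0;
    }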
140 static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc) in hva_to_pfn_retry() argument
143 void *old_khva = gpc->khva - offset_in_page(gpc->khva); in hva_to_pfn_retry()
148 lockdep_assert_held(&gpc->refresh_lock); in hva_to_pfn_retry()
150 lockdep_assert_held_write(&gpc->lock); in hva_to_pfn_retry()
157 gpc->valid = false; in hva_to_pfn_retry()
160 mmu_seq = gpc->kvm->mmu_invalidate_seq; in hva_to_pfn_retry()
163 write_unlock_irq(&gpc->lock); in hva_to_pfn_retry()
186 new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL); in hva_to_pfn_retry()
195 if (gpc->usage & KVM_HOST_USES_PFN) { in hva_to_pfn_retry()
196 if (new_pfn == gpc->pfn) { in hva_to_pfn_retry()
211 write_lock_irq(&gpc->lock); in hva_to_pfn_retry()
217 WARN_ON_ONCE(gpc->valid); in hva_to_pfn_retry()
218 } while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq)); in hva_to_pfn_retry()
220 gpc->valid = true; in hva_to_pfn_retry()
221 gpc->pfn = new_pfn; in hva_to_pfn_retry()
222 gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK); in hva_to_pfn_retry()
234 write_lock_irq(&gpc->lock); in hva_to_pfn_retry()
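hva_to_pfn_retry() (source lines 140-234) resolves uhva -> pfn while closing the race with concurrent MMU-notifier invalidations. It must drop gpc->lock because hva_to_pfn() can sleep, so it first snapshots kvm->mmu_invalidate_seq, faults the page in unlocked, retakes the lock, and loops if the sequence count shows a notifier ran in the window. The KVM_HOST_USES_PFN branch at source lines 195-196 lets the existing kernel mapping be reused when the same pfn comes back. The core pattern, paraphrased around the matched lines:

    gpc->valid = false;

    do {
            mmu_seq = gpc->kvm->mmu_invalidate_seq;
            smp_rmb();                      /* order the seq read vs. the fault */

            write_unlock_irq(&gpc->lock);   /* hva_to_pfn() may sleep */

            new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
            /* ... map new_pfn, reusing old_khva if new_pfn == gpc->pfn ... */

            write_lock_irq(&gpc->lock);

            /* Nobody else may mark the cache valid on our behalf. */
            WARN_ON_ONCE(gpc->valid);
    } while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));

    gpc->valid = true;
    gpc->pfn = new_pfn;
    gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);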
239 static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, in __kvm_gpc_refresh() argument
242 struct kvm_memslots *slots = kvm_memslots(gpc->kvm); in __kvm_gpc_refresh()
262 mutex_lock(&gpc->refresh_lock); in __kvm_gpc_refresh()
264 write_lock_irq(&gpc->lock); in __kvm_gpc_refresh()
266 if (!gpc->active) { in __kvm_gpc_refresh()
271 old_pfn = gpc->pfn; in __kvm_gpc_refresh()
272 old_khva = gpc->khva - offset_in_page(gpc->khva); in __kvm_gpc_refresh()
273 old_uhva = gpc->uhva; in __kvm_gpc_refresh()
276 if (gpc->gpa != gpa || gpc->generation != slots->generation || in __kvm_gpc_refresh()
277 kvm_is_error_hva(gpc->uhva)) { in __kvm_gpc_refresh()
280 gpc->gpa = gpa; in __kvm_gpc_refresh()
281 gpc->generation = slots->generation; in __kvm_gpc_refresh()
282 gpc->memslot = __gfn_to_memslot(slots, gfn); in __kvm_gpc_refresh()
283 gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn); in __kvm_gpc_refresh()
285 if (kvm_is_error_hva(gpc->uhva)) { in __kvm_gpc_refresh()
295 if (!gpc->valid || old_uhva != gpc->uhva) { in __kvm_gpc_refresh()
296 ret = hva_to_pfn_retry(gpc); in __kvm_gpc_refresh()
303 gpc->khva = old_khva + page_offset; in __kvm_gpc_refresh()
315 gpc->valid = false; in __kvm_gpc_refresh()
316 gpc->pfn = KVM_PFN_ERR_FAULT; in __kvm_gpc_refresh()
317 gpc->khva = NULL; in __kvm_gpc_refresh()
321 unmap_old = (old_pfn != gpc->pfn); in __kvm_gpc_refresh()
324 write_unlock_irq(&gpc->lock); in __kvm_gpc_refresh()
326 mutex_unlock(&gpc->refresh_lock); in __kvm_gpc_refresh()
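__kvm_gpc_refresh() (source lines 239-326) is the slow path. Under gpc->refresh_lock (serializing against other refreshes and deactivation) and then gpc->lock, it re-resolves gpa -> memslot -> uhva if the gpa or the memslot generation changed, calls hva_to_pfn_retry() whenever the cache is invalid or the uhva moved, and on failure poisons the cache (valid = false, pfn = KVM_PFN_ERR_FAULT, khva = NULL). One subtlety worth calling out: the old mapping is torn down only after both locks are dropped, and only if the pfn actually changed, as this paraphrase of the tail shows (gpc_unmap is a placeholder; the unmap helper's exact name and signature vary across kernel versions):

    unmap_old = (old_pfn != gpc->pfn);
    write_unlock_irq(&gpc->lock);
    mutex_unlock(&gpc->refresh_lock);

    if (unmap_old)
            gpc_unmap(old_pfn, old_khva);   /* placeholder name, see above */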
334 int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len) in kvm_gpc_refresh() argument
336 return __kvm_gpc_refresh(gpc, gpc->gpa, len); in kvm_gpc_refresh()
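kvm_gpc_refresh() (source lines 334-336) is a thin wrapper that re-validates the cache at the gpa it already holds; it is the call used on the failure path of the kvm_gpc_check() loop sketched earlier, as opposed to kvm_gpc_activate(), which (re)targets the cache at a caller-supplied gpa.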
340 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm, in kvm_gpc_init() argument
346 rwlock_init(&gpc->lock); in kvm_gpc_init()
347 mutex_init(&gpc->refresh_lock); in kvm_gpc_init()
349 gpc->kvm = kvm; in kvm_gpc_init()
350 gpc->vcpu = vcpu; in kvm_gpc_init()
351 gpc->usage = usage; in kvm_gpc_init()
352 gpc->pfn = KVM_PFN_ERR_FAULT; in kvm_gpc_init()
353 gpc->uhva = KVM_HVA_ERR_BAD; in kvm_gpc_init()
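kvm_gpc_init() (source lines 340-353) is one-time constructor work: it initializes the two locks and records the identity fields (kvm, vcpu, usage) while poisoning pfn and uhva; nothing is mapped until activation. A hypothetical end-to-end lifecycle, assuming the contemporary signature kvm_gpc_init(gpc, kvm, vcpu, usage) implied by the matches, with the enclosing context and gpa purely illustrative:

    struct gfn_to_pfn_cache cache;
    int ret;

    kvm_gpc_init(&cache, kvm, vcpu, KVM_HOST_USES_PFN);

    /* Map a single page at an (illustrative) guest physical address. */
    ret = kvm_gpc_activate(&cache, gpa, PAGE_SIZE);
    if (ret)
            return ret;

    /* ... bracket each access with kvm_gpc_check()/kvm_gpc_refresh() ... */

    kvm_gpc_deactivate(&cache);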
357 int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len) in kvm_gpc_activate() argument
359 struct kvm *kvm = gpc->kvm; in kvm_gpc_activate()
361 if (!gpc->active) { in kvm_gpc_activate()
362 if (KVM_BUG_ON(gpc->valid, kvm)) in kvm_gpc_activate()
366 list_add(&gpc->list, &kvm->gpc_list); in kvm_gpc_activate()
374 write_lock_irq(&gpc->lock); in kvm_gpc_activate()
375 gpc->active = true; in kvm_gpc_activate()
376 write_unlock_irq(&gpc->lock); in kvm_gpc_activate()
378 return __kvm_gpc_refresh(gpc, gpa, len); in kvm_gpc_activate()
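kvm_gpc_activate() (source lines 357-378) shows the registration ordering: on first activation the cache is linked into kvm->gpc_list before gpc->active is flipped on, and the flip happens under the write lock. That order appears deliberate: once the cache is on the list, the invalidation walk at source lines 33-53 can already see it, but since valid is still false (sanity-checked by the KVM_BUG_ON at source line 362) there is nothing for the notifier to tear down; only then does the final __kvm_gpc_refresh() populate the translation.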
382 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc) in kvm_gpc_deactivate() argument
384 struct kvm *kvm = gpc->kvm; in kvm_gpc_deactivate()
388 if (gpc->active) { in kvm_gpc_deactivate()
394 write_lock_irq(&gpc->lock); in kvm_gpc_deactivate()
395 gpc->active = false; in kvm_gpc_deactivate()
396 gpc->valid = false; in kvm_gpc_deactivate()
404 old_khva = gpc->khva - offset_in_page(gpc->khva); in kvm_gpc_deactivate()
405 gpc->khva = NULL; in kvm_gpc_deactivate()
407 old_pfn = gpc->pfn; in kvm_gpc_deactivate()
408 gpc->pfn = KVM_PFN_ERR_FAULT; in kvm_gpc_deactivate()
409 write_unlock_irq(&gpc->lock); in kvm_gpc_deactivate()
412 list_del(&gpc->list); in kvm_gpc_deactivate()
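kvm_gpc_deactivate() (source lines 382-412) inverts activation with the same lock discipline: active and valid are cleared and the old khva/pfn stashed under the write lock, the cache is unlinked from kvm->gpc_list so the MMU notifier can no longer reach it, and the actual unmap happens last, outside the lock. Paraphrased teardown order (list-manipulation locking elided, and gpc_unmap again a placeholder name that varies by version):

    write_lock_irq(&gpc->lock);
    gpc->active = false;
    gpc->valid = false;

    /* Stash the mapping so it can be torn down after unlocking. */
    old_khva = gpc->khva - offset_in_page(gpc->khva);
    gpc->khva = NULL;

    old_pfn = gpc->pfn;
    gpc->pfn = KVM_PFN_ERR_FAULT;
    write_unlock_irq(&gpc->lock);

    list_del(&gpc->list);           /* invalidation walk can no longer see it */

    gpc_unmap(old_pfn, old_khva);   /* placeholder name, see above */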