Lines matching refs: gpc

39 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in kvm_xen_shared_info_init() local
49 kvm_gpc_deactivate(gpc); in kvm_xen_shared_info_init()
54 ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE); in kvm_xen_shared_info_init()
65 read_lock_irq(&gpc->lock); in kvm_xen_shared_info_init()
67 if (gpc->valid) in kvm_xen_shared_info_init()
70 read_unlock_irq(&gpc->lock); in kvm_xen_shared_info_init()
84 struct shared_info *shinfo = gpc->khva; in kvm_xen_shared_info_init()
91 struct compat_shared_info *shinfo = gpc->khva; in kvm_xen_shared_info_init()
107 read_unlock_irq(&gpc->lock); in kvm_xen_shared_info_init()
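
Read in order, the references above trace the standard gfn_to_pfn_cache setup flow: deactivate any stale mapping, activate the cache at the new GPA, then initialise the mapped page while holding gpc->lock. A minimal sketch of that flow, assuming the long_mode test implied by the 64-bit/compat split and eliding the actual wallclock initialisation:

/*
 * Sketch only: the name and arguments are illustrative.  An invalid GPA
 * would instead just call kvm_gpc_deactivate(gpc) and return, as at
 * line 49 above.
 */
static int xen_shinfo_init_sketch(struct kvm *kvm, gpa_t gpa)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	int ret;

	ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
	if (ret)
		return ret;

	/*
	 * Dereference the mapping only under gpc->lock so a concurrent
	 * invalidation cannot pull the page out from under us.
	 */
	read_lock_irq(&gpc->lock);
	if (gpc->valid) {
		if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
			struct shared_info *shinfo = gpc->khva;
			/* ... fill in the wallclock fields, 64-bit layout ... */
		} else {
			struct compat_shared_info *shinfo = gpc->khva;
			/* ... same fields, 32-bit compat layout ... */
		}
	}
	read_unlock_irq(&gpc->lock);

	return ret;
}
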
500 struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache; in kvm_xen_inject_pending_events() local
511 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_inject_pending_events()
512 while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) { in kvm_xen_inject_pending_events()
513 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_inject_pending_events()
515 if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info))) in kvm_xen_inject_pending_events()
518 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_inject_pending_events()
523 struct vcpu_info *vi = gpc->khva; in kvm_xen_inject_pending_events()
535 struct compat_vcpu_info *vi = gpc->khva; in kvm_xen_inject_pending_events()
546 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_inject_pending_events()
552 mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT); in kvm_xen_inject_pending_events()
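
The references in kvm_xen_inject_pending_events() show the canonical access pattern for a vCPU-local cache: dereference gpc->khva only while gpc->lock is held and kvm_gpc_check() succeeds; otherwise drop the lock, kvm_gpc_refresh(), and retry. A condensed sketch (the actual event-bit updates are elided; bailing out when the refresh fails is an assumption):

static void xen_inject_pending_sketch(struct kvm_vcpu *v)
{
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
			return;		/* vcpu_info cannot be mapped; give up */

		read_lock_irqsave(&gpc->lock, flags);
	}

	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		struct vcpu_info *vi = gpc->khva;
		/* ... OR the pending event words into vi->evtchn_pending_sel ... */
	} else {
		struct compat_vcpu_info *vi = gpc->khva;
		/* ... same update, compat layout ... */
	}

	read_unlock_irqrestore(&gpc->lock, flags);

	/* The guest-visible vcpu_info was written, so mark the page dirty. */
	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}
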
557 struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache; in __kvm_xen_has_interrupt() local
574 read_lock_irqsave(&gpc->lock, flags); in __kvm_xen_has_interrupt()
575 while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) { in __kvm_xen_has_interrupt()
576 read_unlock_irqrestore(&gpc->lock, flags); in __kvm_xen_has_interrupt()
589 if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info))) { in __kvm_xen_has_interrupt()
596 read_lock_irqsave(&gpc->lock, flags); in __kvm_xen_has_interrupt()
599 rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending; in __kvm_xen_has_interrupt()
600 read_unlock_irqrestore(&gpc->lock, flags); in __kvm_xen_has_interrupt()
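
__kvm_xen_has_interrupt() uses the same check/refresh loop, but only to read a single flag; roughly (the checks at the skipped line numbers are elided, and returning 0 on a failed refresh is an assumption):

static int xen_has_interrupt_sketch(struct kvm_vcpu *v)
{
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;
	int rc;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* ... elided (see the skipped line numbers above) ... */
		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
			return 0;	/* assumption: no mapping, report no interrupt */

		read_lock_irqsave(&gpc->lock, flags);
	}

	rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
	read_unlock_irqrestore(&gpc->lock, flags);

	return rc;
}
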
1171 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in wait_pending_event() local
1178 read_lock_irqsave(&gpc->lock, flags); in wait_pending_event()
1179 if (!kvm_gpc_check(gpc, PAGE_SIZE)) in wait_pending_event()
1184 struct shared_info *shinfo = gpc->khva; in wait_pending_event()
1187 struct compat_shared_info *shinfo = gpc->khva; in wait_pending_event()
1199 read_unlock_irqrestore(&gpc->lock, flags); in wait_pending_event()
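
wait_pending_event() only reads the shared_info page and, unlike the loops above, never refreshes the cache: if kvm_gpc_check() fails it simply gives up. Sketched, with the per-port test elided and the default return value stated as an assumption:

static bool wait_pending_sketch(struct kvm *kvm)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	unsigned long flags;
	bool ret = true;	/* assumption: treat a stale cache as "pending" */

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out;

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		/* ... test the waited-for ports against shinfo->evtchn_pending ... */
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		/* ... same test, compat layout ... */
	}
out:
	read_unlock_irqrestore(&gpc->lock, flags);
	return ret;
}
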
1554 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in kvm_xen_set_evtchn_fast() local
1582 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
1583 if (!kvm_gpc_check(gpc, PAGE_SIZE)) in kvm_xen_set_evtchn_fast()
1587 struct shared_info *shinfo = gpc->khva; in kvm_xen_set_evtchn_fast()
1592 struct compat_shared_info *shinfo = gpc->khva; in kvm_xen_set_evtchn_fast()
1613 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
1614 gpc = &vcpu->arch.xen.vcpu_info_cache; in kvm_xen_set_evtchn_fast()
1616 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
1617 if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) { in kvm_xen_set_evtchn_fast()
1628 struct vcpu_info *vcpu_info = gpc->khva; in kvm_xen_set_evtchn_fast()
1634 struct compat_vcpu_info *vcpu_info = gpc->khva; in kvm_xen_set_evtchn_fast()
1650 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
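
kvm_xen_set_evtchn_fast() walks two caches in turn, first the domain-wide shared_info page and then the target vCPU's vcpu_info, and never calls kvm_gpc_refresh(): it may run in atomic context, so a stale mapping makes it bail and leaves the remapping to the slow path below. A rough sketch, with the bit manipulation elided and the -EWOULDBLOCK convention taken as an assumption:

static int xen_set_evtchn_fast_sketch(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	unsigned long flags;
	int rc = -EWOULDBLOCK;	/* assumption: "retry from a sleepable context" */

	/* First mapping: set the port's pending bit in the shared_info page. */
	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out;
	/* ... update evtchn_pending/evtchn_mask, 64-bit or compat layout ... */
	read_unlock_irqrestore(&gpc->lock, flags);

	/* Second mapping: tell the target vCPU about it via its vcpu_info. */
	gpc = &vcpu->arch.xen.vcpu_info_cache;
	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info)))
		goto out;
	/* ... set the bit in evtchn_pending_sel and kick the vCPU ... */
	rc = 0;
out:
	read_unlock_irqrestore(&gpc->lock, flags);
	return rc;
}
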
1699 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in kvm_xen_set_evtchn() local
1707 rc = kvm_gpc_refresh(gpc, PAGE_SIZE); in kvm_xen_set_evtchn()
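
kvm_xen_set_evtchn() is that slow path: it may sleep, so when the fast path reports a stale shared_info cache it can call kvm_gpc_refresh(gpc, PAGE_SIZE) and try again. The retry loop below is a plausible reconstruction (the function arguments and the surrounding serialisation are assumptions):

static int xen_set_evtchn_sketch(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	int rc;

	do {
		rc = kvm_xen_set_evtchn_fast(xe, kvm);
		if (rc != -EWOULDBLOCK)
			break;

		/* serialisation against other users of the shinfo cache elided */
		rc = kvm_gpc_refresh(gpc, PAGE_SIZE);
	} while (!rc);

	return rc;
}
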