Lines matching refs:vcpu — cross-reference hits for the vcpu identifier in the Linux KVM async page fault code (virt/kvm/async_pf.c). The number leading each hit is its line number in that file; the trailing "argument"/"local" tags mark how vcpu is bound at that site.

38 void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)  in kvm_async_pf_vcpu_init()  argument
40 INIT_LIST_HEAD(&vcpu->async_pf.done); in kvm_async_pf_vcpu_init()
41 INIT_LIST_HEAD(&vcpu->async_pf.queue); in kvm_async_pf_vcpu_init()
42 spin_lock_init(&vcpu->async_pf.lock); in kvm_async_pf_vcpu_init()
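The three hits above are the whole body of kvm_async_pf_vcpu_init(). As a sketch reconstructed from the matched lines (only the braces are assumed), it sets up the per-vCPU async page fault state: two lists plus the spinlock that guards the completion list.

void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
        /* Completions waiting to be delivered to the guest */
        INIT_LIST_HEAD(&vcpu->async_pf.done);
        /* All async page faults currently outstanding for this vCPU */
        INIT_LIST_HEAD(&vcpu->async_pf.queue);
        /* Serializes "done" against the async workers */
        spin_lock_init(&vcpu->async_pf.lock);
}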
49 struct kvm_vcpu *vcpu = apf->vcpu; in async_pf_execute() local
50 struct mm_struct *mm = vcpu->kvm->mm; in async_pf_execute()
78 kvm_arch_async_page_present(vcpu, apf); in async_pf_execute()
80 spin_lock(&vcpu->async_pf.lock); in async_pf_execute()
81 first = list_empty(&vcpu->async_pf.done); in async_pf_execute()
82 list_add_tail(&apf->link, &vcpu->async_pf.done); in async_pf_execute()
83 spin_unlock(&vcpu->async_pf.lock); in async_pf_execute()
92 kvm_arch_async_page_present_queued(vcpu); in async_pf_execute()
96 __kvm_vcpu_wake_up(vcpu); in async_pf_execute()
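The hits in async_pf_execute() show the completion half of the mechanism: after resolving the fault in the guest's address space, the worker moves its item onto vcpu->async_pf.done under the lock, notifies the arch code when it added the first pending completion, and wakes the vCPU. A condensed sketch; the lines not present in the listing (the container_of() setup, the fault resolution, the reference dropping) are filled in from context and are approximate:

static void async_pf_execute(struct work_struct *work)
{
        struct kvm_async_pf *apf = container_of(work, typeof(*apf), work);
        struct kvm_vcpu *vcpu = apf->vcpu;
        struct mm_struct *mm = vcpu->kvm->mm;
        bool first;

        /* ... fault the page in via mm on the guest's behalf (elided) ... */

        /* Configs with synchronous injection deliver "page ready" here */
        kvm_arch_async_page_present(vcpu, apf);

        spin_lock(&vcpu->async_pf.lock);
        first = list_empty(&vcpu->async_pf.done);
        list_add_tail(&apf->link, &vcpu->async_pf.done);
        spin_unlock(&vcpu->async_pf.lock);

        /* Only the first pending completion needs to poke the arch code */
        if (first)
                kvm_arch_async_page_present_queued(vcpu);

        __kvm_vcpu_wake_up(vcpu);

        /* ... drop the mm and kvm references taken at setup time (elided) ... */
}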
120 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) in kvm_clear_async_pf_completion_queue() argument
123 while (!list_empty(&vcpu->async_pf.queue)) { in kvm_clear_async_pf_completion_queue()
125 list_first_entry(&vcpu->async_pf.queue, in kvm_clear_async_pf_completion_queue()
137 spin_lock(&vcpu->async_pf.lock); in kvm_clear_async_pf_completion_queue()
138 while (!list_empty(&vcpu->async_pf.done)) { in kvm_clear_async_pf_completion_queue()
140 list_first_entry(&vcpu->async_pf.done, in kvm_clear_async_pf_completion_queue()
144 spin_unlock(&vcpu->async_pf.lock); in kvm_clear_async_pf_completion_queue()
146 spin_lock(&vcpu->async_pf.lock); in kvm_clear_async_pf_completion_queue()
148 spin_unlock(&vcpu->async_pf.lock); in kvm_clear_async_pf_completion_queue()
150 vcpu->async_pf.queued = 0; in kvm_clear_async_pf_completion_queue()
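kvm_clear_async_pf_completion_queue() is the teardown path, and the hits trace its two loops: the first cancels everything still on vcpu->async_pf.queue, the second frees completed items on vcpu->async_pf.done. The adjacent unlock/lock/unlock hits suggest the lock is dropped and retaken inside the second loop so a potentially blocking flush and free can run without the spinlock held. A sketch along those lines, with the cancellation and freeing details elided as assumptions:

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
        /* Cancel every fault still outstanding on the queue */
        while (!list_empty(&vcpu->async_pf.queue)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.queue,
                                         typeof(*work), queue);
                list_del(&work->queue);
                /* ... cancel or flush work->work; free it if cancelled ... */
        }

        /* Free completions that were never delivered */
        spin_lock(&vcpu->async_pf.lock);
        while (!list_empty(&vcpu->async_pf.done)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.done,
                                         typeof(*work), link);
                list_del(&work->link);

                /* Drop the lock around the potentially blocking free */
                spin_unlock(&vcpu->async_pf.lock);
                /* ... flush and free the work item (elided) ... */
                spin_lock(&vcpu->async_pf.lock);
        }
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued = 0;
}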
153 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu) in kvm_check_async_pf_completion() argument
157 while (!list_empty_careful(&vcpu->async_pf.done) && in kvm_check_async_pf_completion()
158 kvm_arch_can_dequeue_async_page_present(vcpu)) { in kvm_check_async_pf_completion()
159 spin_lock(&vcpu->async_pf.lock); in kvm_check_async_pf_completion()
160 work = list_first_entry(&vcpu->async_pf.done, typeof(*work), in kvm_check_async_pf_completion()
163 spin_unlock(&vcpu->async_pf.lock); in kvm_check_async_pf_completion()
165 kvm_arch_async_page_ready(vcpu, work); in kvm_check_async_pf_completion()
167 kvm_arch_async_page_present(vcpu, work); in kvm_check_async_pf_completion()
170 vcpu->async_pf.queued--; in kvm_check_async_pf_completion()
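kvm_check_async_pf_completion() is the vCPU-side drain loop: while completions are pending and the arch code can accept one, it pops the first item off "done" under the lock, tells the arch the page is ready, and delivers a "page present" event. A sketch; the notpresent_injected guard and the cleanup at the bottom of the loop are inferred from the other hits (kvm_setup_async_pf() records notpresent_injected) and from context:

void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        while (!list_empty_careful(&vcpu->async_pf.done) &&
               kvm_arch_can_dequeue_async_page_present(vcpu)) {
                spin_lock(&vcpu->async_pf.lock);
                work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
                                        link);
                list_del(&work->link);
                spin_unlock(&vcpu->async_pf.lock);

                kvm_arch_async_page_ready(vcpu, work);
                /* Skip "page present" if "not present" was never injected */
                if (!work->notpresent_injected)
                        kvm_arch_async_page_present(vcpu, work);

                list_del(&work->queue);
                vcpu->async_pf.queued--;
                /* ... free the work item (elided) ... */
        }
}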
179 bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, in kvm_setup_async_pf() argument
184 if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU) in kvm_setup_async_pf()
200 work->vcpu = vcpu; in kvm_setup_async_pf()
207 list_add_tail(&work->queue, &vcpu->async_pf.queue); in kvm_setup_async_pf()
208 vcpu->async_pf.queued++; in kvm_setup_async_pf()
209 work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work); in kvm_setup_async_pf()
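The setup side caps in-flight faults at ASYNC_PF_PER_VCPU, builds a work item, queues it on vcpu->async_pf.queue, and records whether the arch code managed to inject a "page not present" event; that flag is what kvm_check_async_pf_completion() consults later. A sketch; the allocation (assumed to come from this file's async_pf_cache slab) and the workqueue hand-off are filled in from context:

bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                        unsigned long hva, struct kvm_arch_async_pf *arch)
{
        struct kvm_async_pf *work;

        /* Bound the number of in-flight async faults per vCPU */
        if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
                return false;

        /* Assumed allocation from the file's slab cache */
        work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
        if (!work)
                return false;

        work->vcpu = vcpu;
        /* ... record cr2_or_gpa, hva and *arch in the work item (elided) ... */

        list_add_tail(&work->queue, &vcpu->async_pf.queue);
        vcpu->async_pf.queued++;
        work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);

        /* ... hand the item off to the workqueue (elided) ... */
        return true;
}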
216 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu) in kvm_async_pf_wakeup_all() argument
221 if (!list_empty_careful(&vcpu->async_pf.done)) in kvm_async_pf_wakeup_all()
231 spin_lock(&vcpu->async_pf.lock); in kvm_async_pf_wakeup_all()
232 first = list_empty(&vcpu->async_pf.done); in kvm_async_pf_wakeup_all()
233 list_add_tail(&work->link, &vcpu->async_pf.done); in kvm_async_pf_wakeup_all()
234 spin_unlock(&vcpu->async_pf.lock); in kvm_async_pf_wakeup_all()
237 kvm_arch_async_page_present_queued(vcpu); in kvm_async_pf_wakeup_all()
239 vcpu->async_pf.queued++; in kvm_async_pf_wakeup_all()
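Finally, kvm_async_pf_wakeup_all() injects a synthetic "wake everything up" completion: if the done list is already non-empty the guest will be woken anyway, otherwise a dummy item is added to "done" under the lock, and the arch code is poked only when that item is the first pending one, mirroring async_pf_execute(). A sketch; the allocation and the dummy item's marking are assumptions from context:

int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;
        bool first;

        /* An already pending completion guarantees a wakeup */
        if (!list_empty_careful(&vcpu->async_pf.done))
                return 0;

        /* Assumed allocation of a dummy "wakeup all" item */
        work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;

        spin_lock(&vcpu->async_pf.lock);
        first = list_empty(&vcpu->async_pf.done);
        list_add_tail(&work->link, &vcpu->async_pf.done);
        spin_unlock(&vcpu->async_pf.lock);

        if (first)
                kvm_arch_async_page_present_queued(vcpu);

        vcpu->async_pf.queued++;
        return 0;
}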