Lines matching refs: kvm (identifier cross-reference; judging by the function names, taken over the kernel's virt/kvm/eventfd.c)

36 kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)  in kvm_arch_irqfd_allowed()  argument
46 struct kvm *kvm = irqfd->kvm; in irqfd_inject() local
49 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1, in irqfd_inject()
51 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0, in irqfd_inject()
54 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, in irqfd_inject()
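
The three kvm_set_irq() calls above encode the two trigger models: a plain irqfd pulses the GSI (assert then deassert, i.e. edge-triggered), while a resampling irqfd only asserts and leaves deassertion to the ack/resample path. A sketch of that branch, reconstructed from the fragments above (the container_of() plumbing and the false line_status argument are filled in from context):

    static void irqfd_inject(struct work_struct *work)
    {
    	struct kvm_kernel_irqfd *irqfd =
    		container_of(work, struct kvm_kernel_irqfd, inject);
    	struct kvm *kvm = irqfd->kvm;

    	if (!irqfd->resampler) {
    		/* Edge: pulse the line high, then low again. */
    		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
    			    irqfd->gsi, 1, false);
    		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
    			    irqfd->gsi, 0, false);
    	} else {
    		/* Level: assert only; irqfd_resampler_ack() deasserts. */
    		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
    			    irqfd->gsi, 1, false);
    	}
    }
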
63 srcu_read_lock_held(&resampler->kvm->irq_srcu)) in irqfd_resampler_notify()
76 struct kvm *kvm; in irqfd_resampler_ack() local
81 kvm = resampler->kvm; in irqfd_resampler_ack()
83 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, in irqfd_resampler_ack()
86 idx = srcu_read_lock(&kvm->irq_srcu); in irqfd_resampler_ack()
88 srcu_read_unlock(&kvm->irq_srcu, idx); in irqfd_resampler_ack()
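
irqfd_resampler_ack() runs from the guest's interrupt-ack path: it deasserts the level line, then walks the resampler's irqfds inside an irq_srcu read-side section and signals every resamplefd so userspace can re-sample the device and re-raise the line if needed. Reconstructed; the list walk and the eventfd_signal() call do not mention kvm and so are absent from the listing, and the eventfd_signal() signature varies across kernel versions:

    static void irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
    {
    	struct kvm_kernel_irqfd_resampler *resampler;
    	struct kvm_kernel_irqfd *irqfd;
    	struct kvm *kvm;
    	int idx;

    	resampler = container_of(kian, struct kvm_kernel_irqfd_resampler,
    				 notifier);
    	kvm = resampler->kvm;

    	/* Guest ACKed the level interrupt: deassert the line ... */
    	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
    		    resampler->notifier.gsi, 0, false);

    	/* ... and signal every resamplefd so userspace re-samples. */
    	idx = srcu_read_lock(&kvm->irq_srcu);
    	list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
    				 srcu_read_lock_held(&kvm->irq_srcu))
    		eventfd_signal(irqfd->resamplefd, 1);
    	srcu_read_unlock(&kvm->irq_srcu, idx);
    }
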
95 struct kvm *kvm = resampler->kvm; in irqfd_resampler_shutdown() local
97 mutex_lock(&kvm->irqfds.resampler_lock); in irqfd_resampler_shutdown()
103 kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier); in irqfd_resampler_shutdown()
108 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, in irqfd_resampler_shutdown()
112 synchronize_srcu_expedited(&kvm->irq_srcu); in irqfd_resampler_shutdown()
115 mutex_unlock(&kvm->irqfds.resampler_lock); in irqfd_resampler_shutdown()
126 struct kvm *kvm = irqfd->kvm; in irqfd_shutdown() local
130 synchronize_srcu_expedited(&kvm->irq_srcu); in irqfd_shutdown()
184 struct kvm *kvm, int irq_source_id, in kvm_arch_set_irq_inatomic() argument
201 struct kvm *kvm = irqfd->kvm; in irqfd_wakeup() local
215 idx = srcu_read_lock(&kvm->irq_srcu); in irqfd_wakeup()
221 if (kvm_arch_set_irq_inatomic(&irq, kvm, in irqfd_wakeup()
225 srcu_read_unlock(&kvm->irq_srcu, idx); in irqfd_wakeup()
238 spin_lock_irqsave(&kvm->irqfds.lock, iflags); in irqfd_wakeup()
252 spin_unlock_irqrestore(&kvm->irqfds.lock, iflags); in irqfd_wakeup()
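
irqfd_wakeup() is the eventfd wait-queue callback, invoked with the wait-queue lock held. EPOLLIN takes the fast path: the cached routing entry is injected from atomic context via kvm_arch_set_irq_inatomic(), falling back to the irqfd_inject() work item if the architecture cannot do it atomically. EPOLLHUP means the eventfd is being closed, so the irqfd is detached under irqfds.lock. A condensed sketch; the seqcount retry loop that snapshots the cached routing entry is elided here:

    static int irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode,
    			int sync, void *key)
    {
    	struct kvm_kernel_irqfd *irqfd =
    		container_of(wait, struct kvm_kernel_irqfd, wait);
    	__poll_t flags = key_to_poll(key);
    	struct kvm *kvm = irqfd->kvm;
    	int ret = 0;

    	if (flags & EPOLLIN) {
    		int idx = srcu_read_lock(&kvm->irq_srcu);

    		/* Inject in atomic context if the arch allows it,
    		 * otherwise punt to the irqfd->inject work item. */
    		if (kvm_arch_set_irq_inatomic(&irqfd->irq_entry, kvm,
    					      KVM_USERSPACE_IRQ_SOURCE_ID,
    					      1, false) == -EWOULDBLOCK)
    			schedule_work(&irqfd->inject);
    		srcu_read_unlock(&kvm->irq_srcu, idx);
    		ret = 1;
    	}

    	if (flags & EPOLLHUP) {
    		/* The eventfd is closing: detach under irqfds.lock. */
    		unsigned long iflags;

    		spin_lock_irqsave(&kvm->irqfds.lock, iflags);
    		/* ... queue irqfd_shutdown() if still on irqfds.items ... */
    		spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
    	}

    	return ret;
    }
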
258 static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd) in irqfd_update() argument
264 lockdep_assert_held(&kvm->irqfds.lock); in irqfd_update()
266 n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi); in irqfd_update()
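
irqfd_update() refreshes the routing entry cached in each irqfd whenever GSI routing changes; the caller must hold irqfds.lock, and readers (irqfd_wakeup()) stay consistent via the irq_entry_sc seqcount initialized on line 380. Reconstructed, with the seqcount write section and the only-cache-a-single-entry rule filled in from context:

    static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
    {
    	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
    	int n_entries;

    	lockdep_assert_held(&kvm->irqfds.lock);

    	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

    	write_seqcount_begin(&irqfd->irq_entry_sc);

    	/* Only an unambiguous single-entry mapping is cached. */
    	if (n_entries == 1)
    		irqfd->irq_entry = entries[0];
    	else
    		irqfd->irq_entry.type = 0;

    	write_seqcount_end(&irqfd->irq_entry_sc);
    }
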
281 struct kvm *kvm; member
291 struct kvm *kvm = p->kvm; in kvm_irqfd_register() local
297 spin_lock_irq(&kvm->irqfds.lock); in kvm_irqfd_register()
304 irqfd_update(kvm, irqfd); in kvm_irqfd_register()
324 spin_release(&kvm->irqfds.lock.dep_map, _RET_IP_); in kvm_irqfd_register()
326 spin_acquire(&kvm->irqfds.lock.dep_map, 0, 0, _RET_IP_); in kvm_irqfd_register()
330 list_add_tail(&irqfd->list, &kvm->irqfds.items); in kvm_irqfd_register()
333 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irqfd_register()
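
The spin_release()/spin_acquire() pair on lines 324/326 is a lockdep-only annotation: the dep_map bookkeeping for irqfds.lock is dropped and re-taken around a nested step while the spinlock itself stays held throughout. A hypothetical sketch of the bare pattern (why kvm_irqfd_register() needs it, presumably to avoid a false-positive ordering report against the eventfd wait-queue lock, is an assumption here):

    /* Hypothetical illustration of the lockdep-only release/acquire. */
    static void nested_step_without_lockdep(struct kvm *kvm)
    {
    	spin_lock_irq(&kvm->irqfds.lock);

    	/* Hide the held lock from lockdep across the nested step. */
    	spin_release(&kvm->irqfds.lock.dep_map, _RET_IP_);
    	/* ... nested locking that would otherwise trip lockdep ... */
    	spin_acquire(&kvm->irqfds.lock.dep_map, 0, 0, _RET_IP_);

    	spin_unlock_irq(&kvm->irqfds.lock);
    }
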
356 kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd_assign() argument
365 if (!kvm_arch_intc_initialized(kvm)) in kvm_irqfd_assign()
368 if (!kvm_arch_irqfd_allowed(kvm, args)) in kvm_irqfd_assign()
375 irqfd->kvm = kvm; in kvm_irqfd_assign()
380 seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock); in kvm_irqfd_assign()
408 mutex_lock(&kvm->irqfds.resampler_lock); in kvm_irqfd_assign()
411 &kvm->irqfds.resampler_list, link) { in kvm_irqfd_assign()
423 mutex_unlock(&kvm->irqfds.resampler_lock); in kvm_irqfd_assign()
427 resampler->kvm = kvm; in kvm_irqfd_assign()
433 list_add_rcu(&resampler->link, &kvm->irqfds.resampler_list); in kvm_irqfd_assign()
434 kvm_register_irq_ack_notifier(kvm, in kvm_irqfd_assign()
440 synchronize_srcu_expedited(&kvm->irq_srcu); in kvm_irqfd_assign()
442 mutex_unlock(&kvm->irqfds.resampler_lock); in kvm_irqfd_assign()
455 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_irqfd_assign()
464 irqfd_pt.kvm = kvm; in kvm_irqfd_assign()
489 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_irqfd_assign()
493 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_irqfd_assign()
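
Everything in kvm_irqfd_assign() is driven from userspace by the KVM_IRQFD ioctl on a VM fd. A minimal usage sketch, assuming a VM with an in-kernel irqchip (attach_irqfd() and the GSI choice are illustrative):

    #include <string.h>
    #include <unistd.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Attach an eventfd to a GSI: writes to the fd inject the interrupt. */
    static int attach_irqfd(int vm_fd, unsigned int gsi)
    {
    	struct kvm_irqfd irqfd;
    	int efd = eventfd(0, EFD_CLOEXEC);

    	if (efd < 0)
    		return -1;

    	memset(&irqfd, 0, sizeof(irqfd));
    	irqfd.fd  = efd;
    	irqfd.gsi = gsi;
    	/* Setting KVM_IRQFD_FLAG_RESAMPLE plus irqfd.resamplefd here
    	 * would request the level-triggered resampling variant that
    	 * lines 408-442 set up. */

    	if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0) {
    		close(efd);
    		return -1;
    	}

    	return efd;	/* eventfd_write(efd, 1) now fires the GSI */
    }
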
509 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin) in kvm_irq_has_notifier() argument
514 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_irq_has_notifier()
515 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); in kvm_irq_has_notifier()
517 hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list, in kvm_irq_has_notifier()
518 link, srcu_read_lock_held(&kvm->irq_srcu)) in kvm_irq_has_notifier()
520 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_irq_has_notifier()
524 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_irq_has_notifier()
530 void kvm_notify_acked_gsi(struct kvm *kvm, int gsi) in kvm_notify_acked_gsi() argument
534 hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list, in kvm_notify_acked_gsi()
535 link, srcu_read_lock_held(&kvm->irq_srcu)) in kvm_notify_acked_gsi()
540 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) in kvm_notify_acked_irq() argument
546 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_notify_acked_irq()
547 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); in kvm_notify_acked_irq()
549 kvm_notify_acked_gsi(kvm, gsi); in kvm_notify_acked_irq()
550 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_notify_acked_irq()
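
kvm_notify_acked_gsi() is the walk both ack paths funnel into: every registered notifier matching the GSI gets its callback, under an irq_srcu read-side section held by the caller. Reconstructed (the gsi comparison and the irq_acked() invocation do not reference kvm and are filled in from context):

    void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
    {
    	struct kvm_irq_ack_notifier *kian;

    	hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
    				  link, srcu_read_lock_held(&kvm->irq_srcu))
    		if (kian->gsi == gsi)
    			kian->irq_acked(kian);
    }
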
553 void kvm_register_irq_ack_notifier(struct kvm *kvm, in kvm_register_irq_ack_notifier() argument
556 mutex_lock(&kvm->irq_lock); in kvm_register_irq_ack_notifier()
557 hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list); in kvm_register_irq_ack_notifier()
558 mutex_unlock(&kvm->irq_lock); in kvm_register_irq_ack_notifier()
559 kvm_arch_post_irq_ack_notifier_list_update(kvm); in kvm_register_irq_ack_notifier()
562 void kvm_unregister_irq_ack_notifier(struct kvm *kvm, in kvm_unregister_irq_ack_notifier() argument
565 mutex_lock(&kvm->irq_lock); in kvm_unregister_irq_ack_notifier()
567 mutex_unlock(&kvm->irq_lock); in kvm_unregister_irq_ack_notifier()
568 synchronize_srcu_expedited(&kvm->irq_srcu); in kvm_unregister_irq_ack_notifier()
569 kvm_arch_post_irq_ack_notifier_list_update(kvm); in kvm_unregister_irq_ack_notifier()
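
A consumer fills in a struct kvm_irq_ack_notifier and registers it under irq_lock; the synchronize_srcu_expedited() on line 568 guarantees that no irq_acked() callback is still executing once unregistration returns. A hypothetical consumer sketch (the GSI and callback names are illustrative):

    /* Hypothetical ack-notifier consumer. */
    static void my_irq_acked(struct kvm_irq_ack_notifier *kian)
    {
    	/* Runs from kvm_notify_acked_gsi() when the guest ACKs. */
    }

    static struct kvm_irq_ack_notifier my_notifier = {
    	.gsi	   = 5,
    	.irq_acked = my_irq_acked,
    };

    /*
     * kvm_register_irq_ack_notifier(kvm, &my_notifier);
     *	...
     * kvm_unregister_irq_ack_notifier(kvm, &my_notifier);
     */
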
576 kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd_deassign() argument
585 spin_lock_irq(&kvm->irqfds.lock); in kvm_irqfd_deassign()
587 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) { in kvm_irqfd_deassign()
602 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irqfd_deassign()
616 kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd() argument
622 return kvm_irqfd_deassign(kvm, args); in kvm_irqfd()
624 return kvm_irqfd_assign(kvm, args); in kvm_irqfd()
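
kvm_irqfd() is the KVM_IRQFD ioctl entry point; the KVM_IRQFD_FLAG_DEASSIGN bit in args->flags picks between the two paths above. The matching userspace call, continuing the hypothetical attach_irqfd() sketch from earlier:

    /* Detach: same fd/gsi pair as the assign, plus the DEASSIGN flag. */
    static int detach_irqfd(int vm_fd, int efd, unsigned int gsi)
    {
    	struct kvm_irqfd irqfd;

    	memset(&irqfd, 0, sizeof(irqfd));
    	irqfd.fd    = efd;
    	irqfd.gsi   = gsi;
    	irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;

    	return ioctl(vm_fd, KVM_IRQFD, &irqfd);
    }
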
632 kvm_irqfd_release(struct kvm *kvm) in kvm_irqfd_release() argument
636 spin_lock_irq(&kvm->irqfds.lock); in kvm_irqfd_release()
638 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) in kvm_irqfd_release()
641 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irqfd_release()
655 void kvm_irq_routing_update(struct kvm *kvm) in kvm_irq_routing_update() argument
659 spin_lock_irq(&kvm->irqfds.lock); in kvm_irq_routing_update()
661 list_for_each_entry(irqfd, &kvm->irqfds.items, list) { in kvm_irq_routing_update()
667 irqfd_update(kvm, irqfd); in kvm_irq_routing_update()
675 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irq_routing_update()
678 bool kvm_notify_irqfd_resampler(struct kvm *kvm, in kvm_notify_irqfd_resampler() argument
685 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_notify_irqfd_resampler()
686 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); in kvm_notify_irqfd_resampler()
689 &kvm->irqfds.resampler_list, link, in kvm_notify_irqfd_resampler()
690 srcu_read_lock_held(&kvm->irq_srcu)) { in kvm_notify_irqfd_resampler()
693 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_notify_irqfd_resampler()
698 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_notify_irqfd_resampler()
835 ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p) in ioeventfd_check_collision() argument
839 list_for_each_entry(_p, &kvm->ioeventfds, list) in ioeventfd_check_collision()
860 static int kvm_assign_ioeventfd_idx(struct kvm *kvm, in kvm_assign_ioeventfd_idx() argument
891 mutex_lock(&kvm->slots_lock); in kvm_assign_ioeventfd_idx()
894 if (ioeventfd_check_collision(kvm, p)) { in kvm_assign_ioeventfd_idx()
901 ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length, in kvm_assign_ioeventfd_idx()
906 kvm_get_bus(kvm, bus_idx)->ioeventfd_count++; in kvm_assign_ioeventfd_idx()
907 list_add_tail(&p->list, &kvm->ioeventfds); in kvm_assign_ioeventfd_idx()
909 mutex_unlock(&kvm->slots_lock); in kvm_assign_ioeventfd_idx()
914 mutex_unlock(&kvm->slots_lock); in kvm_assign_ioeventfd_idx()
924 kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, in kvm_deassign_ioeventfd_idx() argument
939 mutex_lock(&kvm->slots_lock); in kvm_deassign_ioeventfd_idx()
941 list_for_each_entry(p, &kvm->ioeventfds, list) { in kvm_deassign_ioeventfd_idx()
952 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); in kvm_deassign_ioeventfd_idx()
953 bus = kvm_get_bus(kvm, bus_idx); in kvm_deassign_ioeventfd_idx()
960 mutex_unlock(&kvm->slots_lock); in kvm_deassign_ioeventfd_idx()
967 static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) in kvm_deassign_ioeventfd() argument
970 int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args); in kvm_deassign_ioeventfd()
973 kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args); in kvm_deassign_ioeventfd()
979 kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) in kvm_assign_ioeventfd() argument
1009 ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args); in kvm_assign_ioeventfd()
1017 ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args); in kvm_assign_ioeventfd()
1025 kvm_deassign_ioeventfd_idx(kvm, bus_idx, args); in kvm_assign_ioeventfd()
1031 kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) in kvm_ioeventfd() argument
1034 return kvm_deassign_ioeventfd(kvm, args); in kvm_ioeventfd()
1036 return kvm_assign_ioeventfd(kvm, args); in kvm_ioeventfd()
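
kvm_ioeventfd() dispatches KVM_IOEVENTFD the same way kvm_irqfd() dispatches KVM_IRQFD. A minimal MMIO usage sketch (address and length are illustrative; flags of 0 means a wildcard MMIO match, while a zero len would additionally register on KVM_FAST_MMIO_BUS per line 1017):

    #include <string.h>
    #include <unistd.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Signal an eventfd when the guest writes 4 bytes at 0xf0000000. */
    static int attach_ioeventfd(int vm_fd)
    {
    	struct kvm_ioeventfd ioev;
    	int efd = eventfd(0, EFD_CLOEXEC);

    	if (efd < 0)
    		return -1;

    	memset(&ioev, 0, sizeof(ioev));
    	ioev.addr = 0xf0000000;
    	ioev.len  = 4;
    	ioev.fd   = efd;
    	/* KVM_IOEVENTFD_FLAG_DATAMATCH plus ioev.datamatch would only
    	 * fire on one written value; KVM_IOEVENTFD_FLAG_PIO selects
    	 * the port-I/O bus instead of MMIO. */

    	if (ioctl(vm_fd, KVM_IOEVENTFD, &ioev) < 0) {
    		close(efd);
    		return -1;
    	}

    	return efd;
    }
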
1040 kvm_eventfd_init(struct kvm *kvm) in kvm_eventfd_init() argument
1043 spin_lock_init(&kvm->irqfds.lock); in kvm_eventfd_init()
1044 INIT_LIST_HEAD(&kvm->irqfds.items); in kvm_eventfd_init()
1045 INIT_LIST_HEAD(&kvm->irqfds.resampler_list); in kvm_eventfd_init()
1046 mutex_init(&kvm->irqfds.resampler_lock); in kvm_eventfd_init()
1048 INIT_LIST_HEAD(&kvm->ioeventfds); in kvm_eventfd_init()
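
Taken together, the last six lines are essentially the whole of kvm_eventfd_init(). Assembled (the config guard around the irqfd fields is an assumption; it is not visible in the listing):

    void kvm_eventfd_init(struct kvm *kvm)
    {
    #ifdef CONFIG_HAVE_KVM_IRQCHIP		/* guard is assumed */
    	spin_lock_init(&kvm->irqfds.lock);
    	INIT_LIST_HEAD(&kvm->irqfds.items);
    	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
    	mutex_init(&kvm->irqfds.resampler_lock);
    #endif
    	INIT_LIST_HEAD(&kvm->ioeventfds);
    }
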