Lines matching refs: pirq_dpci
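
The matches below all fall in one source file; the function names (pt_irq_create_bind, hvm_dirq_assist, dpci_softirq, ...) are consistent with Xen's xen/drivers/passthrough/io.c, the HVM passthrough interrupt-dispatch code. Each entry reads: source line number, the matching line, the enclosing function, and (where the tool records it) whether the identifier is an argument or a local. To make the listing easier to follow, short hedged sketches are interleaved below; they reconstruct the smaller functions around the matched fragments. Any line the search did not match (locking, error handling, helper calls) is an assumption based on common Xen idioms, not verbatim source.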

61 static void raise_softirq_for(struct hvm_pirq_dpci *pirq_dpci)  in raise_softirq_for()  argument
65 if ( test_and_set_bit(STATE_SCHED, &pirq_dpci->state) ) in raise_softirq_for()
68 get_knownalive_domain(pirq_dpci->dom); in raise_softirq_for()
71 list_add_tail(&pirq_dpci->softirq_list, &this_cpu(dpci_list)); in raise_softirq_for()
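
The four raise_softirq_for() matches outline the entire scheduling helper. A minimal sketch assembled around them; the interrupt-flag protection and the closing raise_softirq() call are assumptions:

    /* Sketch: unmatched lines are assumptions, not verbatim Xen source. */
    static void raise_softirq_for(struct hvm_pirq_dpci *pirq_dpci)
    {
        unsigned long flags;

        /* Already queued or running: nothing to do. */
        if ( test_and_set_bit(STATE_SCHED, &pirq_dpci->state) )
            return;

        /* Whoever sets STATE_SCHED must hold a domain reference. */
        get_knownalive_domain(pirq_dpci->dom);

        local_irq_save(flags);
        list_add_tail(&pirq_dpci->softirq_list, &this_cpu(dpci_list));
        local_irq_restore(flags);

        raise_softirq(HVM_DPCI_SOFTIRQ);    /* assumption */
    }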
84 bool pt_pirq_softirq_active(struct hvm_pirq_dpci *pirq_dpci) in pt_pirq_softirq_active() argument
86 if ( pirq_dpci->state & ((1 << STATE_RUN) | (1 << STATE_SCHED)) ) in pt_pirq_softirq_active()
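
pt_pirq_softirq_active()'s single substantive match is the whole decision: an entry is "active" while either STATE_RUN or STATE_SCHED is set. A sketch; the softirq flush before reporting inactivity is an assumption:

    bool pt_pirq_softirq_active(struct hvm_pirq_dpci *pirq_dpci)
    {
        if ( pirq_dpci->state & ((1 << STATE_RUN) | (1 << STATE_SCHED)) )
            return true;

        /*
         * Assumption: drain outstanding softirq work before the caller
         * proceeds with unbind/teardown.
         */
        process_pending_softirqs();

        return false;
    }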
104 static void pt_pirq_softirq_reset(struct hvm_pirq_dpci *pirq_dpci) in pt_pirq_softirq_reset() argument
106 struct domain *d = pirq_dpci->dom; in pt_pirq_softirq_reset()
110 switch ( cmpxchg(&pirq_dpci->state, 1 << STATE_SCHED, 0) ) in pt_pirq_softirq_reset()
127 pirq_dpci->dom = NULL; in pt_pirq_softirq_reset()
136 pirq_dpci->masked = 0; in pt_pirq_softirq_reset()
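
pt_pirq_softirq_reset() is nearly recoverable from its matches: it atomically tries to de-schedule a queued softirq and severs the entry's domain link. A sketch; the put_domain() on the de-schedule path and the lock assertion are assumptions:

    static void pt_pirq_softirq_reset(struct hvm_pirq_dpci *pirq_dpci)
    {
        struct domain *d = pirq_dpci->dom;

        ASSERT(spin_is_locked(&d->event_lock));    /* assumption */

        switch ( cmpxchg(&pirq_dpci->state, 1 << STATE_SCHED, 0) )
        {
        case 1 << STATE_SCHED:
            /*
             * De-scheduled before dpci_softirq() ran: drop the reference
             * taken in raise_softirq_for() (assumption).
             */
            put_domain(d);
            /* fallthrough */
        case 1 << STATE_RUN:
        case (1 << STATE_RUN) | (1 << STATE_SCHED):
            pirq_dpci->dom = NULL;
            break;
        }

        /* Inhibit further delivery work on this entry. */
        pirq_dpci->masked = 0;
    }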
144 static int pt_irq_guest_eoi(struct domain *d, struct hvm_pirq_dpci *pirq_dpci, in pt_irq_guest_eoi() argument
148 &pirq_dpci->flags) ) in pt_irq_guest_eoi()
150 pirq_dpci->masked = 0; in pt_irq_guest_eoi()
151 pirq_dpci->pending = 0; in pt_irq_guest_eoi()
152 pirq_guest_eoi(dpci_pirq(pirq_dpci)); in pt_irq_guest_eoi()
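
pt_irq_guest_eoi() reads as a pt_pirq_iterate() callback (hence the unused d/arg parameters): when the EOI-latch flag is set, it clears the pending state and forwards the EOI to the machine IRQ layer. A sketch; the exact flag-bit name on the unmatched half of the condition is an assumption:

    static int pt_irq_guest_eoi(struct domain *d,
                                struct hvm_pirq_dpci *pirq_dpci, void *arg)
    {
        /* Assumption: the latch is tested and cleared in one step. */
        if ( __test_and_clear_bit(_HVM_IRQ_DPCI_EOI_LATCH_SHIFT,
                                  &pirq_dpci->flags) )
        {
            pirq_dpci->masked = 0;
            pirq_dpci->pending = 0;
            pirq_guest_eoi(dpci_pirq(pirq_dpci));
        }

        return 0;
    }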
199 pirq_dpci(pirq)->flags |= HVM_IRQ_DPCI_EOI_LATCH; in pt_irq_time_out()
282 struct hvm_pirq_dpci *pirq_dpci; in pt_irq_create_bind() local
321 pirq_dpci = pirq_dpci(info); in pt_irq_create_bind()
331 if ( pt_pirq_softirq_active(pirq_dpci) ) in pt_irq_create_bind()
349 if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) ) in pt_irq_create_bind()
351 pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | HVM_IRQ_DPCI_MACH_MSI | in pt_irq_create_bind()
353 pirq_dpci->gmsi.gvec = pt_irq_bind->u.msi.gvec; in pt_irq_create_bind()
354 pirq_dpci->gmsi.gflags = gflags; in pt_irq_create_bind()
366 pirq_dpci->dom = d; in pt_irq_create_bind()
381 pt_pirq_softirq_reset(pirq_dpci); in pt_irq_create_bind()
386 pirq_dpci->gmsi.gflags = 0; in pt_irq_create_bind()
387 pirq_dpci->gmsi.gvec = 0; in pt_irq_create_bind()
388 pirq_dpci->dom = NULL; in pt_irq_create_bind()
389 pirq_dpci->flags = 0; in pt_irq_create_bind()
399 if ( (pirq_dpci->flags & mask) != mask ) in pt_irq_create_bind()
406 if ( pirq_dpci->gmsi.gvec != pt_irq_bind->u.msi.gvec || in pt_irq_create_bind()
407 pirq_dpci->gmsi.gflags != gflags ) in pt_irq_create_bind()
412 pirq_dpci->gmsi.gvec = pt_irq_bind->u.msi.gvec; in pt_irq_create_bind()
413 pirq_dpci->gmsi.gflags = gflags; in pt_irq_create_bind()
417 dest = MASK_EXTR(pirq_dpci->gmsi.gflags, in pt_irq_create_bind()
419 dest_mode = pirq_dpci->gmsi.gflags & XEN_DOMCTL_VMSI_X86_DM_MASK; in pt_irq_create_bind()
420 delivery_mode = MASK_EXTR(pirq_dpci->gmsi.gflags, in pt_irq_create_bind()
424 pirq_dpci->gmsi.dest_vcpu_id = dest_vcpu_id; in pt_irq_create_bind()
427 pirq_dpci->gmsi.posted = false; in pt_irq_create_bind()
433 pirq_dpci->gmsi.gvec); in pt_irq_create_bind()
435 pirq_dpci->gmsi.posted = true; in pt_irq_create_bind()
443 info, pirq_dpci->gmsi.gvec); in pt_irq_create_bind()
494 list_add_tail(&digl->list, &pirq_dpci->digl_list); in pt_irq_create_bind()
520 if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) ) in pt_irq_create_bind()
525 pirq_dpci->dom = d; in pt_irq_create_bind()
528 pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | in pt_irq_create_bind()
536 pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | in pt_irq_create_bind()
553 pirq_dpci->flags |= HVM_IRQ_DPCI_IDENTITY_GSI; in pt_irq_create_bind()
565 if ( pt_irq_need_timer(pirq_dpci->flags) ) in pt_irq_create_bind()
566 init_timer(&pirq_dpci->timer, pt_irq_time_out, pirq_dpci, 0); in pt_irq_create_bind()
571 if ( pt_irq_need_timer(pirq_dpci->flags) ) in pt_irq_create_bind()
572 kill_timer(&pirq_dpci->timer); in pt_irq_create_bind()
577 pirq_dpci->dom = NULL; in pt_irq_create_bind()
588 pirq_dpci->flags = 0; in pt_irq_create_bind()
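
pt_irq_create_bind() is far too large to rebuild from these matches alone, but the MSI fragments (source lines 349-389) do fix the shape of its first-time-bind path and, notably, the rollback that must neutralize an already-scheduled softirq before undoing the binding. A heavily condensed sketch; the pirq_guest_bind() call and everything outside the matched assignments are assumptions:

    /* Condensed sketch of the first-time MSI bind and its error rollback. */
    if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
    {
        pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | HVM_IRQ_DPCI_MACH_MSI |
                           HVM_IRQ_DPCI_GUEST_MSI;    /* third flag assumed */
        pirq_dpci->gmsi.gvec = pt_irq_bind->u.msi.gvec;
        pirq_dpci->gmsi.gflags = gflags;
        pirq_dpci->dom = d;

        rc = pirq_guest_bind(d->vcpu[0], info, 0);    /* assumption */
        if ( rc )
        {
            /*
             * A softirq may already be scheduled against this entry;
             * reset it before clearing the binding state.
             */
            pt_pirq_softirq_reset(pirq_dpci);

            pirq_dpci->gmsi.gflags = 0;
            pirq_dpci->gmsi.gvec = 0;
            pirq_dpci->dom = NULL;
            pirq_dpci->flags = 0;
        }
    }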
626 struct hvm_pirq_dpci *pirq_dpci; in pt_irq_destroy_bind() local
664 pirq_dpci = pirq_dpci(pirq); in pt_irq_destroy_bind()
699 if ( pirq_dpci && (pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) ) in pt_irq_destroy_bind()
701 list_for_each_entry_safe ( digl, tmp, &pirq_dpci->digl_list, list ) in pt_irq_destroy_bind()
711 what = list_empty(&pirq_dpci->digl_list) ? "final" : "partial"; in pt_irq_destroy_bind()
716 else if ( pirq_dpci && pirq_dpci->gmsi.posted ) in pt_irq_destroy_bind()
719 if ( pirq_dpci && (pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) && in pt_irq_destroy_bind()
720 list_empty(&pirq_dpci->digl_list) ) in pt_irq_destroy_bind()
724 if ( pt_irq_need_timer(pirq_dpci->flags) ) in pt_irq_destroy_bind()
725 kill_timer(&pirq_dpci->timer); in pt_irq_destroy_bind()
726 pirq_dpci->flags = 0; in pt_irq_destroy_bind()
731 pt_pirq_softirq_reset(pirq_dpci); in pt_irq_destroy_bind()
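
The pt_irq_destroy_bind() matches pin down the teardown order once the last guest GSI is unbound: stop the timer, clear the flags, then neutralize any in-flight softirq so the unbind cannot race dpci_softirq(). A condensed sketch; the unbind/unregister calls are assumptions:

    /* Condensed sketch of the final-unbind tail. */
    if ( pirq_dpci && (pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) &&
         list_empty(&pirq_dpci->digl_list) )
    {
        pirq_guest_unbind(d, pirq);          /* assumption */
        msixtbl_pt_unregister(d, pirq);      /* assumption */
        if ( pt_irq_need_timer(pirq_dpci->flags) )
            kill_timer(&pirq_dpci->timer);
        pirq_dpci->flags = 0;
        /* Same softirq race as in the create-bind rollback above. */
        pt_pirq_softirq_reset(pirq_dpci);
    }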
787 struct hvm_pirq_dpci *pirq_dpci = pirq_dpci(pirqs[i]); in pt_pirq_iterate() local
790 if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) ) in pt_pirq_iterate()
791 rc = cb(d, pirq_dpci, arg); in pt_pirq_iterate()
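
pt_pirq_iterate() batches pirq lookups (hence the pirqs[i] indexing at source line 787) and invokes the callback only for mapped entries. A sketch, assuming the usual Xen radix-tree gang-lookup loop over d->pirq_tree:

    int pt_pirq_iterate(struct domain *d,
                        int (*cb)(struct domain *,
                                  struct hvm_pirq_dpci *, void *),
                        void *arg)
    {
        int rc = 0;
        unsigned int pirq = 0, n, i;
        struct pirq *pirqs[8];    /* batch size is an assumption */

        do {
            n = radix_tree_gang_lookup(&d->pirq_tree, (void **)pirqs, pirq,
                                       ARRAY_SIZE(pirqs));
            for ( i = 0; i < n; ++i )
            {
                struct hvm_pirq_dpci *pirq_dpci = pirq_dpci(pirqs[i]);

                pirq = pirqs[i]->pirq;
                /* Only visit entries that are actually bound. */
                if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
                    rc = cb(d, pirq_dpci, arg);
            }
        } while ( !rc && ++pirq < d->nr_pirqs && n == ARRAY_SIZE(pirqs) );

        return rc;
    }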
801 struct hvm_pirq_dpci *pirq_dpci = pirq_dpci(pirq); in hvm_do_IRQ_dpci() local
806 !pirq_dpci || !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) ) in hvm_do_IRQ_dpci()
809 pirq_dpci->masked = 1; in hvm_do_IRQ_dpci()
810 raise_softirq_for(pirq_dpci); in hvm_do_IRQ_dpci()
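
hvm_do_IRQ_dpci() is the hardirq-context entry point; it does no delivery work itself, only latches the interrupt and defers to softirq context. A sketch; the guard conditions ahead of the matched ones are assumptions:

    int hvm_do_IRQ_dpci(struct domain *d, struct pirq *pirq)
    {
        struct hvm_pirq_dpci *pirq_dpci = pirq_dpci(pirq);

        /* Assumption: further checks (IOMMU enabled, dpci set up) precede. */
        if ( !pirq_dpci || !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
            return 0;

        /* Latch the interrupt and hand delivery to dpci_softirq(). */
        pirq_dpci->masked = 1;
        raise_softirq_for(pirq_dpci);
        return 1;
    }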
815 static void __msi_pirq_eoi(struct hvm_pirq_dpci *pirq_dpci) in __msi_pirq_eoi() argument
819 if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) && in __msi_pirq_eoi()
820 (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) ) in __msi_pirq_eoi()
822 struct pirq *pirq = dpci_pirq(pirq_dpci); in __msi_pirq_eoi()
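
__msi_pirq_eoi() acks the machine MSI on the guest's behalf, but only when the binding is both mapped and a machine MSI. A sketch; the descriptor lock/EOI sequence is an assumption:

    static void __msi_pirq_eoi(struct hvm_pirq_dpci *pirq_dpci)
    {
        if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) &&
             (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) )
        {
            struct pirq *pirq = dpci_pirq(pirq_dpci);
            struct irq_desc *desc = pirq_spin_lock_irq_desc(pirq, NULL);

            if ( !desc )    /* assumption: tolerate a vanished descriptor */
                return;
            desc_guest_eoi(desc, pirq);    /* drops the desc lock */
        }
    }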
833 struct hvm_pirq_dpci *pirq_dpci, void *arg) in _hvm_dpci_msi_eoi() argument
837 if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) && in _hvm_dpci_msi_eoi()
838 (pirq_dpci->gmsi.gvec == vector) ) in _hvm_dpci_msi_eoi()
840 unsigned int dest = MASK_EXTR(pirq_dpci->gmsi.gflags, in _hvm_dpci_msi_eoi()
842 bool dest_mode = pirq_dpci->gmsi.gflags & XEN_DOMCTL_VMSI_X86_DM_MASK; in _hvm_dpci_msi_eoi()
847 __msi_pirq_eoi(pirq_dpci); in _hvm_dpci_msi_eoi()
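
_hvm_dpci_msi_eoi() is another pt_pirq_iterate() callback: it matches the EOIed vector against the bound guest MSI and, when the destination matches, forwards the EOI via __msi_pirq_eoi(). A sketch; the vlapic destination check is an assumption:

    static int _hvm_dpci_msi_eoi(struct domain *d,
                                 struct hvm_pirq_dpci *pirq_dpci, void *arg)
    {
        int vector = (long)arg;

        if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) &&
             (pirq_dpci->gmsi.gvec == vector) )
        {
            unsigned int dest = MASK_EXTR(pirq_dpci->gmsi.gflags,
                                          XEN_DOMCTL_VMSI_X86_DEST_ID_MASK);
            bool dest_mode = pirq_dpci->gmsi.gflags &
                             XEN_DOMCTL_VMSI_X86_DM_MASK;

            /* Assumption: only EOI when the current vCPU is a target. */
            if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0,
                                   dest, dest_mode) )
            {
                __msi_pirq_eoi(pirq_dpci);
                return 1;
            }
        }

        return 0;
    }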
865 static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci) in hvm_dirq_assist() argument
874 if ( test_and_clear_bool(pirq_dpci->masked) ) in hvm_dirq_assist()
876 struct pirq *pirq = dpci_pirq(pirq_dpci); in hvm_dirq_assist()
883 if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI ) in hvm_dirq_assist()
890 if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI ) in hvm_dirq_assist()
892 vmsi_deliver_pirq(d, pirq_dpci); in hvm_dirq_assist()
897 list_for_each_entry ( digl, &pirq_dpci->digl_list, list ) in hvm_dirq_assist()
899 ASSERT(!(pirq_dpci->flags & HVM_IRQ_DPCI_IDENTITY_GSI)); in hvm_dirq_assist()
901 pirq_dpci->pending++; in hvm_dirq_assist()
904 if ( pirq_dpci->flags & HVM_IRQ_DPCI_IDENTITY_GSI ) in hvm_dirq_assist()
907 pirq_dpci->pending++; in hvm_dirq_assist()
910 if ( pirq_dpci->flags & HVM_IRQ_DPCI_TRANSLATE ) in hvm_dirq_assist()
913 __msi_pirq_eoi(pirq_dpci); in hvm_dirq_assist()
925 ASSERT(pt_irq_need_timer(pirq_dpci->flags)); in hvm_dirq_assist()
926 set_timer(&pirq_dpci->timer, NOW() + PT_IRQ_TIME_OUT); in hvm_dirq_assist()
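
hvm_dirq_assist() is the softirq-side delivery routine; the matches expose its three delivery cases (guest MSI, multiplexed guest GSIs via digl_list, identity-mapped GSI), the translated-MSI early EOI, and the timeout that guards against a guest that never EOIs. A condensed sketch; locking and the GSI-assert helpers are assumptions, and the first HVM_IRQ_DPCI_GUEST_MSI match (source line 883) sits in an event-channel shortcut omitted here:

    static void hvm_dirq_assist(struct domain *d,
                                struct hvm_pirq_dpci *pirq_dpci)
    {
        spin_lock(&d->event_lock);    /* assumption */
        if ( test_and_clear_bool(pirq_dpci->masked) )
        {
            struct pirq *pirq = dpci_pirq(pirq_dpci);
            const struct dev_intx_gsi_link *digl;

            if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
            {
                vmsi_deliver_pirq(d, pirq_dpci);
                goto out;
            }

            /* One machine IRQ can back several guest PCI INTx lines. */
            list_for_each_entry ( digl, &pirq_dpci->digl_list, list )
            {
                ASSERT(!(pirq_dpci->flags & HVM_IRQ_DPCI_IDENTITY_GSI));
                hvm_pci_intx_assert(d, digl->device, digl->intx); /* assumption */
                pirq_dpci->pending++;
            }

            if ( pirq_dpci->flags & HVM_IRQ_DPCI_IDENTITY_GSI )
            {
                hvm_gsi_assert(d, pirq->pirq);    /* assumption */
                pirq_dpci->pending++;
            }

            if ( pirq_dpci->flags & HVM_IRQ_DPCI_TRANSLATE )
            {
                /* Translated MSI-to-INTx: EOI the machine MSI right away. */
                __msi_pirq_eoi(pirq_dpci);
                goto out;
            }

            /* Unmask eventually even if the guest never EOIs. */
            ASSERT(pt_irq_need_timer(pirq_dpci->flags));
            set_timer(&pirq_dpci->timer, NOW() + PT_IRQ_TIME_OUT);
        }
     out:
        spin_unlock(&d->event_lock);
    }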
934 struct hvm_pirq_dpci *pirq_dpci; in hvm_pirq_eoi() local
942 pirq_dpci = pirq_dpci(pirq); in hvm_pirq_eoi()
948 if ( --pirq_dpci->pending || in hvm_pirq_eoi()
950 !pt_irq_need_timer(pirq_dpci->flags) ) in hvm_pirq_eoi()
953 stop_timer(&pirq_dpci->timer); in hvm_pirq_eoi()
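
hvm_pirq_eoi() refcounts guest EOIs: the machine line is acked only once pirq_dpci->pending drops to zero and a timeout is actually armed for this binding. A sketch; the signature and the unmatched middle condition (source line 949) are assumptions:

    static void hvm_pirq_eoi(struct pirq *pirq,
                             const union vioapic_redir_entry *ent) /* assumption */
    {
        struct hvm_pirq_dpci *pirq_dpci;

        if ( !pirq )
        {
            ASSERT_UNREACHABLE();    /* assumption */
            return;
        }

        pirq_dpci = pirq_dpci(pirq);

        if ( --pirq_dpci->pending ||
             (ent && ent->fields.mask) ||    /* assumption: unmatched line */
             !pt_irq_need_timer(pirq_dpci->flags) )
            return;

        stop_timer(&pirq_dpci->timer);
        pirq_guest_eoi(pirq);
    }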
975 if ( !pirq_dpci(pirq) ) in hvm_gsi_eoi()
1032 struct hvm_pirq_dpci *pirq_dpci; in dpci_softirq() local
1035 pirq_dpci = list_entry(our_list.next, struct hvm_pirq_dpci, softirq_list); in dpci_softirq()
1036 list_del(&pirq_dpci->softirq_list); in dpci_softirq()
1038 d = pirq_dpci->dom; in dpci_softirq()
1040 if ( test_and_set_bit(STATE_RUN, &pirq_dpci->state) ) in dpci_softirq()
1046 list_add_tail(&pirq_dpci->softirq_list, &this_cpu(dpci_list)); in dpci_softirq()
1055 if ( test_and_clear_bit(STATE_SCHED, &pirq_dpci->state) ) in dpci_softirq()
1057 hvm_dirq_assist(d, pirq_dpci); in dpci_softirq()
1060 clear_bit(STATE_RUN, &pirq_dpci->state); in dpci_softirq()
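
dpci_softirq() is the consumer side of the STATE_SCHED/STATE_RUN handshake: it drains the per-CPU list, requeues entries still marked running elsewhere, and drops the domain reference taken in raise_softirq_for() upon claiming STATE_SCHED. A sketch; the list-splice prologue and the requeue details beyond the matched lines are assumptions:

    static void dpci_softirq(void)
    {
        unsigned int cpu = smp_processor_id();
        LIST_HEAD(our_list);

        /* Detach the whole per-CPU list atomically vs. raise_softirq_for(). */
        local_irq_disable();
        list_splice_init(&per_cpu(dpci_list, cpu), &our_list);
        local_irq_enable();

        while ( !list_empty(&our_list) )
        {
            struct hvm_pirq_dpci *pirq_dpci;
            struct domain *d;

            pirq_dpci = list_entry(our_list.next, struct hvm_pirq_dpci,
                                   softirq_list);
            list_del(&pirq_dpci->softirq_list);

            d = pirq_dpci->dom;
            if ( test_and_set_bit(STATE_RUN, &pirq_dpci->state) )
            {
                unsigned long flags;

                /* Still running on another CPU: requeue and retry. */
                local_irq_save(flags);
                list_add_tail(&pirq_dpci->softirq_list, &this_cpu(dpci_list));
                local_irq_restore(flags);

                raise_softirq(HVM_DPCI_SOFTIRQ);    /* assumption */
                continue;
            }
            /* Whoever clears STATE_SCHED drops the domain reference. */
            if ( test_and_clear_bit(STATE_SCHED, &pirq_dpci->state) )
            {
                hvm_dirq_assist(d, pirq_dpci);
                put_domain(d);    /* assumption */
            }
            clear_bit(STATE_RUN, &pirq_dpci->state);
        }
    }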