Lines matching refs:xd
33 #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio)) argument
34 #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio)) argument
80 static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset) in xive_vm_esb_load() argument
84 if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) in xive_vm_esb_load()
87 val = __raw_readq(__x_eoi_page(xd) + offset); in xive_vm_esb_load()
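These fragments all appear to come from KVM's XIVE interrupt controller emulation on powerpc (the function names match arch/powerpc/kvm/book3s_xive.c). Lines 80-87 form one small helper; a reassembled sketch, assuming kernel context (asm/io.h, asm/xive-regs.h) and that the elided lines 85-86 apply the XIVE_ESB_LD_ST_MO load-after-store ordering offset:

	static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)
	{
		u64 val;

		/*
		 * With store-EOI, a PQ=10 special load must not be
		 * reordered ahead of a preceding EOI store, so the
		 * load-after-store ordering offset is added (assumed
		 * from the elided lines).
		 */
		if (offset == XIVE_ESB_SET_PQ_10 &&
		    xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
			offset |= XIVE_ESB_LD_ST_MO;

		/* ESB "special load": set PQ per the offset, return old PQ */
		val = __raw_readq(__x_eoi_page(xd) + offset);
		return (u8)val;
	}

An ESB (Event State Buffer) page exposes a source's two state bits, P (an event was forwarded) and Q (a further event is queued), through magic MMIO offsets; each XIVE_ESB_SET_PQ_* load atomically writes a new PQ pair and returns the old one.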
95 static void xive_vm_source_eoi(u32 hw_irq, struct xive_irq_data *xd) in xive_vm_source_eoi() argument
98 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) in xive_vm_source_eoi()
99 __raw_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI); in xive_vm_source_eoi()
100 else if (xd->flags & XIVE_IRQ_FLAG_LSI) { in xive_vm_source_eoi()
106 __raw_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI); in xive_vm_source_eoi()
119 eoi_val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_00); in xive_vm_source_eoi()
122 if ((eoi_val & 1) && __x_trig_page(xd)) in xive_vm_source_eoi()
123 __raw_writeq(0, __x_trig_page(xd)); in xive_vm_source_eoi()
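Lines 95-123 are the matching EOI helper. A reassembled sketch; the if/else-if/else glue, the comments, and the eoi_val local are assumptions about the elided lines:

	static void xive_vm_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
	{
		/* Store-EOI capable HW: a single MMIO store acks the source */
		if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
			__raw_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
		else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
			/*
			 * LSIs use the HW EOI load cycle rather than PQ
			 * bits; a still-asserted level line re-fires in HW.
			 */
			__raw_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
		} else {
			u64 eoi_val;

			/*
			 * MSIs: clear P and Q, read back the old state. If
			 * Q was set, an edge arrived while the source was
			 * pending, so replay it via the trigger page rather
			 * than synthesizing an interrupt in software.
			 */
			eoi_val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_00);
			if ((eoi_val & 1) && __x_trig_page(xd))
				__raw_writeq(0, __x_trig_page(xd));
		}
	}

The eoi_val & 1 test reads the Q bit (XIVE_ESB_VAL_Q) out of the old PQ value returned by the special load.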
401 struct xive_irq_data *xd; in xive_vm_scan_for_rerouted_irqs() local
439 kvmppc_xive_select_irq(state, &hw_num, &xd); in xive_vm_scan_for_rerouted_irqs()
442 if (!(xd->flags & XIVE_IRQ_FLAG_LSI)) in xive_vm_scan_for_rerouted_irqs()
443 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11); in xive_vm_scan_for_rerouted_irqs()
446 xive_vm_source_eoi(hw_num, xd); in xive_vm_scan_for_rerouted_irqs()
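The rerouted-IRQ scan at lines 439-446 uses a forced-redelivery idiom worth spelling out. A sketch of just those fragments, with interpretive comments (the surrounding queue walk is elided):

	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * For an MSI, force PQ=11 so Q is guaranteed set; the EOI
	 * below (PQ=00) then sees Q=1 and stores to the trigger
	 * page, re-firing the interrupt toward its new routing.
	 * LSIs re-fire on their own from the EOI load cycle.
	 */
	if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

	xive_vm_source_eoi(hw_num, xd);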
521 struct xive_irq_data *xd; in xive_vm_h_eoi() local
559 kvmppc_xive_select_irq(state, &hw_num, &xd); in xive_vm_h_eoi()
588 xive_vm_source_eoi(hw_num, xd); in xive_vm_h_eoi()
592 __raw_writeq(0, __x_trig_page(xd)); in xive_vm_h_eoi()
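In the H_EOI hypercall path (lines 559-592), the trigger-page store on line 592 handles software-emulated level sources. A sketch; the guard condition is an assumption about the elided lines around it:

	xive_vm_source_eoi(hw_num, xd);

	/*
	 * If this is an emulated LSI whose level is still asserted,
	 * re-fire it so the guest sees it again (assumed guard).
	 */
	if (state->lsi && state->asserted)
		__raw_writeq(0, __x_trig_page(xd));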
825 static bool xive_irq_trigger(struct xive_irq_data *xd) in xive_irq_trigger() argument
828 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) in xive_irq_trigger()
832 if (WARN_ON(!xd->trig_mmio)) in xive_irq_trigger()
835 out_be64(xd->trig_mmio, 0); in xive_irq_trigger()
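Lines 825-835 form a complete trigger helper. A reassembled sketch; the return statements are assumptions implied by the bool return type and the WARN_ON guards:

	static bool xive_irq_trigger(struct xive_irq_data *xd)
	{
		/* Only edge (MSI) sources can be re-fired this way */
		if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
			return false;

		/* MSI sources are expected to have a trigger page mapped */
		if (WARN_ON(!xd->trig_mmio))
			return false;

		/* Any store to the trigger page fires the interrupt */
		out_be64(xd->trig_mmio, 0);

		return true;
	}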
920 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in kvmppc_xive_attach_escalation() local
922 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); in kvmppc_xive_attach_escalation()
923 vcpu->arch.xive_esc_raddr = xd->eoi_page; in kvmppc_xive_attach_escalation()
924 vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio; in kvmppc_xive_attach_escalation()
925 xd->flags |= XIVE_IRQ_FLAG_NO_EOI; in kvmppc_xive_attach_escalation()
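The escalation-attach fragments at lines 920-925 wire an escalation interrupt up for direct handling on guest exit. A sketch with interpretive comments; how d is obtained (around line 919) is assumed:

	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* Park the escalation source in the "off" state (PQ=01) */
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);

	/*
	 * Stash both addresses of the EOI page so the guest exit
	 * path can poke the ESB directly, in real or virtual mode.
	 */
	vcpu->arch.xive_esc_raddr = xd->eoi_page;
	vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;

	/* Tell the generic XIVE driver not to EOI this source itself */
	xd->flags |= XIVE_IRQ_FLAG_NO_EOI;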
1088 struct xive_irq_data *xd; in xive_lock_and_mask() local
1113 kvmppc_xive_select_irq(state, &hw_num, &xd); in xive_lock_and_mask()
1116 val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10); in xive_lock_and_mask()
1148 struct xive_irq_data *xd; in xive_finish_unmask() local
1156 kvmppc_xive_select_irq(state, &hw_num, &xd); in xive_finish_unmask()
1160 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11); in xive_finish_unmask()
1168 xive_vm_source_eoi(hw_num, xd); in xive_finish_unmask()
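Lines 1088-1168 show the two halves of the mask/unmask protocol. A sketch of the idiom; the state->old_p/old_q bookkeeping between the two halves is an assumption about the elided lines:

	/*
	 * xive_lock_and_mask(): PQ=10 sets P, so further triggers only
	 * latch in Q instead of being delivered. Save the old state.
	 */
	val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
	state->old_p = !!(val & 2);	/* XIVE_ESB_VAL_P */
	state->old_q = !!(val & 1);	/* XIVE_ESB_VAL_Q */

	/*
	 * xive_finish_unmask(): re-latch a queued trigger, then EOI
	 * unless one was already in flight; the EOI resets PQ and
	 * replays the latched trigger if Q was set.
	 */
	if (state->old_q)
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);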
1791 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_cleanup_single_escalation() local
1798 xd->stale_p = false; in xive_cleanup_single_escalation()
1801 xd->stale_p = true; in xive_cleanup_single_escalation()
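Lines 1798 and 1801 look contradictory in isolation (stale_p set to false, then to true). A reassembled sketch of the sequence; the barrier and the xive_esc_on test are assumptions about the elided lines 1799-1800:

	/*
	 * Clear stale_p, then re-set it only if the escalation is no
	 * longer armed; with a barrier in between, this gives the
	 * right result even when racing with the escalation IRQ and
	 * its EOI.
	 */
	xd->stale_p = false;
	smp_mb();
	if (!vcpu->arch.xive_esc_on)
		xd->stale_p = true;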
2610 static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd) in kvmppc_xive_cleanup_irq() argument
2612 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); in kvmppc_xive_cleanup_irq()
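The cleanup helper at lines 2610-2612 relies on PQ=01 being the "off" state. A sketched reading:

	static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
	{
		/*
		 * PQ=01 parks the source: new triggers are absorbed and
		 * nothing is delivered until the source is reprogrammed.
		 */
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	}

(The full function presumably also tears down the HW routing of the source; only the ESB line matches xd and is shown above.)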
2833 struct xive_irq_data *xd = in kvmppc_xive_debug_show_queues() local
2835 u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET); in kvmppc_xive_debug_show_queues()
2841 xd->eoi_page); in kvmppc_xive_debug_show_queues()
2856 struct xive_irq_data *xd; in kvmppc_xive_debug_show_sources() local
2863 kvmppc_xive_select_irq(state, &hw_num, &xd); in kvmppc_xive_debug_show_sources()
2865 pq = xive_vm_esb_load(xd, XIVE_ESB_GET); in kvmppc_xive_debug_show_sources()
2868 xd->src_chip); in kvmppc_xive_debug_show_sources()
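Both debug dumps (lines 2835 and 2865) read the PQ state with XIVE_ESB_GET, which, unlike the SET_PQ_* offsets, is a pure read. A one-line sketch of the pattern:

	/*
	 * XIVE_ESB_GET reports the current PQ bits without changing
	 * them, so it is safe for debugfs dumps of live sources.
	 */
	u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);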