Lines Matching refs:qpage

143 __be32 *qpage; in xive_vm_scan_interrupts() local
171 qpage = READ_ONCE(q->qpage); in xive_vm_scan_interrupts()
178 hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle); in xive_vm_scan_interrupts()
194 if (hirq == XICS_IPI || (prio == 0 && !qpage)) { in xive_vm_scan_interrupts()
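The first group is the guest-entry scan path: xive_vm_scan_interrupts() snapshots the queue page with READ_ONCE(q->qpage) and then pops entries through __xive_read_eq() until it finds a deliverable interrupt (line 194 special-cases XICS_IPI and a missing priority-0 queue). Below is a minimal user-space model of that read helper, not the kernel's code: the names (model_read_eq, the toy queue in main) are illustrative, and the entry layout (one big-endian 32-bit word per slot, bit 31 acting as the generation/toggle bit, the low 31 bits carrying the IRQ number) is inferred from these call sites and from line 436 further down.

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>  /* ntohl()/htonl() stand in for be32_to_cpup()/cpu_to_be32() */

    /* Illustrative model of popping one event-queue entry. */
    static uint32_t model_read_eq(const uint32_t *qpage, uint32_t msk,
                                  uint32_t *idx, uint32_t *toggle)
    {
            uint32_t cur;

            if (!qpage)                     /* no queue provisioned for this priority */
                    return 0;
            cur = ntohl(qpage[*idx]);       /* entries are stored big-endian */
            if ((cur >> 31) == *toggle)     /* generation bit matches: slot is empty */
                    return 0;
            *idx = (*idx + 1) & msk;        /* advance modulo the queue size */
            if (*idx == 0)                  /* wrapped around: flip expected generation */
                    *toggle ^= 1;
            return cur & 0x7fffffff;        /* low 31 bits carry the IRQ number */
    }

    int main(void)
    {
            /* 4-entry toy queue with two pending entries (generation bit set). */
            uint32_t q[4] = { htonl(0x80000000u | 23), htonl(0x80000000u | 42), 0, 0 };
            uint32_t idx = 0, toggle = 0, hirq;

            while ((hirq = model_read_eq(q, 3, &idx, &toggle)) != 0)
                    printf("popped hirq %u\n", hirq);
            return 0;
    }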
402 __be32 *qpage; in xive_vm_scan_for_rerouted_irqs() local
407 qpage = READ_ONCE(q->qpage); in xive_vm_scan_for_rerouted_irqs()
408 if (!qpage) in xive_vm_scan_for_rerouted_irqs()
413 entry = be32_to_cpup(qpage + idx); in xive_vm_scan_for_rerouted_irqs()
436 qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY); in xive_vm_scan_for_rerouted_irqs()
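xive_vm_scan_for_rerouted_irqs() walks the same layout but edits entries in place: when it finds an interrupt that has been rerouted elsewhere, it overwrites the slot with XICS_DUMMY while keeping bit 31 so the consumer's toggle tracking stays valid (line 436). A small model of that in-place rewrite follows; the XICS_DUMMY value is assumed for the sketch only, the real constant is defined in the kernel's book3s_xive.h.

    #include <stdint.h>
    #include <arpa/inet.h>

    #define XICS_DUMMY 1    /* assumed placeholder value for this sketch only */

    /*
     * Replace one queue slot with a dummy IRQ number while preserving the
     * generation bit (bit 31), mirroring line 436 above.
     */
    static void model_mark_dummy(uint32_t *qpage, uint32_t idx)
    {
            uint32_t entry = ntohl(qpage[idx]);

            qpage[idx] = htonl((entry & 0x80000000u) | XICS_DUMMY);
    }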
941 void *qpage; in xive_provision_queue() local
944 if (WARN_ON(q->qpage)) in xive_provision_queue()
948 qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order); in xive_provision_queue()
949 if (!qpage) { in xive_provision_queue()
954 memset(qpage, 0, 1 << xive->q_order); in xive_provision_queue()
963 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage, in xive_provision_queue()
1037 if (WARN_ON(!q->qpage)) in xive_try_pick_queue()
1847 if (q->qpage) { in kvmppc_xive_cleanup_vcpu()
1848 free_pages((unsigned long)q->qpage, in kvmppc_xive_cleanup_vcpu()
1850 q->qpage = NULL; in kvmppc_xive_cleanup_vcpu()
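Lines 941-963 and 1847-1850 bracket the queue-page lifecycle: provisioning grabs pages with __get_free_pages(GFP_KERNEL, xive->q_page_order), zeroes 1 << q_order bytes, and hands the buffer to xive_native_configure_queue(); cleanup frees the pages and resets q->qpage to NULL so the presence checks elsewhere in this listing (e.g. lines 408, 1037, 2148, 2819) see the queue as gone. A user-space model of that lifecycle is sketched below, with the order and page-size values invented for illustration.

    #include <stdlib.h>
    #include <string.h>

    #define Q_ORDER      16                  /* illustrative: 64 KiB queue */
    #define PAGE_SIZE_US 4096                /* assume 4 KiB pages */

    struct model_q {
            void *qpage;                     /* stand-in for struct xive_q's qpage */
    };

    /* Allocate and zero a page-aligned queue buffer (kernel: __get_free_pages + memset). */
    static int model_provision_queue(struct model_q *q)
    {
            size_t bytes = (size_t)1 << Q_ORDER;

            if (q->qpage)                    /* mirrors the WARN_ON at line 944 */
                    return 0;
            q->qpage = aligned_alloc(PAGE_SIZE_US, bytes);
            if (!q->qpage)
                    return -1;               /* kernel returns -ENOMEM here */
            memset(q->qpage, 0, bytes);      /* mirrors the memset at line 954 */
            return 0;
    }

    /* Free the queue buffer and clear the pointer (kernel: free_pages + NULL, lines 1848-1850). */
    static void model_cleanup_queue(struct model_q *q)
    {
            free(q->qpage);
            q->qpage = NULL;
    }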
2118 irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle); in xive_pre_save_queue()
2148 if (xc->queues[j].qpage) in xive_pre_save_scan()
2819 if (!q->qpage && !xc->esc_virq[i]) in kvmppc_xive_debug_show_queues()
2822 if (q->qpage) { in kvmppc_xive_debug_show_queues()
2825 i0 = be32_to_cpup(q->qpage + idx); in kvmppc_xive_debug_show_queues()
2827 i1 = be32_to_cpup(q->qpage + idx); in kvmppc_xive_debug_show_queues()
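Finally, the save and debug paths only read: xive_pre_save_queue() drains a snapshot through the same __xive_read_eq() walk, while kvmppc_xive_debug_show_queues() peeks at the next two slots (i0 and i1) without advancing the queue. A model of that non-destructive peek, reusing the illustrative layout from the first sketch; the masked index increment between the two reads is assumed from the same queue geometry rather than shown in the listing.

    #include <stdint.h>
    #include <arpa/inet.h>

    /* Read the entries at idx and idx+1 without touching q->idx or q->toggle. */
    static void model_peek_two(const uint32_t *qpage, uint32_t msk, uint32_t idx,
                               uint32_t *i0, uint32_t *i1)
    {
            *i0 = ntohl(qpage[idx]);         /* mirrors line 2825 */
            idx = (idx + 1) & msk;
            *i1 = ntohl(qpage[idx]);         /* mirrors line 2827 */
    }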