Lines matching refs:desc: every reference to the identifier desc in Xen's x86 IRQ code (xen/arch/x86/irq.c, going by the function names), shown with its source line number and enclosing function; a trailing "local" or "argument" marks lines where desc is declared as a local variable or taken as a parameter.

119 struct irq_desc *desc = irq_to_desc(irq); in __bind_irq_vector() local
127 if ( (desc->arch.vector == vector) && in __bind_irq_vector()
128 cpumask_equal(desc->arch.cpu_mask, &online_mask) ) in __bind_irq_vector()
130 if ( desc->arch.vector != IRQ_VECTOR_UNASSIGNED ) in __bind_irq_vector()
135 desc->arch.vector = vector; in __bind_irq_vector()
136 cpumask_copy(desc->arch.cpu_mask, &online_mask); in __bind_irq_vector()
137 if ( desc->arch.used_vectors ) in __bind_irq_vector()
139 ASSERT(!test_bit(vector, desc->arch.used_vectors)); in __bind_irq_vector()
140 set_bit(vector, desc->arch.used_vectors); in __bind_irq_vector()
142 desc->arch.used = IRQ_USED; in __bind_irq_vector()
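
In __bind_irq_vector() above, and again in __clear_irq_vector() below, every update of the used_vectors bitmap is preceded by an ASSERT naming the expected prior state: a vector must be clear before set_bit() and set before clear_bit(). A self-contained sketch of that invariant in plain C, using assert() and made-up names rather than Xen's bitops:

    #include <assert.h>

    #define NR_VECTORS    256
    #define BITS_PER_WORD (8 * (int)sizeof(unsigned long))

    static unsigned long used_vectors[NR_VECTORS / BITS_PER_WORD];

    /* Mark a vector allocated; claiming one twice is a logic bug. */
    static void vector_set(int v)
    {
        unsigned long *w = &used_vectors[v / BITS_PER_WORD];
        unsigned long bit = 1UL << (v % BITS_PER_WORD);

        assert(!(*w & bit));
        *w |= bit;
    }

    /* Release a vector; releasing one that was never set is a bug. */
    static void vector_clear(int v)
    {
        unsigned long *w = &used_vectors[v / BITS_PER_WORD];
        unsigned long bit = 1UL << (v % BITS_PER_WORD);

        assert(*w & bit);
        *w &= ~bit;
    }

    int main(void)
    {
        vector_set(0x21);      /* e.g. bind an IRQ to vector 0x21 */
        vector_clear(0x21);    /* ...and tear it down again */
        return 0;
    }
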
163 struct irq_desc *desc; in create_irq() local
167 desc = irq_to_desc(irq); in create_irq()
168 if (cmpxchg(&desc->arch.used, IRQ_UNUSED, IRQ_RESERVED) == IRQ_UNUSED) in create_irq()
175 ret = init_one_irq_desc(desc); in create_irq()
190 desc->arch.used = IRQ_UNUSED; in create_irq()
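
create_irq() claims a free descriptor by flipping arch.used from IRQ_UNUSED to IRQ_RESERVED with cmpxchg() (line 168), so the scan over descriptors needs no lock, and line 190 hands the slot back when later setup fails. A minimal sketch of the same reservation pattern with C11 atomics; the slot table and names are illustrative, not Xen's:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { SLOT_UNUSED, SLOT_RESERVED, SLOT_USED };

    #define NR_SLOTS 16
    static _Atomic int slot[NR_SLOTS];

    /* Claim the first unused slot: compare-and-swap flips
     * UNUSED -> RESERVED atomically, so two racing callers can
     * never reserve the same slot. */
    static int reserve_slot(void)
    {
        for (int i = 0; i < NR_SLOTS; i++) {
            int expected = SLOT_UNUSED;

            if (atomic_compare_exchange_strong(&slot[i], &expected,
                                               SLOT_RESERVED))
                return i;
        }
        return -1;    /* table full */
    }

    int main(void)
    {
        int i = reserve_slot();

        printf("reserved slot %d\n", i);
        /* On a later setup failure the slot is handed back by
         * storing UNUSED again, as line 190 above does. */
        if (i >= 0)
            atomic_store(&slot[i], SLOT_UNUSED);
        return 0;
    }
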
207 struct irq_desc *desc = irq_to_desc(irq); in destroy_irq() local
223 spin_lock_irqsave(&desc->lock, flags); in destroy_irq()
224 desc->status &= ~IRQ_GUEST; in destroy_irq()
225 desc->handler->shutdown(desc); in destroy_irq()
226 desc->status |= IRQ_DISABLED; in destroy_irq()
227 action = desc->action; in destroy_irq()
228 desc->action = NULL; in destroy_irq()
229 desc->msi_desc = NULL; in destroy_irq()
230 cpumask_setall(desc->affinity); in destroy_irq()
231 spin_unlock_irqrestore(&desc->lock, flags); in destroy_irq()
234 do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS ); in destroy_irq()
236 spin_lock_irqsave(&desc->lock, flags); in destroy_irq()
237 desc->handler = &no_irq_type; in destroy_irq()
239 desc->arch.used_vectors = NULL; in destroy_irq()
240 spin_unlock_irqrestore(&desc->lock, flags); in destroy_irq()
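
destroy_irq() detaches the action under the descriptor lock, then drops the lock and spins in the do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS ); loop at line 234 until no CPU is still inside the handler, and only then does the final cleanup. A user-space analogue of that drain, assuming POSIX threads and an atomic flag in place of the status bit:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Set before the "handler" thread starts, cleared when it ends. */
    static atomic_bool in_progress = true;

    static void *handler(void *arg)
    {
        (void)arg;
        usleep(10000);    /* simulated handler work on another CPU */
        atomic_store(&in_progress, false);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, handler, NULL);

        /* Teardown must not free anything while a handler may still
         * be running elsewhere: spin until the flag clears.  The
         * atomic load stands in for smp_mb() plus the re-read. */
        while (atomic_load(&in_progress))
            ;

        puts("no handler in flight; safe to finish teardown");
        pthread_join(t, NULL);
        return 0;
    }
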
249 struct irq_desc *desc = irq_to_desc(irq); in __clear_irq_vector() local
251 BUG_ON(!desc->arch.vector); in __clear_irq_vector()
254 vector = desc->arch.vector; in __clear_irq_vector()
255 cpumask_and(&tmp_mask, desc->arch.cpu_mask, &cpu_online_map); in __clear_irq_vector()
262 desc->arch.vector = IRQ_VECTOR_UNASSIGNED; in __clear_irq_vector()
263 cpumask_clear(desc->arch.cpu_mask); in __clear_irq_vector()
265 if ( desc->arch.used_vectors ) in __clear_irq_vector()
267 ASSERT(test_bit(vector, desc->arch.used_vectors)); in __clear_irq_vector()
268 clear_bit(vector, desc->arch.used_vectors); in __clear_irq_vector()
271 desc->arch.used = IRQ_UNUSED; in __clear_irq_vector()
275 if ( likely(!desc->arch.move_in_progress) ) in __clear_irq_vector()
279 old_vector = desc->arch.old_vector; in __clear_irq_vector()
280 cpumask_and(&tmp_mask, desc->arch.old_cpu_mask, &cpu_online_map); in __clear_irq_vector()
288 desc->arch.old_vector = IRQ_VECTOR_UNASSIGNED; in __clear_irq_vector()
289 cpumask_clear(desc->arch.old_cpu_mask); in __clear_irq_vector()
291 if ( desc->arch.used_vectors ) in __clear_irq_vector()
293 ASSERT(test_bit(old_vector, desc->arch.used_vectors)); in __clear_irq_vector()
294 clear_bit(old_vector, desc->arch.used_vectors); in __clear_irq_vector()
297 desc->arch.move_in_progress = 0; in __clear_irq_vector()
329 int arch_init_one_irq_desc(struct irq_desc *desc) in arch_init_one_irq_desc() argument
331 if ( !zalloc_cpumask_var(&desc->arch.cpu_mask) ) in arch_init_one_irq_desc()
334 if ( !alloc_cpumask_var(&desc->arch.old_cpu_mask) ) in arch_init_one_irq_desc()
336 free_cpumask_var(desc->arch.cpu_mask); in arch_init_one_irq_desc()
340 if ( !alloc_cpumask_var(&desc->arch.pending_mask) ) in arch_init_one_irq_desc()
342 free_cpumask_var(desc->arch.old_cpu_mask); in arch_init_one_irq_desc()
343 free_cpumask_var(desc->arch.cpu_mask); in arch_init_one_irq_desc()
347 desc->arch.vector = IRQ_VECTOR_UNASSIGNED; in arch_init_one_irq_desc()
348 desc->arch.old_vector = IRQ_VECTOR_UNASSIGNED; in arch_init_one_irq_desc()
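
arch_init_one_irq_desc() allocates three cpumasks and, at each failure point, frees exactly the ones already allocated before bailing out. The same unwind written as a generic C sketch, with malloc/calloc standing in for the cpumask allocators and made-up names throughout:

    #include <stdlib.h>

    struct three_masks { void *a, *b, *c; };

    /* Allocate three buffers; each failure path frees exactly what
     * was already acquired, in reverse order. */
    static int masks_init(struct three_masks *m)
    {
        if (!(m->a = calloc(1, 64)))    /* zalloc_cpumask_var() */
            goto fail;
        if (!(m->b = malloc(64)))       /* alloc_cpumask_var() */
            goto fail_a;
        if (!(m->c = malloc(64)))       /* alloc_cpumask_var() */
            goto fail_b;
        return 0;

     fail_b:
        free(m->b);
     fail_a:
        free(m->a);
     fail:
        return -1;    /* -ENOMEM in the original */
    }

    int main(void)
    {
        struct three_masks m;

        if (masks_init(&m))
            return 1;
        free(m.c);
        free(m.b);
        free(m.a);
        return 0;
    }
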
355 struct irq_desc *desc; in init_irq_data() local
368 desc = irq_to_desc(irq); in init_irq_data()
369 desc->irq = irq; in init_irq_data()
370 init_one_irq_desc(desc); in init_irq_data()
389 static void ack_none(struct irq_desc *desc) in ack_none() argument
391 ack_bad_irq(desc->irq); in ack_none()
409 struct irq_desc *desc = irq_to_desc(irq); in irq_get_used_vector_mask() local
413 if ( desc->arch.used_vectors ) in irq_get_used_vector_mask()
442 int irq, struct irq_desc *desc, const cpumask_t *mask) in __assign_irq_vector() argument
463 if (cpumask_intersects(&tmp_mask, desc->arch.cpu_mask)) { in __assign_irq_vector()
464 desc->arch.vector = old_vector; in __assign_irq_vector()
469 if ( desc->arch.move_in_progress || desc->arch.move_cleanup_count ) in __assign_irq_vector()
477 if ( desc->arch.used == IRQ_USED ) in __assign_irq_vector()
479 irq_used_vectors = desc->arch.used_vectors; in __assign_irq_vector()
521 desc->arch.move_in_progress = 1; in __assign_irq_vector()
522 cpumask_copy(desc->arch.old_cpu_mask, desc->arch.cpu_mask); in __assign_irq_vector()
523 desc->arch.old_vector = desc->arch.vector; in __assign_irq_vector()
528 desc->arch.vector = vector; in __assign_irq_vector()
529 cpumask_copy(desc->arch.cpu_mask, &tmp_mask); in __assign_irq_vector()
531 desc->arch.used = IRQ_USED; in __assign_irq_vector()
532 ASSERT((desc->arch.used_vectors == NULL) in __assign_irq_vector()
533 || (desc->arch.used_vectors == irq_used_vectors)); in __assign_irq_vector()
534 desc->arch.used_vectors = irq_used_vectors; in __assign_irq_vector()
536 if ( desc->arch.used_vectors ) in __assign_irq_vector()
538 ASSERT(!test_bit(vector, desc->arch.used_vectors)); in __assign_irq_vector()
540 set_bit(vector, desc->arch.used_vectors); in __assign_irq_vector()
553 struct irq_desc *desc = irq_to_desc(irq); in assign_irq_vector() local
558 ret = __assign_irq_vector(irq, desc, mask ?: TARGET_CPUS); in assign_irq_vector()
560 ret = desc->arch.vector; in assign_irq_vector()
561 cpumask_copy(desc->affinity, desc->arch.cpu_mask); in assign_irq_vector()
581 struct irq_desc *desc = irq_to_desc(irq); in setup_vector_irq() local
583 if ( !irq_desc_initialized(desc) ) in setup_vector_irq()
588 cpumask_set_cpu(cpu, desc->arch.cpu_mask); in setup_vector_irq()
589 else if ( !cpumask_test_cpu(cpu, desc->arch.cpu_mask) ) in setup_vector_irq()
595 void move_masked_irq(struct irq_desc *desc) in move_masked_irq() argument
597 cpumask_t *pending_mask = desc->arch.pending_mask; in move_masked_irq()
599 if (likely(!(desc->status & IRQ_MOVE_PENDING))) in move_masked_irq()
602 desc->status &= ~IRQ_MOVE_PENDING; in move_masked_irq()
607 if (!desc->handler->set_affinity) in move_masked_irq()
620 desc->handler->set_affinity(desc, pending_mask); in move_masked_irq()
625 void move_native_irq(struct irq_desc *desc) in move_native_irq() argument
627 if (likely(!(desc->status & IRQ_MOVE_PENDING))) in move_native_irq()
630 if (unlikely(desc->status & IRQ_DISABLED)) in move_native_irq()
633 desc->handler->disable(desc); in move_native_irq()
634 move_masked_irq(desc); in move_native_irq()
635 desc->handler->enable(desc); in move_native_irq()
650 struct irq_desc *desc; in irq_move_cleanup_interrupt() local
659 desc = irq_to_desc(irq); in irq_move_cleanup_interrupt()
660 if (!desc) in irq_move_cleanup_interrupt()
663 spin_lock(&desc->lock); in irq_move_cleanup_interrupt()
664 if (!desc->arch.move_cleanup_count) in irq_move_cleanup_interrupt()
667 if ( vector == desc->arch.vector && in irq_move_cleanup_interrupt()
668 cpumask_test_cpu(me, desc->arch.cpu_mask) ) in irq_move_cleanup_interrupt()
690 desc->arch.move_cleanup_count--; in irq_move_cleanup_interrupt()
692 if ( desc->arch.move_cleanup_count == 0 ) in irq_move_cleanup_interrupt()
694 desc->arch.old_vector = IRQ_VECTOR_UNASSIGNED; in irq_move_cleanup_interrupt()
695 cpumask_clear(desc->arch.old_cpu_mask); in irq_move_cleanup_interrupt()
697 if ( desc->arch.used_vectors ) in irq_move_cleanup_interrupt()
699 ASSERT(test_bit(vector, desc->arch.used_vectors)); in irq_move_cleanup_interrupt()
700 clear_bit(vector, desc->arch.used_vectors); in irq_move_cleanup_interrupt()
704 spin_unlock(&desc->lock); in irq_move_cleanup_interrupt()
708 static void send_cleanup_vector(struct irq_desc *desc) in send_cleanup_vector() argument
712 cpumask_and(&cleanup_mask, desc->arch.old_cpu_mask, &cpu_online_map); in send_cleanup_vector()
713 desc->arch.move_cleanup_count = cpumask_weight(&cleanup_mask); in send_cleanup_vector()
716 desc->arch.move_in_progress = 0; in send_cleanup_vector()
719 void irq_complete_move(struct irq_desc *desc) in irq_complete_move() argument
723 if (likely(!desc->arch.move_in_progress)) in irq_complete_move()
729 if ( vector == desc->arch.vector && in irq_complete_move()
730 cpumask_test_cpu(me, desc->arch.cpu_mask) ) in irq_complete_move()
731 send_cleanup_vector(desc); in irq_complete_move()
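
Lines 690-700 implement last-one-out cleanup: each CPU that still routed the old vector decrements move_cleanup_count under desc->lock, and whichever decrement reaches zero releases old_vector and old_cpu_mask. A sketch of that pattern; it uses a C11 atomic counter instead of the lock so it stays self-contained:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int cleanup_count;

    /* Runs once on each CPU still routing the old vector; the
     * decrement that reaches zero does the final release. */
    static void cleanup_one(void)
    {
        if (atomic_fetch_sub(&cleanup_count, 1) == 1)
            puts("last CPU done: release old vector and old cpu mask");
    }

    int main(void)
    {
        atomic_store(&cleanup_count, 3);    /* three CPUs in old mask */
        cleanup_one();
        cleanup_one();
        cleanup_one();    /* only this call prints */
        return 0;
    }
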
734 unsigned int set_desc_affinity(struct irq_desc *desc, const cpumask_t *mask) in set_desc_affinity() argument
744 irq = desc->irq; in set_desc_affinity()
747 ret = __assign_irq_vector(irq, desc, mask); in set_desc_affinity()
753 cpumask_copy(desc->affinity, mask); in set_desc_affinity()
754 cpumask_and(&dest_mask, mask, desc->arch.cpu_mask); in set_desc_affinity()
760 void irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask) in irq_set_affinity() argument
762 if (!desc->handler->set_affinity) in irq_set_affinity()
765 ASSERT(spin_is_locked(&desc->lock)); in irq_set_affinity()
766 desc->status &= ~IRQ_MOVE_PENDING; in irq_set_affinity()
768 cpumask_copy(desc->arch.pending_mask, mask); in irq_set_affinity()
770 desc->status |= IRQ_MOVE_PENDING; in irq_set_affinity()
776 struct irq_desc *desc = domain_spin_lock_irq_desc(d, pirq, &flags); in pirq_set_affinity() local
778 if ( !desc ) in pirq_set_affinity()
780 irq_set_affinity(desc, mask); in pirq_set_affinity()
781 spin_unlock_irqrestore(&desc->lock, flags); in pirq_set_affinity()
819 struct irq_desc *desc; in do_IRQ() local
844 desc = irq_to_desc(~irq); in do_IRQ()
845 if ( ~irq < nr_irqs && irq_desc_initialized(desc) ) in do_IRQ()
847 spin_lock(&desc->lock); in do_IRQ()
849 ~irq, *cpumask_bits(desc->affinity), in do_IRQ()
850 *cpumask_bits(desc->arch.cpu_mask), in do_IRQ()
851 *cpumask_bits(desc->arch.old_cpu_mask), in do_IRQ()
852 desc->arch.vector, desc->arch.old_vector, in do_IRQ()
853 desc->handler->typename, desc->status); in do_IRQ()
854 spin_unlock(&desc->lock); in do_IRQ()
862 desc = irq_to_desc(irq); in do_IRQ()
864 spin_lock(&desc->lock); in do_IRQ()
865 desc->handler->ack(desc); in do_IRQ()
867 if ( likely(desc->status & IRQ_GUEST) ) in do_IRQ()
870 unlikely(desc->rl_cnt++ >= irq_ratelimit_threshold) ) in do_IRQ()
873 if ( now < (desc->rl_quantum_start + MILLISECS(10)) ) in do_IRQ()
875 desc->handler->disable(desc); in do_IRQ()
881 if ( likely(list_empty(&desc->rl_link)) ) in do_IRQ()
886 list_add(&desc->rl_link, &irq_ratelimit_list); in do_IRQ()
891 desc->rl_cnt = 0; in do_IRQ()
892 desc->rl_quantum_start = now; in do_IRQ()
901 desc->status &= ~IRQ_REPLAY; in do_IRQ()
902 desc->status |= IRQ_PENDING; in do_IRQ()
908 if ( desc->status & (IRQ_DISABLED | IRQ_INPROGRESS) ) in do_IRQ()
911 desc->status |= IRQ_INPROGRESS; in do_IRQ()
913 action = desc->action; in do_IRQ()
914 while ( desc->status & IRQ_PENDING ) in do_IRQ()
916 desc->status &= ~IRQ_PENDING; in do_IRQ()
917 spin_unlock_irq(&desc->lock); in do_IRQ()
921 spin_lock_irq(&desc->lock); in do_IRQ()
924 desc->status &= ~IRQ_INPROGRESS; in do_IRQ()
927 if ( desc->handler->end ) in do_IRQ()
928 desc->handler->end(desc, vector); in do_IRQ()
930 spin_unlock(&desc->lock); in do_IRQ()
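
Lines 908-924 are the classic pending/in-progress loop: IRQ_INPROGRESS keeps a second CPU from entering the handler, IRQ_PENDING records arrivals while desc->lock was dropped around the action, and the while loop re-runs the action until nothing new has arrived, so instances are neither lost nor run concurrently. A single-threaded sketch of the control flow, with plain flags and no real locking:

    #include <stdbool.h>
    #include <stdio.h>

    struct desc { bool in_progress, pending; };

    static int rearrivals = 1;    /* device re-raises once mid-handler */

    static void action(struct desc *d)
    {
        if (rearrivals-- > 0)
            d->pending = true;    /* a new instance arrived meanwhile */
        puts("handler ran");
    }

    static void handle(struct desc *d)
    {
        if (d->in_progress)
            return;               /* another CPU is already looping */
        d->in_progress = true;
        d->pending = true;

        while (d->pending) {
            d->pending = false;   /* consume this arrival */
            action(d);            /* Xen drops desc->lock around this */
        }
        d->in_progress = false;
    }

    int main(void)
    {
        struct desc d = { false, false };

        handle(&d);    /* prints "handler ran" twice */
        return 0;
    }
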
938 struct irq_desc *desc, *tmp; in irq_ratelimit_timer_fn() local
943 list_for_each_entry_safe ( desc, tmp, &irq_ratelimit_list, rl_link ) in irq_ratelimit_timer_fn()
945 spin_lock(&desc->lock); in irq_ratelimit_timer_fn()
946 desc->handler->enable(desc); in irq_ratelimit_timer_fn()
947 list_del(&desc->rl_link); in irq_ratelimit_timer_fn()
948 INIT_LIST_HEAD(&desc->rl_link); in irq_ratelimit_timer_fn()
949 spin_unlock(&desc->lock); in irq_ratelimit_timer_fn()
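
Lines 870-892 are storm protection: each interrupt bumps rl_cnt, and once it passes irq_ratelimit_threshold within one 10 ms quantum the line is disabled and parked on irq_ratelimit_list for the timer (lines 943-949) to re-enable. The counting logic extracted into a standalone sketch, with illustrative threshold and quantum values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RL_THRESHOLD  5     /* events tolerated per quantum */
    #define RL_QUANTUM_MS 10

    struct ratelimit {
        uint64_t quantum_start;    /* start of current quantum, in ms */
        unsigned int cnt;
    };

    /* Returns true when the caller should mask the source; the count
     * resets whenever a new quantum begins. */
    static bool rl_event(struct ratelimit *rl, uint64_t now_ms)
    {
        if (rl->cnt++ < RL_THRESHOLD)
            return false;
        if (now_ms < rl->quantum_start + RL_QUANTUM_MS)
            return true;           /* storm: too many in one quantum */
        rl->cnt = 0;               /* quiet enough: new quantum */
        rl->quantum_start = now_ms;
        return false;
    }

    int main(void)
    {
        struct ratelimit rl = { 0, 0 };

        for (uint64_t t = 0; t < 20; t++) {
            if (rl_event(&rl, t)) {
                printf("storm at t=%llu ms: mask the line\n",
                       (unsigned long long)t);
                break;    /* the timer would unmask it later */
            }
        }
        return 0;
    }
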
999 struct irq_desc *desc; in release_irq() local
1003 desc = irq_to_desc(irq); in release_irq()
1005 spin_lock_irqsave(&desc->lock,flags); in release_irq()
1006 action = desc->action; in release_irq()
1007 desc->action = NULL; in release_irq()
1008 desc->handler->shutdown(desc); in release_irq()
1009 desc->status |= IRQ_DISABLED; in release_irq()
1010 spin_unlock_irqrestore(&desc->lock,flags); in release_irq()
1013 do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS ); in release_irq()
1022 struct irq_desc *desc; in setup_irq() local
1027 desc = irq_to_desc(irq); in setup_irq()
1029 spin_lock_irqsave(&desc->lock,flags); in setup_irq()
1031 if ( desc->action != NULL ) in setup_irq()
1033 spin_unlock_irqrestore(&desc->lock,flags); in setup_irq()
1037 desc->action = new; in setup_irq()
1038 desc->status &= ~IRQ_DISABLED; in setup_irq()
1039 desc->handler->startup(desc); in setup_irq()
1041 spin_unlock_irqrestore(&desc->lock,flags); in setup_irq()
1105 struct irq_desc *desc = data; in irq_guest_eoi_timer_fn() local
1106 unsigned int irq = desc - irq_desc; in irq_guest_eoi_timer_fn()
1111 spin_lock_irqsave(&desc->lock, flags); in irq_guest_eoi_timer_fn()
1113 if ( !(desc->status & IRQ_GUEST) ) in irq_guest_eoi_timer_fn()
1116 action = (irq_guest_action_t *)desc->action; in irq_guest_eoi_timer_fn()
1136 if ( desc->handler->end ) in irq_guest_eoi_timer_fn()
1137 desc->handler->end(desc, 0); in irq_guest_eoi_timer_fn()
1141 spin_unlock_irq(&desc->lock); in irq_guest_eoi_timer_fn()
1142 on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0); in irq_guest_eoi_timer_fn()
1143 spin_lock_irq(&desc->lock); in irq_guest_eoi_timer_fn()
1148 spin_unlock_irqrestore(&desc->lock, flags); in irq_guest_eoi_timer_fn()
1153 struct irq_desc *desc = irq_to_desc(irq); in __do_IRQ_guest() local
1154 irq_guest_action_t *action = (irq_guest_action_t *)desc->action; in __do_IRQ_guest()
1164 ASSERT(desc->status & IRQ_DISABLED); in __do_IRQ_guest()
1165 if ( desc->handler->end ) in __do_IRQ_guest()
1166 desc->handler->end(desc, vector); in __do_IRQ_guest()
1222 struct irq_desc *desc; in pirq_spin_lock_irq_desc() local
1232 desc = irq_to_desc(irq); in pirq_spin_lock_irq_desc()
1233 spin_lock_irqsave(&desc->lock, flags); in pirq_spin_lock_irq_desc()
1236 spin_unlock_irqrestore(&desc->lock, flags); in pirq_spin_lock_irq_desc()
1242 return desc; in pirq_spin_lock_irq_desc()
1370 struct irq_desc *desc; in flush_ready_eoi() local
1381 desc = irq_to_desc(irq); in flush_ready_eoi()
1382 spin_lock(&desc->lock); in flush_ready_eoi()
1383 if ( desc->handler->end ) in flush_ready_eoi()
1384 desc->handler->end(desc, peoi[sp].vector); in flush_ready_eoi()
1385 spin_unlock(&desc->lock); in flush_ready_eoi()
1391 static void __set_eoi_ready(struct irq_desc *desc) in __set_eoi_ready() argument
1393 irq_guest_action_t *action = (irq_guest_action_t *)desc->action; in __set_eoi_ready()
1397 irq = desc - irq_desc; in __set_eoi_ready()
1399 if ( !(desc->status & IRQ_GUEST) || in __set_eoi_ready()
1417 struct irq_desc *desc = data; in set_eoi_ready() local
1421 spin_lock(&desc->lock); in set_eoi_ready()
1422 __set_eoi_ready(desc); in set_eoi_ready()
1423 spin_unlock(&desc->lock); in set_eoi_ready()
1430 struct irq_desc *desc; in pirq_guest_eoi() local
1433 desc = pirq_spin_lock_irq_desc(pirq, NULL); in pirq_guest_eoi()
1434 if ( desc ) in pirq_guest_eoi()
1435 desc_guest_eoi(desc, pirq); in pirq_guest_eoi()
1438 void desc_guest_eoi(struct irq_desc *desc, struct pirq *pirq) in desc_guest_eoi() argument
1444 if ( !(desc->status & IRQ_GUEST) ) in desc_guest_eoi()
1446 spin_unlock_irq(&desc->lock); in desc_guest_eoi()
1450 action = (irq_guest_action_t *)desc->action; in desc_guest_eoi()
1451 irq = desc - irq_desc; in desc_guest_eoi()
1456 spin_unlock_irq(&desc->lock); in desc_guest_eoi()
1463 if ( desc->handler->end ) in desc_guest_eoi()
1464 desc->handler->end(desc, 0); in desc_guest_eoi()
1465 spin_unlock_irq(&desc->lock); in desc_guest_eoi()
1475 __set_eoi_ready(desc); in desc_guest_eoi()
1476 spin_unlock(&desc->lock); in desc_guest_eoi()
1482 spin_unlock_irq(&desc->lock); in desc_guest_eoi()
1486 on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0); in desc_guest_eoi()
1511 struct irq_desc *desc; in pirq_acktype() local
1518 desc = irq_to_desc(irq); in pirq_acktype()
1520 if ( desc->handler == &no_irq_type ) in pirq_acktype()
1527 if ( !strcmp(desc->handler->typename, "IO-APIC-edge") || in pirq_acktype()
1528 !strcmp(desc->handler->typename, "local-APIC-edge") ) in pirq_acktype()
1535 if ( desc->msi_desc ) in pirq_acktype()
1536 return msi_maskable_irq(desc->msi_desc) ? ACKTYPE_NONE : ACKTYPE_EOI; in pirq_acktype()
1542 if ( !strcmp(desc->handler->typename, "IO-APIC-level") ) in pirq_acktype()
1543 return desc->handler->ack == irq_complete_move ? in pirq_acktype()
1547 if ( !strcmp(desc->handler->typename, "XT-PIC") ) in pirq_acktype()
1550 printk("Unknown PIC type '%s' for IRQ %d\n", desc->handler->typename, irq); in pirq_acktype()
1558 struct irq_desc *desc; in pirq_shared() local
1563 desc = domain_spin_lock_irq_desc(d, pirq, &flags); in pirq_shared()
1564 if ( desc == NULL ) in pirq_shared()
1567 action = (irq_guest_action_t *)desc->action; in pirq_shared()
1568 shared = ((desc->status & IRQ_GUEST) && (action->nr_guests > 1)); in pirq_shared()
1570 spin_unlock_irqrestore(&desc->lock, flags); in pirq_shared()
1578 struct irq_desc *desc; in pirq_guest_bind() local
1586 desc = pirq_spin_lock_irq_desc(pirq, NULL); in pirq_guest_bind()
1587 if ( desc == NULL ) in pirq_guest_bind()
1593 action = (irq_guest_action_t *)desc->action; in pirq_guest_bind()
1594 irq = desc - irq_desc; in pirq_guest_bind()
1596 if ( !(desc->status & IRQ_GUEST) ) in pirq_guest_bind()
1598 if ( desc->action != NULL ) in pirq_guest_bind()
1602 pirq->pirq, v->domain->domain_id, desc->action->name); in pirq_guest_bind()
1609 spin_unlock_irq(&desc->lock); in pirq_guest_bind()
1621 desc->action = (struct irqaction *)action; in pirq_guest_bind()
1628 init_timer(&action->eoi_timer, irq_guest_eoi_timer_fn, desc, 0); in pirq_guest_bind()
1630 desc->status |= IRQ_GUEST; in pirq_guest_bind()
1633 if ( !opt_noirqbalance && (desc->handler->set_affinity != NULL) ) in pirq_guest_bind()
1634 desc->handler->set_affinity(desc, cpumask_of(v->processor)); in pirq_guest_bind()
1636 desc->status &= ~IRQ_DISABLED; in pirq_guest_bind()
1637 desc->handler->startup(desc); in pirq_guest_bind()
1655 ASSERT(desc->status & IRQ_DISABLED); in pirq_guest_bind()
1656 spin_unlock_irq(&desc->lock); in pirq_guest_bind()
1678 spin_unlock_irq(&desc->lock); in pirq_guest_bind()
1689 struct domain *d, struct pirq *pirq, struct irq_desc *desc) in __pirq_guest_unbind() argument
1696 action = (irq_guest_action_t *)desc->action; in __pirq_guest_unbind()
1697 irq = desc - irq_desc; in __pirq_guest_unbind()
1706 BUG_ON(!(desc->status & IRQ_GUEST)); in __pirq_guest_unbind()
1720 desc->handler->end ) in __pirq_guest_unbind()
1721 desc->handler->end(desc, 0); in __pirq_guest_unbind()
1730 spin_unlock_irq(&desc->lock); in __pirq_guest_unbind()
1731 on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0); in __pirq_guest_unbind()
1732 spin_lock_irq(&desc->lock); in __pirq_guest_unbind()
1749 desc->handler->disable(desc); in __pirq_guest_unbind()
1750 desc->status |= IRQ_DISABLED; in __pirq_guest_unbind()
1763 spin_unlock_irq(&desc->lock); in __pirq_guest_unbind()
1764 on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1); in __pirq_guest_unbind()
1765 spin_lock_irq(&desc->lock); in __pirq_guest_unbind()
1770 desc->action = NULL; in __pirq_guest_unbind()
1771 desc->status &= ~(IRQ_GUEST|IRQ_INPROGRESS); in __pirq_guest_unbind()
1772 desc->handler->shutdown(desc); in __pirq_guest_unbind()
1781 struct irq_desc *desc; in pirq_guest_unbind() local
1787 desc = pirq_spin_lock_irq_desc(pirq, NULL); in pirq_guest_unbind()
1789 if ( desc == NULL ) in pirq_guest_unbind()
1793 desc = irq_to_desc(irq); in pirq_guest_unbind()
1794 spin_lock_irq(&desc->lock); in pirq_guest_unbind()
1799 oldaction = __pirq_guest_unbind(d, pirq, desc); in pirq_guest_unbind()
1802 spin_unlock_irq(&desc->lock); in pirq_guest_unbind()
1816 struct irq_desc *desc; in pirq_guest_force_unbind() local
1824 desc = pirq_spin_lock_irq_desc(pirq, NULL); in pirq_guest_force_unbind()
1825 BUG_ON(desc == NULL); in pirq_guest_force_unbind()
1827 if ( !(desc->status & IRQ_GUEST) ) in pirq_guest_force_unbind()
1830 action = (irq_guest_action_t *)desc->action; in pirq_guest_force_unbind()
1844 oldaction = __pirq_guest_unbind(d, pirq, desc); in pirq_guest_force_unbind()
1847 spin_unlock_irq(&desc->lock); in pirq_guest_force_unbind()
1918 struct irq_desc *desc; in map_domain_pirq() local
1969 desc = irq_to_desc(irq); in map_domain_pirq()
1999 spin_lock_irqsave(&desc->lock, flags); in map_domain_pirq()
2001 if ( desc->handler != &no_irq_type ) in map_domain_pirq()
2003 spin_unlock_irqrestore(&desc->lock, flags); in map_domain_pirq()
2013 while ( !(ret = setup_msi_irq(desc, msi_desc + nr)) ) in map_domain_pirq()
2016 !desc->arch.used_vectors ) in map_domain_pirq()
2018 desc->arch.used_vectors = &pdev->arch.used_vectors; in map_domain_pirq()
2019 if ( desc->arch.vector != IRQ_VECTOR_UNASSIGNED ) in map_domain_pirq()
2021 int vector = desc->arch.vector; in map_domain_pirq()
2023 ASSERT(!test_bit(vector, desc->arch.used_vectors)); in map_domain_pirq()
2024 set_bit(vector, desc->arch.used_vectors); in map_domain_pirq()
2033 spin_unlock_irqrestore(&desc->lock, flags); in map_domain_pirq()
2050 desc = irq_to_desc(irq); in map_domain_pirq()
2051 spin_lock_irqsave(&desc->lock, flags); in map_domain_pirq()
2053 if ( desc->handler != &no_irq_type ) in map_domain_pirq()
2056 d->domain_id, irq, pirq + nr, desc->handler->typename); in map_domain_pirq()
2064 spin_unlock_irqrestore(&desc->lock, flags); in map_domain_pirq()
2069 desc = irq_to_desc(msi_desc->irq); in map_domain_pirq()
2070 spin_lock_irqsave(&desc->lock, flags); in map_domain_pirq()
2071 desc->handler = &no_irq_type; in map_domain_pirq()
2072 desc->msi_desc = NULL; in map_domain_pirq()
2073 spin_unlock_irqrestore(&desc->lock, flags); in map_domain_pirq()
2092 spin_unlock_irqrestore(&desc->lock, flags); in map_domain_pirq()
2096 spin_lock_irqsave(&desc->lock, flags); in map_domain_pirq()
2098 spin_unlock_irqrestore(&desc->lock, flags); in map_domain_pirq()
2120 struct irq_desc *desc; in unmap_domain_pirq() local
2142 desc = irq_to_desc(irq); in unmap_domain_pirq()
2143 msi_desc = desc->msi_desc; in unmap_domain_pirq()
2170 spin_lock_irqsave(&desc->lock, flags); in unmap_domain_pirq()
2188 desc->handler = &no_irq_type; in unmap_domain_pirq()
2189 desc->msi_desc = NULL; in unmap_domain_pirq()
2195 spin_unlock_irqrestore(&desc->lock, flags); in unmap_domain_pirq()
2219 desc = NULL; in unmap_domain_pirq()
2223 desc = irq_to_desc(irq); in unmap_domain_pirq()
2224 BUG_ON(desc->msi_desc != msi_desc + i); in unmap_domain_pirq()
2226 spin_lock_irqsave(&desc->lock, flags); in unmap_domain_pirq()
2229 if ( desc ) in unmap_domain_pirq()
2231 spin_unlock_irqrestore(&desc->lock, flags); in unmap_domain_pirq()
2271 struct irq_desc *desc; in dump_irqs() local
2285 desc = irq_to_desc(irq); in dump_irqs()
2287 if ( !irq_desc_initialized(desc) || desc->handler == &no_irq_type ) in dump_irqs()
2292 spin_lock_irqsave(&desc->lock, flags); in dump_irqs()
2295 desc->affinity); in dump_irqs()
2298 irq, keyhandler_scratch, desc->arch.vector, in dump_irqs()
2299 desc->handler->typename, desc->status); in dump_irqs()
2304 if ( desc->status & IRQ_GUEST ) in dump_irqs()
2306 action = (irq_guest_action_t *)desc->action; in dump_irqs()
2326 else if ( desc->action ) in dump_irqs()
2327 printk("%ps()\n", desc->action->handler); in dump_irqs()
2331 spin_unlock_irqrestore(&desc->lock, flags); in dump_irqs()
2357 struct irq_desc *desc; in fixup_irqs() local
2368 desc = irq_to_desc(irq); in fixup_irqs()
2369 if ( !irq_desc_initialized(desc) ) in fixup_irqs()
2372 spin_lock(&desc->lock); in fixup_irqs()
2377 cpumask_and(desc->arch.cpu_mask, desc->arch.cpu_mask, mask); in fixup_irqs()
2379 cpumask_copy(&affinity, desc->affinity); in fixup_irqs()
2380 if ( !desc->action || cpumask_subset(&affinity, mask) ) in fixup_irqs()
2382 spin_unlock(&desc->lock); in fixup_irqs()
2393 if ( desc->handler->disable ) in fixup_irqs()
2394 desc->handler->disable(desc); in fixup_irqs()
2396 if ( desc->handler->set_affinity ) in fixup_irqs()
2397 desc->handler->set_affinity(desc, &affinity); in fixup_irqs()
2401 if ( desc->handler->enable ) in fixup_irqs()
2402 desc->handler->enable(desc); in fixup_irqs()
2404 spin_unlock(&desc->lock); in fixup_irqs()
2424 struct irq_desc *desc; in fixup_eoi() local
2431 desc = irq_to_desc(irq); in fixup_eoi()
2432 if ( !(desc->status & IRQ_GUEST) ) in fixup_eoi()
2434 action = (irq_guest_action_t *)desc->action; in fixup_eoi()
2543 struct irq_desc *desc; in arch_evtchn_bind_pirq() local
2552 desc = irq_to_desc(irq); in arch_evtchn_bind_pirq()
2553 spin_lock_irqsave(&desc->lock, flags); in arch_evtchn_bind_pirq()
2554 if ( desc->msi_desc ) in arch_evtchn_bind_pirq()
2555 guest_mask_msi_irq(desc, 0); in arch_evtchn_bind_pirq()
2556 spin_unlock_irqrestore(&desc->lock, flags); in arch_evtchn_bind_pirq()