1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * xen/arch/arm/irq.c
4 *
5 * ARM Interrupt support
6 *
7 * Ian Campbell <ian.campbell@citrix.com>
8 * Copyright (c) 2011 Citrix Systems.
9 */
10
11 #include <xen/cpu.h>
12 #include <xen/lib.h>
13 #include <xen/spinlock.h>
14 #include <xen/irq.h>
15 #include <xen/init.h>
16 #include <xen/errno.h>
17 #include <xen/sched.h>
18
19 #include <asm/gic.h>
20 #include <asm/vgic.h>
21
/* Total number of IRQ lines managed by Xen on this platform. */
const unsigned int nr_irqs = NR_IRQS;

/*
 * Trigger type (IRQ_TYPE_*) recorded for each local IRQ (SGI/PPI).
 * Local IRQ descriptors are per-CPU, so the configured type is kept
 * here and copied into a CPU's descriptors when it is brought up.
 */
static unsigned int local_irqs_type[NR_LOCAL_IRQS];
/* Protects local_irqs_type. */
static DEFINE_SPINLOCK(local_irqs_type_lock);
26
/* Describe an IRQ assigned to a guest */
struct irq_guest
{
    struct domain *d;   /* Domain the IRQ is routed to */
    unsigned int virq;  /* Virtual IRQ number the physical IRQ maps to */
};
33
/*
 * Fallback 'ack' handler used when no real handler has been installed:
 * just log the unexpected interrupt.
 */
void irq_ack_none(struct irq_desc *desc)
{
    printk("unexpected IRQ trap at irq %02x\n", desc->irq);
}
38
/*
 * Fallback 'end' handler: forward to the GIC host IRQ type's end()
 * so even spurious interrupts get properly deactivated.
 */
void irq_end_none(struct irq_desc *irq)
{
    /*
     * Still allow a CPU to end an interrupt if we receive a spurious
     * interrupt. This will prevent the CPU to lose interrupt forever.
     */
    gic_hw_ops->gic_host_irq_type->end(irq);
}
47
/* Descriptors for the SPIs (shared across all CPUs)... */
static irq_desc_t irq_desc[NR_IRQS - NR_LOCAL_IRQS];
/* ... and per-CPU descriptors for the banked local IRQs (SGIs/PPIs). */
static DEFINE_PER_CPU(irq_desc_t[NR_LOCAL_IRQS], local_irq_desc);
50
__irq_to_desc(unsigned int irq)51 struct irq_desc *__irq_to_desc(unsigned int irq)
52 {
53 if ( irq < NR_LOCAL_IRQS )
54 return &this_cpu(local_irq_desc)[irq];
55
56 return &irq_desc[irq-NR_LOCAL_IRQS];
57 }
58
/*
 * Arch-specific part of descriptor initialisation: start with no
 * trigger type configured.
 */
int arch_init_one_irq_desc(struct irq_desc *desc)
{
    desc->arch.type = IRQ_TYPE_INVALID;
    return 0;
}
64
65
init_irq_data(void)66 static int __init init_irq_data(void)
67 {
68 int irq;
69
70 for ( irq = NR_LOCAL_IRQS; irq < NR_IRQS; irq++ )
71 {
72 struct irq_desc *desc = irq_to_desc(irq);
73 int rc = init_one_irq_desc(desc);
74
75 if ( rc )
76 return rc;
77
78 desc->irq = irq;
79 desc->action = NULL;
80 }
81
82 return 0;
83 }
84
init_local_irq_data(unsigned int cpu)85 static int init_local_irq_data(unsigned int cpu)
86 {
87 int irq;
88
89 spin_lock(&local_irqs_type_lock);
90
91 for ( irq = 0; irq < NR_LOCAL_IRQS; irq++ )
92 {
93 struct irq_desc *desc = &per_cpu(local_irq_desc, cpu)[irq];
94 int rc = init_one_irq_desc(desc);
95
96 if ( rc )
97 {
98 spin_unlock(&local_irqs_type_lock);
99 return rc;
100 }
101
102 desc->irq = irq;
103 desc->action = NULL;
104
105 /* PPIs are included in local_irqs, we copy the IRQ type from
106 * local_irqs_type when bringing up local IRQ for this CPU in
107 * order to pick up any configuration done before this CPU came
108 * up. For interrupts configured after this point this is done in
109 * irq_set_type.
110 */
111 desc->arch.type = local_irqs_type[irq];
112 }
113
114 spin_unlock(&local_irqs_type_lock);
115
116 return 0;
117 }
118
cpu_callback(struct notifier_block * nfb,unsigned long action,void * hcpu)119 static int cpu_callback(struct notifier_block *nfb, unsigned long action,
120 void *hcpu)
121 {
122 unsigned int cpu = (unsigned long)hcpu;
123 int rc = 0;
124
125 switch ( action )
126 {
127 case CPU_UP_PREPARE:
128 rc = init_local_irq_data(cpu);
129 if ( rc )
130 printk(XENLOG_ERR "Unable to allocate local IRQ for CPU%u\n",
131 cpu);
132 break;
133 }
134
135 return notifier_from_errno(rc);
136 }
137
/* Registered in init_IRQ() to catch CPU_UP_PREPARE events. */
static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback,
};
141
init_IRQ(void)142 void __init init_IRQ(void)
143 {
144 int irq;
145
146 spin_lock(&local_irqs_type_lock);
147 for ( irq = 0; irq < NR_LOCAL_IRQS; irq++ )
148 {
149 /* SGIs are always edge-triggered */
150 if ( irq < NR_GIC_SGI )
151 local_irqs_type[irq] = IRQ_TYPE_EDGE_RISING;
152 else
153 local_irqs_type[irq] = IRQ_TYPE_INVALID;
154 }
155 spin_unlock(&local_irqs_type_lock);
156
157 BUG_ON(init_local_irq_data(smp_processor_id()) < 0);
158 BUG_ON(init_irq_data() < 0);
159
160 register_cpu_notifier(&cpu_nfb);
161 }
162
/*
 * Return the guest routing information attached to @desc.
 * The caller must hold desc->lock and the IRQ must be guest-routed
 * (dev_id then points to a struct irq_guest).
 */
static inline struct irq_guest *irq_get_guest_info(struct irq_desc *desc)
{
    ASSERT(spin_is_locked(&desc->lock));
    ASSERT(test_bit(_IRQ_GUEST, &desc->status));
    ASSERT(desc->action != NULL);

    return desc->action->dev_id;
}
171
/* Return the domain a guest-routed IRQ is assigned to (lock held). */
static inline struct domain *irq_get_domain(struct irq_desc *desc)
{
    return irq_get_guest_info(desc)->d;
}
176
/* Set the CPU affinity of @desc; tolerates a NULL descriptor. */
void irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
{
    if ( desc != NULL )
        desc->handler->set_affinity(desc, mask);
}
182
request_irq(unsigned int irq,unsigned int irqflags,void (* handler)(int irq,void * dev_id),const char * devname,void * dev_id)183 int request_irq(unsigned int irq, unsigned int irqflags,
184 void (*handler)(int irq, void *dev_id),
185 const char *devname, void *dev_id)
186 {
187 struct irqaction *action;
188 int retval;
189
190 /*
191 * Sanity-check: shared interrupts must pass in a real dev-ID,
192 * otherwise we'll have trouble later trying to figure out
193 * which interrupt is which (messes up the interrupt freeing
194 * logic etc).
195 */
196 if ( irq >= nr_irqs )
197 return -EINVAL;
198 if ( !handler )
199 return -EINVAL;
200
201 action = xmalloc(struct irqaction);
202 if ( !action )
203 return -ENOMEM;
204
205 action->handler = handler;
206 action->name = devname;
207 action->dev_id = dev_id;
208 action->free_on_release = 1;
209
210 retval = setup_irq(irq, irqflags, action);
211 if ( retval )
212 xfree(action);
213
214 return retval;
215 }
216
/* Dispatch an interrupt */
void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq)
{
    struct irq_desc *desc = irq_to_desc(irq);
    struct irqaction *action;
    const struct cpu_user_regs *old_regs = set_irq_regs(regs);

    perfc_incr(irqs);

    /* Statically assigned SGIs do not come down this path */
    ASSERT(irq >= GIC_SGI_STATIC_MAX);

    /* Account the interrupt against the right perf-counter class. */
    if ( irq < NR_GIC_SGI )
        perfc_incr(ipis);
    else if ( irq < NR_GIC_LOCAL_IRQS )
        perfc_incr(ppis);
    else
        perfc_incr(spis);

    /* TODO: this_cpu(irq_count)++; */

    irq_enter();

    spin_lock(&desc->lock);
    desc->handler->ack(desc);

#ifndef NDEBUG
    /* Debug builds report interrupts that have no action installed. */
    if ( !desc->action )
    {
        printk("Unknown %s %#3.3x\n",
               is_fiq ? "FIQ" : "IRQ", irq);
        goto out;
    }
#endif

    if ( test_bit(_IRQ_GUEST, &desc->status) )
    {
        struct irq_guest *info = irq_get_guest_info(desc);

        perfc_incr(guest_irqs);
        /*
         * end() is called here because the guest path exits via
         * out_no_end, skipping the common end() at the out label.
         */
        desc->handler->end(desc);

        set_bit(_IRQ_INPROGRESS, &desc->status);

        /*
         * The irq cannot be a PPI, we only support delivery of SPIs to
         * guests.
         */
        ASSERT(irq >= NR_GIC_SGI);
        vgic_inject_irq(info->d, NULL, info->virq, true);
        goto out_no_end;
    }

    if ( test_bit(_IRQ_DISABLED, &desc->status) )
        goto out;

    set_bit(_IRQ_INPROGRESS, &desc->status);

    action = desc->action;

    /*
     * Run the handler chain with the descriptor lock dropped and
     * interrupts re-enabled (NOTE(review): interrupts appear to be
     * masked on entry given the spin_lock/spin_unlock_irq pairing —
     * confirm against the exception entry path).
     */
    spin_unlock_irq(&desc->lock);

    do
    {
        action->handler(irq, action->dev_id);
        action = action->next;
    } while ( action );

    spin_lock_irq(&desc->lock);

    clear_bit(_IRQ_INPROGRESS, &desc->status);

out:
    desc->handler->end(desc);
out_no_end:
    spin_unlock(&desc->lock);
    irq_exit();
    set_irq_regs(old_regs);
}
296
/*
 * Remove the action identified by @dev_id from @irq's action list.
 * If it was the last action, the IRQ is shut down. Waits for any
 * in-flight handler on another CPU to finish before freeing the
 * action (when it was allocated by request_irq()).
 */
void release_irq(unsigned int irq, const void *dev_id)
{
    struct irq_desc *desc;
    unsigned long flags;
    struct irqaction *action, **action_ptr;

    desc = irq_to_desc(irq);

    spin_lock_irqsave(&desc->lock,flags);

    /* Walk the singly-linked action list looking for @dev_id. */
    action_ptr = &desc->action;
    for ( ;; )
    {
        action = *action_ptr;
        if ( !action )
        {
            printk(XENLOG_WARNING "Trying to free already-free IRQ %u\n", irq);
            spin_unlock_irqrestore(&desc->lock, flags);
            return;
        }

        if ( action->dev_id == dev_id )
            break;

        action_ptr = &action->next;
    }

    /* Found it - remove it from the action list */
    *action_ptr = action->next;

    /* If this was the last action, shut down the IRQ */
    if ( !desc->action )
    {
        desc->handler->shutdown(desc);
        clear_bit(_IRQ_GUEST, &desc->status);
    }

    spin_unlock_irqrestore(&desc->lock,flags);

    /* Wait to make sure it's not being used on another CPU */
    do { smp_mb(); } while ( test_bit(_IRQ_INPROGRESS, &desc->status) );

    if ( action->free_on_release )
        xfree(action);
}
342
/*
 * Link @new at the head of @desc's action list.
 * The caller must hold desc->lock. Returns 0 on success, -EINVAL if
 * the sharing constraints below are violated.
 */
static int __setup_irq(struct irq_desc *desc, unsigned int irqflags,
                       struct irqaction *new)
{
    bool shared = irqflags & IRQF_SHARED;

    ASSERT(new != NULL);

    /* Sanity checks:
     *  - if the IRQ is marked as shared
     *  - dev_id is not NULL when IRQF_SHARED is set
     */
    if ( desc->action != NULL && (!test_bit(_IRQF_SHARED, &desc->status) || !shared) )
        return -EINVAL;
    if ( shared && new->dev_id == NULL )
        return -EINVAL;

    if ( shared )
        set_bit(_IRQF_SHARED, &desc->status);

    /*
     * Barriers order the writes so @new's next pointer is visible
     * before @new itself becomes reachable from desc->action
     * (NOTE(review): presumably for lockless readers — confirm).
     */
    new->next = desc->action;
    dsb(ish);
    desc->action = new;
    dsb(ish);

    return 0;
}
369
/*
 * Install @new on @irq for Xen's own use; if this is the first action,
 * route the IRQ to Xen, set its affinity and start it up.
 * Returns 0 on success, -EBUSY if the IRQ is routed to a guest, or the
 * error from __setup_irq().
 */
int setup_irq(unsigned int irq, unsigned int irqflags, struct irqaction *new)
{
    int rc;
    unsigned long flags;
    struct irq_desc *desc;
    bool disabled;

    desc = irq_to_desc(irq);

    spin_lock_irqsave(&desc->lock, flags);

    /* An IRQ assigned to a guest cannot also be used by Xen. */
    if ( test_bit(_IRQ_GUEST, &desc->status) )
    {
        struct domain *d = irq_get_domain(desc);

        spin_unlock_irqrestore(&desc->lock, flags);
        printk(XENLOG_ERR "ERROR: IRQ %u is already in use by the domain %u\n",
               irq, d->domain_id);
        return -EBUSY;
    }

    /* No action yet means the IRQ has never been started up. */
    disabled = (desc->action == NULL);

    rc = __setup_irq(desc, irqflags, new);
    if ( rc )
        goto err;

    /* First time the IRQ is setup */
    if ( disabled )
    {
        gic_route_irq_to_xen(desc, GIC_PRI_IRQ);
        /* It's fine to use smp_processor_id() because:
         * For SGI and PPI: irq_desc is banked
         * For SPI: we don't care for now which CPU will receive the
         * interrupt
         * TODO: Handle case where SPI is setup on different CPU than
         * the targeted CPU and the priority.
         */
        irq_set_affinity(desc, cpumask_of(smp_processor_id()));
        desc->handler->startup(desc);
    }

err:
    spin_unlock_irqrestore(&desc->lock, flags);

    return rc;
}
417
is_assignable_irq(unsigned int irq)418 bool is_assignable_irq(unsigned int irq)
419 {
420 /* For now, we can only route SPIs to the guest */
421 return (irq >= NR_LOCAL_IRQS) && (irq < gic_number_lines());
422 }
423
/*
 * Only the hardware domain is allowed to configure the interrupt
 * type for now.
 *
 * XXX: See whether it is possible to let any domain configure the type.
 */
bool irq_type_set_by_domain(const struct domain *d)
{
    return is_hardware_domain(d);
}
434
435 /*
436 * Route an IRQ to a specific guest.
437 * For now only SPIs are assignable to the guest.
438 */
route_irq_to_guest(struct domain * d,unsigned int virq,unsigned int irq,const char * devname)439 int route_irq_to_guest(struct domain *d, unsigned int virq,
440 unsigned int irq, const char * devname)
441 {
442 struct irqaction *action;
443 struct irq_guest *info;
444 struct irq_desc *desc;
445 unsigned long flags;
446 int retval = 0;
447
448 if ( virq >= vgic_num_irqs(d) )
449 {
450 printk(XENLOG_G_ERR
451 "the vIRQ number %u is too high for domain %u (max = %u)\n",
452 irq, d->domain_id, vgic_num_irqs(d));
453 return -EINVAL;
454 }
455
456 /* Only routing to virtual SPIs is supported */
457 if ( virq < NR_LOCAL_IRQS )
458 {
459 printk(XENLOG_G_ERR "IRQ can only be routed to an SPI\n");
460 return -EINVAL;
461 }
462
463 if ( !is_assignable_irq(irq) )
464 {
465 printk(XENLOG_G_ERR "the IRQ%u is not routable\n", irq);
466 return -EINVAL;
467 }
468 desc = irq_to_desc(irq);
469
470 action = xmalloc(struct irqaction);
471 if ( !action )
472 return -ENOMEM;
473
474 info = xmalloc(struct irq_guest);
475 if ( !info )
476 {
477 xfree(action);
478 return -ENOMEM;
479 }
480
481 info->d = d;
482 info->virq = virq;
483
484 action->dev_id = info;
485 action->name = devname;
486 action->free_on_release = 1;
487
488 spin_lock_irqsave(&desc->lock, flags);
489
490 if ( !irq_type_set_by_domain(d) && desc->arch.type == IRQ_TYPE_INVALID )
491 {
492 printk(XENLOG_G_ERR "IRQ %u has not been configured\n", irq);
493 retval = -EIO;
494 goto out;
495 }
496
497 /*
498 * If the IRQ is already used by someone
499 * - If it's the same domain -> Xen doesn't need to update the IRQ desc.
500 * For safety check if we are not trying to assign the IRQ to a
501 * different vIRQ.
502 * - Otherwise -> For now, don't allow the IRQ to be shared between
503 * Xen and domains.
504 */
505 if ( desc->action != NULL )
506 {
507 if ( test_bit(_IRQ_GUEST, &desc->status) )
508 {
509 struct domain *ad = irq_get_domain(desc);
510
511 if ( d != ad )
512 {
513 printk(XENLOG_G_ERR "IRQ %u is already used by domain %u\n",
514 irq, ad->domain_id);
515 retval = -EBUSY;
516 }
517 else if ( irq_get_guest_info(desc)->virq != virq )
518 {
519 printk(XENLOG_G_ERR
520 "d%u: IRQ %u is already assigned to vIRQ %u\n",
521 d->domain_id, irq, irq_get_guest_info(desc)->virq);
522 retval = -EBUSY;
523 }
524 }
525 else
526 {
527 printk(XENLOG_G_ERR "IRQ %u is already used by Xen\n", irq);
528 retval = -EBUSY;
529 }
530 goto out;
531 }
532
533 retval = __setup_irq(desc, 0, action);
534 if ( retval )
535 goto out;
536
537 retval = gic_route_irq_to_guest(d, virq, desc, GIC_PRI_IRQ);
538
539 spin_unlock_irqrestore(&desc->lock, flags);
540
541 if ( retval )
542 {
543 release_irq(desc->irq, info);
544 goto free_info;
545 }
546
547 return 0;
548
549 out:
550 spin_unlock_irqrestore(&desc->lock, flags);
551 xfree(action);
552 free_info:
553 xfree(info);
554
555 return retval;
556 }
557
/*
 * Undo route_irq_to_guest(): detach the hardware IRQ backing @virq
 * from domain @d, remove it from the vGIC and free the routing info.
 * Returns 0 on success, -EINVAL for bad arguments or if the IRQ is
 * not guest-routed to @d, or the gic_remove_irq_from_guest() error.
 */
int release_guest_irq(struct domain *d, unsigned int virq)
{
    struct irq_desc *desc;
    struct irq_guest *info;
    unsigned long flags;
    int ret;

    /* Only SPIs are supported */
    if ( virq < NR_LOCAL_IRQS || virq >= vgic_num_irqs(d) )
        return -EINVAL;

    desc = vgic_get_hw_irq_desc(d, NULL, virq);
    if ( !desc )
        return -EINVAL;

    spin_lock_irqsave(&desc->lock, flags);

    ret = -EINVAL;
    if ( !test_bit(_IRQ_GUEST, &desc->status) )
        goto unlock;

    info = irq_get_guest_info(desc);
    ret = -EINVAL;
    /* The IRQ must actually belong to the requesting domain. */
    if ( d != info->d )
        goto unlock;

    ret = gic_remove_irq_from_guest(d, virq, desc);
    if ( ret )
        goto unlock;

    spin_unlock_irqrestore(&desc->lock, flags);

    /* release_irq() frees the action; info is freed separately here. */
    release_irq(desc->irq, info);
    xfree(info);

    return 0;

unlock:
    spin_unlock_irqrestore(&desc->lock, flags);

    return ret;
}
600
irq_validate_new_type(unsigned int curr,unsigned int new)601 static bool irq_validate_new_type(unsigned int curr, unsigned int new)
602 {
603 return (curr == IRQ_TYPE_INVALID || curr == new );
604 }
605
irq_set_spi_type(unsigned int spi,unsigned int type)606 int irq_set_spi_type(unsigned int spi, unsigned int type)
607 {
608 unsigned long flags;
609 struct irq_desc *desc = irq_to_desc(spi);
610 int ret = -EBUSY;
611
612 /* This function should not be used for other than SPIs */
613 if ( spi < NR_LOCAL_IRQS )
614 return -EINVAL;
615
616 spin_lock_irqsave(&desc->lock, flags);
617
618 if ( !irq_validate_new_type(desc->arch.type, type) )
619 goto err;
620
621 desc->arch.type = type;
622
623 ret = 0;
624
625 err:
626 spin_unlock_irqrestore(&desc->lock, flags);
627 return ret;
628 }
629
/*
 * Set the trigger type of a local IRQ (SGI/PPI) and propagate it to
 * the banked descriptor of every online CPU.
 * Returns 0 on success, -EBUSY if a conflicting type is already set.
 */
static int irq_local_set_type(unsigned int irq, unsigned int type)
{
    unsigned int cpu;
    unsigned int old_type;
    unsigned long flags;
    int ret = -EBUSY;
    struct irq_desc *desc;

    ASSERT(irq < NR_LOCAL_IRQS);

    spin_lock(&local_irqs_type_lock);

    old_type = local_irqs_type[irq];

    if ( !irq_validate_new_type(old_type, type) )
        goto unlock;

    ret = 0;
    /* We don't need to reconfigure if the type is correctly set */
    if ( old_type == type )
        goto unlock;

    local_irqs_type[irq] = type;

    /* CPUs coming up later pick the type up in init_local_irq_data(). */
    for_each_cpu( cpu, &cpu_online_map )
    {
        desc = &per_cpu(local_irq_desc, cpu)[irq];
        spin_lock_irqsave(&desc->lock, flags);
        desc->arch.type = type;
        spin_unlock_irqrestore(&desc->lock, flags);
    }

unlock:
    spin_unlock(&local_irqs_type_lock);
    return ret;
}
666
irq_set_type(unsigned int irq,unsigned int type)667 int irq_set_type(unsigned int irq, unsigned int type)
668 {
669 int res;
670
671 /* Setup the IRQ type */
672 if ( irq < NR_LOCAL_IRQS )
673 res = irq_local_set_type(irq, type);
674 else
675 res = irq_set_spi_type(irq, type);
676
677 return res;
678 }
679
platform_get_irq(const struct dt_device_node * device,int index)680 int platform_get_irq(const struct dt_device_node *device, int index)
681 {
682 struct dt_irq dt_irq;
683 unsigned int type, irq;
684
685 if ( dt_device_get_irq(device, index, &dt_irq) )
686 return -1;
687
688 irq = dt_irq.irq;
689 type = dt_irq.type;
690
691 if ( irq_set_type(irq, type) )
692 return -1;
693
694 return irq;
695 }
696
platform_get_irq_byname(const struct dt_device_node * np,const char * name)697 int platform_get_irq_byname(const struct dt_device_node *np, const char *name)
698 {
699 int index;
700
701 if ( unlikely(!name) )
702 return -EINVAL;
703
704 index = dt_property_match_string(np, "interrupt-names", name);
705 if ( index < 0 )
706 return index;
707
708 return platform_get_irq(np, index);
709 }
710
711 /*
712 * Local variables:
713 * mode: C
714 * c-file-style: "BSD"
715 * c-basic-offset: 4
716 * indent-tabs-mode: nil
717 * End:
718 */
719