1 /*
2 * xen/arch/arm/irq.c
3 *
4 * ARM Interrupt support
5 *
6 * Ian Campbell <ian.campbell@citrix.com>
7 * Copyright (c) 2011 Citrix Systems.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20 #include <xen/lib.h>
21 #include <xen/spinlock.h>
22 #include <xen/irq.h>
23 #include <xen/init.h>
24 #include <xen/errno.h>
25 #include <xen/sched.h>
26
27 #include <asm/gic.h>
28 #include <asm/vgic.h>
29
30 static unsigned int local_irqs_type[NR_LOCAL_IRQS];
31 static DEFINE_SPINLOCK(local_irqs_type_lock);
32
33 /* Describe an IRQ assigned to a guest */
34 struct irq_guest
35 {
36 struct domain *d;
37 unsigned int virq;
38 };
39
ack_none(struct irq_desc * irq)40 static void ack_none(struct irq_desc *irq)
41 {
42 printk("unexpected IRQ trap at irq %02x\n", irq->irq);
43 }
44
/* Fallback .end handler: nothing to do for an unclaimed IRQ. */
static void end_none(struct irq_desc *irq)
{
}
46
47 hw_irq_controller no_irq_type = {
48 .typename = "none",
49 .startup = irq_startup_none,
50 .shutdown = irq_shutdown_none,
51 .enable = irq_enable_none,
52 .disable = irq_disable_none,
53 .ack = ack_none,
54 .end = end_none
55 };
56
57 static irq_desc_t irq_desc[NR_IRQS];
58 static DEFINE_PER_CPU(irq_desc_t[NR_LOCAL_IRQS], local_irq_desc);
59
__irq_to_desc(int irq)60 irq_desc_t *__irq_to_desc(int irq)
61 {
62 if (irq < NR_LOCAL_IRQS) return &this_cpu(local_irq_desc)[irq];
63 return &irq_desc[irq-NR_LOCAL_IRQS];
64 }
65
arch_init_one_irq_desc(struct irq_desc * desc)66 int __init arch_init_one_irq_desc(struct irq_desc *desc)
67 {
68 desc->arch.type = IRQ_TYPE_INVALID;
69 return 0;
70 }
71
72
init_irq_data(void)73 static int __init init_irq_data(void)
74 {
75 int irq;
76
77 for (irq = NR_LOCAL_IRQS; irq < NR_IRQS; irq++) {
78 struct irq_desc *desc = irq_to_desc(irq);
79 init_one_irq_desc(desc);
80 desc->irq = irq;
81 desc->action = NULL;
82 }
83
84 return 0;
85 }
86
init_local_irq_data(void)87 static int init_local_irq_data(void)
88 {
89 int irq;
90
91 spin_lock(&local_irqs_type_lock);
92
93 for (irq = 0; irq < NR_LOCAL_IRQS; irq++) {
94 struct irq_desc *desc = irq_to_desc(irq);
95 init_one_irq_desc(desc);
96 desc->irq = irq;
97 desc->action = NULL;
98
99 /* PPIs are included in local_irqs, we copy the IRQ type from
100 * local_irqs_type when bringing up local IRQ for this CPU in
101 * order to pick up any configuration done before this CPU came
102 * up. For interrupts configured after this point this is done in
103 * irq_set_type.
104 */
105 desc->arch.type = local_irqs_type[irq];
106 }
107
108 spin_unlock(&local_irqs_type_lock);
109
110 return 0;
111 }
112
init_IRQ(void)113 void __init init_IRQ(void)
114 {
115 int irq;
116
117 spin_lock(&local_irqs_type_lock);
118 for ( irq = 0; irq < NR_LOCAL_IRQS; irq++ )
119 local_irqs_type[irq] = IRQ_TYPE_INVALID;
120 spin_unlock(&local_irqs_type_lock);
121
122 BUG_ON(init_local_irq_data() < 0);
123 BUG_ON(init_irq_data() < 0);
124 }
125
/* Secondary-CPU IRQ bring-up: only the banked local descriptors. */
void init_secondary_IRQ(void)
{
    BUG_ON(init_local_irq_data() < 0);
}
130
irq_get_guest_info(struct irq_desc * desc)131 static inline struct irq_guest *irq_get_guest_info(struct irq_desc *desc)
132 {
133 ASSERT(spin_is_locked(&desc->lock));
134 ASSERT(test_bit(_IRQ_GUEST, &desc->status));
135 ASSERT(desc->action != NULL);
136
137 return desc->action->dev_id;
138 }
139
irq_get_domain(struct irq_desc * desc)140 static inline struct domain *irq_get_domain(struct irq_desc *desc)
141 {
142 return irq_get_guest_info(desc)->d;
143 }
144
irq_set_affinity(struct irq_desc * desc,const cpumask_t * cpu_mask)145 void irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask)
146 {
147 if ( desc != NULL )
148 desc->handler->set_affinity(desc, cpu_mask);
149 }
150
request_irq(unsigned int irq,unsigned int irqflags,void (* handler)(int,void *,struct cpu_user_regs *),const char * devname,void * dev_id)151 int request_irq(unsigned int irq, unsigned int irqflags,
152 void (*handler)(int, void *, struct cpu_user_regs *),
153 const char *devname, void *dev_id)
154 {
155 struct irqaction *action;
156 int retval;
157
158 /*
159 * Sanity-check: shared interrupts must pass in a real dev-ID,
160 * otherwise we'll have trouble later trying to figure out
161 * which interrupt is which (messes up the interrupt freeing
162 * logic etc).
163 */
164 if ( irq >= nr_irqs )
165 return -EINVAL;
166 if ( !handler )
167 return -EINVAL;
168
169 action = xmalloc(struct irqaction);
170 if ( !action )
171 return -ENOMEM;
172
173 action->handler = handler;
174 action->name = devname;
175 action->dev_id = dev_id;
176 action->free_on_release = 1;
177
178 retval = setup_irq(irq, irqflags, action);
179 if ( retval )
180 xfree(action);
181
182 return retval;
183 }
184
185 /* Dispatch an interrupt */
do_IRQ(struct cpu_user_regs * regs,unsigned int irq,int is_fiq)186 void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq)
187 {
188 struct irq_desc *desc = irq_to_desc(irq);
189
190 perfc_incr(irqs);
191
192 ASSERT(irq >= 16); /* SGIs do not come down this path */
193
194 if (irq < 32)
195 perfc_incr(ppis);
196 else
197 perfc_incr(spis);
198
199 /* TODO: this_cpu(irq_count)++; */
200
201 irq_enter();
202
203 spin_lock(&desc->lock);
204 desc->handler->ack(desc);
205
206 if ( !desc->action )
207 {
208 printk("Unknown %s %#3.3x\n",
209 is_fiq ? "FIQ" : "IRQ", irq);
210 goto out;
211 }
212
213 if ( test_bit(_IRQ_GUEST, &desc->status) )
214 {
215 struct irq_guest *info = irq_get_guest_info(desc);
216
217 perfc_incr(guest_irqs);
218 desc->handler->end(desc);
219
220 set_bit(_IRQ_INPROGRESS, &desc->status);
221
222 /*
223 * The irq cannot be a PPI, we only support delivery of SPIs to
224 * guests.
225 */
226 vgic_vcpu_inject_spi(info->d, info->virq);
227 goto out_no_end;
228 }
229
230 set_bit(_IRQ_PENDING, &desc->status);
231
232 /*
233 * Since we set PENDING, if another processor is handling a different
234 * instance of this same irq, the other processor will take care of it.
235 */
236 if ( test_bit(_IRQ_DISABLED, &desc->status) ||
237 test_bit(_IRQ_INPROGRESS, &desc->status) )
238 goto out;
239
240 set_bit(_IRQ_INPROGRESS, &desc->status);
241
242 while ( test_bit(_IRQ_PENDING, &desc->status) )
243 {
244 struct irqaction *action;
245
246 clear_bit(_IRQ_PENDING, &desc->status);
247 action = desc->action;
248
249 spin_unlock_irq(&desc->lock);
250
251 do
252 {
253 action->handler(irq, action->dev_id, regs);
254 action = action->next;
255 } while ( action );
256
257 spin_lock_irq(&desc->lock);
258 }
259
260 clear_bit(_IRQ_INPROGRESS, &desc->status);
261
262 out:
263 desc->handler->end(desc);
264 out_no_end:
265 spin_unlock(&desc->lock);
266 irq_exit();
267 }
268
release_irq(unsigned int irq,const void * dev_id)269 void release_irq(unsigned int irq, const void *dev_id)
270 {
271 struct irq_desc *desc;
272 unsigned long flags;
273 struct irqaction *action, **action_ptr;
274
275 desc = irq_to_desc(irq);
276
277 spin_lock_irqsave(&desc->lock,flags);
278
279 action_ptr = &desc->action;
280 for ( ;; )
281 {
282 action = *action_ptr;
283 if ( !action )
284 {
285 printk(XENLOG_WARNING "Trying to free already-free IRQ %u\n", irq);
286 spin_unlock_irqrestore(&desc->lock, flags);
287 return;
288 }
289
290 if ( action->dev_id == dev_id )
291 break;
292
293 action_ptr = &action->next;
294 }
295
296 /* Found it - remove it from the action list */
297 *action_ptr = action->next;
298
299 /* If this was the last action, shut down the IRQ */
300 if ( !desc->action )
301 {
302 desc->handler->shutdown(desc);
303 clear_bit(_IRQ_GUEST, &desc->status);
304 }
305
306 spin_unlock_irqrestore(&desc->lock,flags);
307
308 /* Wait to make sure it's not being used on another CPU */
309 do { smp_mb(); } while ( test_bit(_IRQ_INPROGRESS, &desc->status) );
310
311 if ( action->free_on_release )
312 xfree(action);
313 }
314
__setup_irq(struct irq_desc * desc,unsigned int irqflags,struct irqaction * new)315 static int __setup_irq(struct irq_desc *desc, unsigned int irqflags,
316 struct irqaction *new)
317 {
318 bool shared = irqflags & IRQF_SHARED;
319
320 ASSERT(new != NULL);
321
322 /* Sanity checks:
323 * - if the IRQ is marked as shared
324 * - dev_id is not NULL when IRQF_SHARED is set
325 */
326 if ( desc->action != NULL && (!test_bit(_IRQF_SHARED, &desc->status) || !shared) )
327 return -EINVAL;
328 if ( shared && new->dev_id == NULL )
329 return -EINVAL;
330
331 if ( shared )
332 set_bit(_IRQF_SHARED, &desc->status);
333
334 new->next = desc->action;
335 dsb(ish);
336 desc->action = new;
337 dsb(ish);
338
339 return 0;
340 }
341
setup_irq(unsigned int irq,unsigned int irqflags,struct irqaction * new)342 int setup_irq(unsigned int irq, unsigned int irqflags, struct irqaction *new)
343 {
344 int rc;
345 unsigned long flags;
346 struct irq_desc *desc;
347 bool disabled;
348
349 desc = irq_to_desc(irq);
350
351 spin_lock_irqsave(&desc->lock, flags);
352
353 if ( test_bit(_IRQ_GUEST, &desc->status) )
354 {
355 struct domain *d = irq_get_domain(desc);
356
357 spin_unlock_irqrestore(&desc->lock, flags);
358 printk(XENLOG_ERR "ERROR: IRQ %u is already in use by the domain %u\n",
359 irq, d->domain_id);
360 return -EBUSY;
361 }
362
363 disabled = (desc->action == NULL);
364
365 rc = __setup_irq(desc, irqflags, new);
366 if ( rc )
367 goto err;
368
369 /* First time the IRQ is setup */
370 if ( disabled )
371 {
372 gic_route_irq_to_xen(desc, GIC_PRI_IRQ);
373 /* It's fine to use smp_processor_id() because:
374 * For PPI: irq_desc is banked
375 * For SPI: we don't care for now which CPU will receive the
376 * interrupt
377 * TODO: Handle case where SPI is setup on different CPU than
378 * the targeted CPU and the priority.
379 */
380 irq_set_affinity(desc, cpumask_of(smp_processor_id()));
381 desc->handler->startup(desc);
382 }
383
384 err:
385 spin_unlock_irqrestore(&desc->lock, flags);
386
387 return rc;
388 }
389
is_assignable_irq(unsigned int irq)390 bool is_assignable_irq(unsigned int irq)
391 {
392 /* For now, we can only route SPIs to the guest */
393 return (irq >= NR_LOCAL_IRQS) && (irq < gic_number_lines());
394 }
395
396 /*
397 * Only the hardware domain is allowed to set the configure the
398 * interrupt type for now.
399 *
400 * XXX: See whether it is possible to let any domain configure the type.
401 */
irq_type_set_by_domain(const struct domain * d)402 bool irq_type_set_by_domain(const struct domain *d)
403 {
404 return (d == hardware_domain);
405 }
406
407 /*
408 * Route an IRQ to a specific guest.
409 * For now only SPIs are assignable to the guest.
410 */
route_irq_to_guest(struct domain * d,unsigned int virq,unsigned int irq,const char * devname)411 int route_irq_to_guest(struct domain *d, unsigned int virq,
412 unsigned int irq, const char * devname)
413 {
414 struct irqaction *action;
415 struct irq_guest *info;
416 struct irq_desc *desc;
417 unsigned long flags;
418 int retval = 0;
419
420 if ( virq >= vgic_num_irqs(d) )
421 {
422 printk(XENLOG_G_ERR
423 "the vIRQ number %u is too high for domain %u (max = %u)\n",
424 irq, d->domain_id, vgic_num_irqs(d));
425 return -EINVAL;
426 }
427
428 /* Only routing to virtual SPIs is supported */
429 if ( virq < NR_LOCAL_IRQS )
430 {
431 printk(XENLOG_G_ERR "IRQ can only be routed to an SPI\n");
432 return -EINVAL;
433 }
434
435 if ( !is_assignable_irq(irq) )
436 {
437 printk(XENLOG_G_ERR "the IRQ%u is not routable\n", irq);
438 return -EINVAL;
439 }
440 desc = irq_to_desc(irq);
441
442 action = xmalloc(struct irqaction);
443 if ( !action )
444 return -ENOMEM;
445
446 info = xmalloc(struct irq_guest);
447 if ( !info )
448 {
449 xfree(action);
450 return -ENOMEM;
451 }
452
453 info->d = d;
454 info->virq = virq;
455
456 action->dev_id = info;
457 action->name = devname;
458 action->free_on_release = 1;
459
460 spin_lock_irqsave(&desc->lock, flags);
461
462 if ( !irq_type_set_by_domain(d) && desc->arch.type == IRQ_TYPE_INVALID )
463 {
464 printk(XENLOG_G_ERR "IRQ %u has not been configured\n", irq);
465 retval = -EIO;
466 goto out;
467 }
468
469 /*
470 * If the IRQ is already used by someone
471 * - If it's the same domain -> Xen doesn't need to update the IRQ desc.
472 * For safety check if we are not trying to assign the IRQ to a
473 * different vIRQ.
474 * - Otherwise -> For now, don't allow the IRQ to be shared between
475 * Xen and domains.
476 */
477 if ( desc->action != NULL )
478 {
479 if ( test_bit(_IRQ_GUEST, &desc->status) )
480 {
481 struct domain *ad = irq_get_domain(desc);
482
483 if ( d != ad )
484 {
485 printk(XENLOG_G_ERR "IRQ %u is already used by domain %u\n",
486 irq, ad->domain_id);
487 retval = -EBUSY;
488 }
489 else if ( irq_get_guest_info(desc)->virq != virq )
490 {
491 printk(XENLOG_G_ERR
492 "d%u: IRQ %u is already assigned to vIRQ %u\n",
493 d->domain_id, irq, irq_get_guest_info(desc)->virq);
494 retval = -EBUSY;
495 }
496 }
497 else
498 {
499 printk(XENLOG_G_ERR "IRQ %u is already used by Xen\n", irq);
500 retval = -EBUSY;
501 }
502 goto out;
503 }
504
505 retval = __setup_irq(desc, 0, action);
506 if ( retval )
507 goto out;
508
509 retval = gic_route_irq_to_guest(d, virq, desc, GIC_PRI_IRQ);
510
511 spin_unlock_irqrestore(&desc->lock, flags);
512
513 if ( retval )
514 {
515 release_irq(desc->irq, info);
516 goto free_info;
517 }
518
519 return 0;
520
521 out:
522 spin_unlock_irqrestore(&desc->lock, flags);
523 xfree(action);
524 free_info:
525 xfree(info);
526
527 return retval;
528 }
529
release_guest_irq(struct domain * d,unsigned int virq)530 int release_guest_irq(struct domain *d, unsigned int virq)
531 {
532 struct irq_desc *desc;
533 struct irq_guest *info;
534 unsigned long flags;
535 struct pending_irq *p;
536 int ret;
537
538 /* Only SPIs are supported */
539 if ( virq < NR_LOCAL_IRQS || virq >= vgic_num_irqs(d) )
540 return -EINVAL;
541
542 p = spi_to_pending(d, virq);
543 if ( !p->desc )
544 return -EINVAL;
545
546 desc = p->desc;
547
548 spin_lock_irqsave(&desc->lock, flags);
549
550 ret = -EINVAL;
551 if ( !test_bit(_IRQ_GUEST, &desc->status) )
552 goto unlock;
553
554 info = irq_get_guest_info(desc);
555 ret = -EINVAL;
556 if ( d != info->d )
557 goto unlock;
558
559 ret = gic_remove_irq_from_guest(d, virq, desc);
560 if ( ret )
561 goto unlock;
562
563 spin_unlock_irqrestore(&desc->lock, flags);
564
565 release_irq(desc->irq, info);
566 xfree(info);
567
568 return 0;
569
570 unlock:
571 spin_unlock_irqrestore(&desc->lock, flags);
572
573 return ret;
574 }
575
576 /*
577 * pirq event channels. We don't use these on ARM, instead we use the
578 * features of the GIC to inject virtualised normal interrupts.
579 */
alloc_pirq_struct(struct domain * d)580 struct pirq *alloc_pirq_struct(struct domain *d)
581 {
582 return NULL;
583 }
584
585 /*
586 * These are all unreachable given an alloc_pirq_struct
587 * which returns NULL, all callers try to lookup struct pirq first
588 * which will fail.
589 */
/* Unreachable: pirq lookups always fail on ARM (see alloc_pirq_struct). */
int pirq_guest_bind(struct vcpu *v, struct pirq *pirq, int will_share)
{
    BUG();
}
594
/* Unreachable: pirq lookups always fail on ARM (see alloc_pirq_struct). */
void pirq_guest_unbind(struct domain *d, struct pirq *pirq)
{
    BUG();
}
599
/* Unreachable: pirq lookups always fail on ARM (see alloc_pirq_struct). */
void pirq_set_affinity(struct domain *d, int pirq, const cpumask_t *mask)
{
    BUG();
}
604
irq_validate_new_type(unsigned int curr,unsigned new)605 static bool irq_validate_new_type(unsigned int curr, unsigned new)
606 {
607 return (curr == IRQ_TYPE_INVALID || curr == new );
608 }
609
irq_set_spi_type(unsigned int spi,unsigned int type)610 int irq_set_spi_type(unsigned int spi, unsigned int type)
611 {
612 unsigned long flags;
613 struct irq_desc *desc = irq_to_desc(spi);
614 int ret = -EBUSY;
615
616 /* This function should not be used for other than SPIs */
617 if ( spi < NR_LOCAL_IRQS )
618 return -EINVAL;
619
620 spin_lock_irqsave(&desc->lock, flags);
621
622 if ( !irq_validate_new_type(desc->arch.type, type) )
623 goto err;
624
625 desc->arch.type = type;
626
627 ret = 0;
628
629 err:
630 spin_unlock_irqrestore(&desc->lock, flags);
631 return ret;
632 }
633
irq_local_set_type(unsigned int irq,unsigned int type)634 static int irq_local_set_type(unsigned int irq, unsigned int type)
635 {
636 unsigned int cpu;
637 unsigned int old_type;
638 unsigned long flags;
639 int ret = -EBUSY;
640 struct irq_desc *desc;
641
642 ASSERT(irq < NR_LOCAL_IRQS);
643
644 spin_lock(&local_irqs_type_lock);
645
646 old_type = local_irqs_type[irq];
647
648 if ( !irq_validate_new_type(old_type, type) )
649 goto unlock;
650
651 ret = 0;
652 /* We don't need to reconfigure if the type is correctly set */
653 if ( old_type == type )
654 goto unlock;
655
656 local_irqs_type[irq] = type;
657
658 for_each_cpu( cpu, &cpu_online_map )
659 {
660 desc = &per_cpu(local_irq_desc, cpu)[irq];
661 spin_lock_irqsave(&desc->lock, flags);
662 desc->arch.type = type;
663 spin_unlock_irqrestore(&desc->lock, flags);
664 }
665
666 unlock:
667 spin_unlock(&local_irqs_type_lock);
668 return ret;
669 }
670
irq_set_type(unsigned int irq,unsigned int type)671 int irq_set_type(unsigned int irq, unsigned int type)
672 {
673 int res;
674
675 /* Setup the IRQ type */
676 if ( irq < NR_LOCAL_IRQS )
677 res = irq_local_set_type(irq, type);
678 else
679 res = irq_set_spi_type(irq, type);
680
681 return res;
682 }
683
platform_get_irq(const struct dt_device_node * device,int index)684 int platform_get_irq(const struct dt_device_node *device, int index)
685 {
686 struct dt_irq dt_irq;
687 unsigned int type, irq;
688
689 if ( dt_device_get_irq(device, index, &dt_irq) )
690 return -1;
691
692 irq = dt_irq.irq;
693 type = dt_irq.type;
694
695 if ( irq_set_type(irq, type) )
696 return -1;
697
698 return irq;
699 }
700
701 /*
702 * Local variables:
703 * mode: C
704 * c-file-style: "BSD"
705 * c-basic-offset: 4
706 * indent-tabs-mode: nil
707 * End:
708 */
709