/*
 * xen/arch/arm/gic.c
 *
 * ARM Generic Interrupt Controller support
 *
 * Tim Deegan <tim@xen.org>
 * Copyright (c) 2011 Citrix Systems.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <xen/lib.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <xen/list.h>
#include <xen/device_tree.h>
#include <xen/acpi.h>
#include <asm/p2m.h>
#include <asm/domain.h>
#include <asm/platform.h>
#include <asm/device.h>
#include <asm/io.h>
#include <asm/gic.h>
#include <asm/vgic.h>
#include <asm/acpi.h>

static void gic_restore_pending_irqs(struct vcpu *v);

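/*
 * Bitmap of the list registers (LRs) in use on this pCPU. Bit i is set
 * while LR i holds an interrupt for the vCPU running here; the mask is
 * saved into struct vcpu on context switch and restored afterwards.
 */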
static DEFINE_PER_CPU(uint64_t, lr_mask);

#define lr_all_full() (this_cpu(lr_mask) == ((1 << gic_hw_ops->info->nr_lrs) - 1))

#undef GIC_DEBUG

static void gic_update_one_lr(struct vcpu *v, int i);

static const struct gic_hw_operations *gic_hw_ops;

void register_gic_ops(const struct gic_hw_operations *ops)
{
    gic_hw_ops = ops;
}

static void clear_cpu_lr_mask(void)
{
    this_cpu(lr_mask) = 0ULL;
}

enum gic_version gic_hw_version(void)
{
    return gic_hw_ops->info->hw_version;
}

unsigned int gic_number_lines(void)
{
    return gic_hw_ops->info->nr_lines;
}

void gic_save_state(struct vcpu *v)
{
    ASSERT(!local_irq_is_enabled());
    ASSERT(!is_idle_vcpu(v));

    /* No need for spinlocks here because interrupts are disabled around
     * this call and it only accesses struct vcpu fields that cannot be
     * accessed simultaneously by another pCPU.
     */
    v->arch.lr_mask = this_cpu(lr_mask);
    gic_hw_ops->save_state(v);
    isb();
}

void gic_restore_state(struct vcpu *v)
{
    ASSERT(!local_irq_is_enabled());
    ASSERT(!is_idle_vcpu(v));

    this_cpu(lr_mask) = v->arch.lr_mask;
    gic_hw_ops->restore_state(v);

    isb();

    gic_restore_pending_irqs(v);
}

/* desc->irq needs to be disabled before calling this function */
void gic_set_irq_type(struct irq_desc *desc, unsigned int type)
{
    /*
     * IRQ must be disabled before configuring it (see 4.3.13 in ARM IHI
     * 0048B.b). We rely on the caller to do it.
     */
    ASSERT(test_bit(_IRQ_DISABLED, &desc->status));
    ASSERT(spin_is_locked(&desc->lock));
    ASSERT(type != IRQ_TYPE_INVALID);

    gic_hw_ops->set_irq_type(desc, type);
}

static void gic_set_irq_priority(struct irq_desc *desc, unsigned int priority)
{
    gic_hw_ops->set_irq_priority(desc, priority);
}

/* Program the GIC to route an interrupt to the host (i.e. Xen)
 * - needs to be called with desc.lock held
 */
void gic_route_irq_to_xen(struct irq_desc *desc, unsigned int priority)
{
    ASSERT(priority <= 0xff); /* Only 8 bits of priority */
    ASSERT(desc->irq < gic_number_lines()); /* Can't route interrupts that don't exist */
    ASSERT(test_bit(_IRQ_DISABLED, &desc->status));
    ASSERT(spin_is_locked(&desc->lock));

    desc->handler = gic_hw_ops->gic_host_irq_type;

    gic_set_irq_type(desc, desc->arch.type);
    gic_set_irq_priority(desc, priority);
}

/* Program the GIC to route an interrupt to a guest
 * - desc.lock must be held
 */
int gic_route_irq_to_guest(struct domain *d, unsigned int virq,
                           struct irq_desc *desc, unsigned int priority)
{
    unsigned long flags;
    /* Use vcpu0 to retrieve the pending_irq struct. Given that we only
     * route SPIs to guests, it doesn't make any difference. */
    struct vcpu *v_target = vgic_get_target_vcpu(d->vcpu[0], virq);
    struct vgic_irq_rank *rank = vgic_rank_irq(v_target, virq);
    struct pending_irq *p = irq_to_pending(v_target, virq);
    int res = -EBUSY;

    ASSERT(spin_is_locked(&desc->lock));
    /* Caller has already checked that the IRQ is an SPI */
    ASSERT(virq >= 32);
    ASSERT(virq < vgic_num_irqs(d));
    ASSERT(!is_lpi(virq));

    vgic_lock_rank(v_target, rank, flags);

    if ( p->desc ||
         /* The VIRQ should not be already enabled by the guest */
         test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
        goto out;

    desc->handler = gic_hw_ops->gic_guest_irq_type;
    set_bit(_IRQ_GUEST, &desc->status);

    if ( !irq_type_set_by_domain(d) )
        gic_set_irq_type(desc, desc->arch.type);
    gic_set_irq_priority(desc, priority);

    p->desc = desc;
    res = 0;

 out:
    vgic_unlock_rank(v_target, rank, flags);

    return res;
}

/* This function only works with SPIs for now */
int gic_remove_irq_from_guest(struct domain *d, unsigned int virq,
                              struct irq_desc *desc)
{
    struct vcpu *v_target = vgic_get_target_vcpu(d->vcpu[0], virq);
    struct vgic_irq_rank *rank = vgic_rank_irq(v_target, virq);
    struct pending_irq *p = irq_to_pending(v_target, virq);
    unsigned long flags;

    ASSERT(spin_is_locked(&desc->lock));
    ASSERT(test_bit(_IRQ_GUEST, &desc->status));
    ASSERT(p->desc == desc);
    ASSERT(!is_lpi(virq));

    vgic_lock_rank(v_target, rank, flags);

    if ( d->is_dying )
    {
        desc->handler->shutdown(desc);

        /* EOI the IRQ if it has not been done by the guest */
        if ( test_bit(_IRQ_INPROGRESS, &desc->status) )
            gic_hw_ops->deactivate_irq(desc);
        clear_bit(_IRQ_INPROGRESS, &desc->status);
    }
    else
    {
        /*
         * TODO: Handle eviction from LRs. For now, deny the removal if
         * the IRQ is inflight or not disabled.
         */
        if ( test_bit(_IRQ_INPROGRESS, &desc->status) ||
             !test_bit(_IRQ_DISABLED, &desc->status) )
        {
            vgic_unlock_rank(v_target, rank, flags);
            return -EBUSY;
        }
    }

    clear_bit(_IRQ_GUEST, &desc->status);
    desc->handler = &no_irq_type;

    p->desc = NULL;

    vgic_unlock_rank(v_target, rank, flags);

    return 0;
}

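/*
 * Translate a standard 3-cell GIC interrupt specifier into a hardware
 * IRQ number and trigger type: cell 0 selects the interrupt class
 * (0 = SPI, 1 = PPI), cell 1 is the class-relative number and cell 2
 * carries the trigger flags. For example, "interrupts = <0 29 4>"
 * describes SPI 29, i.e. hwirq 32 + 29 = 61, level triggered, active
 * high.
 */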
int gic_irq_xlate(const u32 *intspec, unsigned int intsize,
                  unsigned int *out_hwirq,
                  unsigned int *out_type)
{
    if ( intsize < 3 )
        return -EINVAL;

    /* Get the interrupt number and add 16 to skip over SGIs */
    *out_hwirq = intspec[1] + 16;

    /* For SPIs, we need to add 16 more to get the GIC irq ID number */
    if ( !intspec[0] )
        *out_hwirq += 16;

    if ( out_type )
        *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

    return 0;
}

/* Map extra GIC MMIO regions, IRQs and other hardware resources to the
 * hardware domain. */
int gic_map_hwdom_extra_mappings(struct domain *d)
{
    if ( gic_hw_ops->map_hwdom_extra_mappings )
        return gic_hw_ops->map_hwdom_extra_mappings(d);

    return 0;
}

static void __init gic_dt_preinit(void)
{
    int rc;
    struct dt_device_node *node;
    uint8_t num_gics = 0;

    dt_for_each_device_node( dt_host, node )
    {
        if ( !dt_get_property(node, "interrupt-controller", NULL) )
            continue;

        if ( !dt_get_parent(node) )
            continue;

        rc = device_init(node, DEVICE_GIC, NULL);
        if ( !rc )
        {
            /* NOTE: Only one GIC is supported */
            num_gics = 1;
            break;
        }
    }
    if ( !num_gics )
        panic("Unable to find compatible GIC in the device tree");

    /* Set the GIC as the primary interrupt controller */
    dt_interrupt_controller = node;
    dt_device_set_used_by(node, DOMID_XEN);
}

#ifdef CONFIG_ACPI
static void __init gic_acpi_preinit(void)
{
    struct acpi_subtable_header *header;
    struct acpi_madt_generic_distributor *dist;

    header = acpi_table_get_entry_madt(ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 0);
    if ( !header )
        panic("No valid GICD entry exists");

    dist = container_of(header, struct acpi_madt_generic_distributor, header);

    if ( acpi_device_init(DEVICE_GIC, NULL, dist->version) )
        panic("Unable to find compatible GIC in the ACPI table");
}
#else
static void __init gic_acpi_preinit(void) { }
#endif

/* Find the interrupt controller and set up the callback to translate
 * device tree or ACPI IRQ.
 */
void __init gic_preinit(void)
{
    if ( acpi_disabled )
        gic_dt_preinit();
    else
        gic_acpi_preinit();
}

/* Set up the GIC */
void __init gic_init(void)
{
    if ( gic_hw_ops->init() )
        panic("Failed to initialize the GIC drivers");
    /* Clear LR mask for cpu0 */
    clear_cpu_lr_mask();
}

void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi)
{
    ASSERT(sgi < 16); /* There are only 16 SGIs */

    dsb(sy);
    gic_hw_ops->send_SGI(sgi, SGI_TARGET_LIST, cpumask);
}

void send_SGI_one(unsigned int cpu, enum gic_sgi sgi)
{
    send_SGI_mask(cpumask_of(cpu), sgi);
}

void send_SGI_self(enum gic_sgi sgi)
{
    ASSERT(sgi < 16); /* There are only 16 SGIs */

    dsb(sy);
    gic_hw_ops->send_SGI(sgi, SGI_TARGET_SELF, NULL);
}

void send_SGI_allbutself(enum gic_sgi sgi)
{
    ASSERT(sgi < 16); /* There are only 16 SGIs */

    dsb(sy);
    gic_hw_ops->send_SGI(sgi, SGI_TARGET_OTHERS, NULL);
}

void smp_send_state_dump(unsigned int cpu)
{
    send_SGI_one(cpu, GIC_SGI_DUMP_STATE);
}

/* Set up the per-CPU parts of the GIC for a secondary CPU */
void gic_init_secondary_cpu(void)
{
    gic_hw_ops->secondary_init();
    /* Clear LR mask for secondary cpus */
    clear_cpu_lr_mask();
}

/* Shut down the per-CPU GIC interface */
void gic_disable_cpu(void)
{
    ASSERT(!local_irq_is_enabled());

    gic_hw_ops->disable_interface();
}

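/*
 * Program list register @lr with the interrupt described by @p in the
 * given @state and update the bookkeeping: the interrupt becomes
 * VISIBLE to the guest and is no longer merely QUEUED.
 */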
static inline void gic_set_lr(int lr, struct pending_irq *p,
                              unsigned int state)
{
    ASSERT(!local_irq_is_enabled());

    clear_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &p->status);

    gic_hw_ops->update_lr(lr, p, state);

    set_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
    clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
    p->lr = lr;
}

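/*
 * Add @n to the vCPU's lr_pending queue, which holds interrupts that
 * are waiting for a free LR. The queue is kept sorted by priority
 * (lowest value, i.e. highest priority, first) so LRs can later be
 * filled in priority order.
 */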
static inline void gic_add_to_lr_pending(struct vcpu *v, struct pending_irq *n)
{
    struct pending_irq *iter;

    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    if ( !list_empty(&n->lr_queue) )
        return;

    list_for_each_entry ( iter, &v->arch.vgic.lr_pending, lr_queue )
    {
        if ( iter->priority > n->priority )
        {
            list_add_tail(&n->lr_queue, &iter->lr_queue);
            return;
        }
    }
    list_add_tail(&n->lr_queue, &v->arch.vgic.lr_pending);
}

void gic_remove_from_lr_pending(struct vcpu *v, struct pending_irq *p)
{
    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    list_del_init(&p->lr_queue);
}

void gic_remove_irq_from_queues(struct vcpu *v, struct pending_irq *p)
{
    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
    list_del_init(&p->inflight);
    gic_remove_from_lr_pending(v, p);
}

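/*
 * Re-raise an interrupt that is already inflight on @v: if it is
 * resident in an LR of the running vCPU (i.e. not sitting on the
 * lr_pending queue), refresh that LR so the new pending state becomes
 * visible to the guest.
 */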
void gic_raise_inflight_irq(struct vcpu *v, unsigned int virtual_irq)
{
    struct pending_irq *n = irq_to_pending(v, virtual_irq);

    /* If an LPI has been removed meanwhile, there is nothing left to raise. */
    if ( unlikely(!n) )
        return;

    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    /* Don't try to update the LR if the interrupt is disabled */
    if ( !test_bit(GIC_IRQ_GUEST_ENABLED, &n->status) )
        return;

    if ( list_empty(&n->lr_queue) )
    {
        if ( v == current )
            gic_update_one_lr(v, n->lr);
    }
#ifdef GIC_DEBUG
    else
        gdprintk(XENLOG_DEBUG, "trying to inject irq=%u into d%dv%d, when it is still lr_pending\n",
                 virtual_irq, v->domain->domain_id, v->vcpu_id);
#endif
}

/*
 * Find an unused LR to insert an IRQ into, starting with the LR given
 * by @lr. If this new interrupt is a PRISTINE LPI, scan the other LRs to
 * avoid inserting the same IRQ twice. This situation can occur when an
 * event gets discarded while the LPI is in an LR, and a new LPI with the
 * same number gets mapped quickly afterwards.
 */
static unsigned int gic_find_unused_lr(struct vcpu *v,
                                       struct pending_irq *p,
                                       unsigned int lr)
{
    unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;
    unsigned long *lr_mask = (unsigned long *) &this_cpu(lr_mask);
    struct gic_lr lr_val;

    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    if ( unlikely(test_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &p->status)) )
    {
        unsigned int used_lr;

        for_each_set_bit(used_lr, lr_mask, nr_lrs)
        {
            gic_hw_ops->read_lr(used_lr, &lr_val);
            if ( lr_val.virq == p->irq )
                return used_lr;
        }
    }

    lr = find_next_zero_bit(lr_mask, nr_lrs, lr);

    return lr;
}

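/*
 * Try to make @virtual_irq pending for @v: if @v is the current vCPU
 * and a free LR is available, program the LR directly; otherwise queue
 * the interrupt on lr_pending, to be picked up on the next entry to
 * the guest.
 */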
void gic_raise_guest_irq(struct vcpu *v, unsigned int virtual_irq,
                         unsigned int priority)
{
    int i;
    unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;
    struct pending_irq *p = irq_to_pending(v, virtual_irq);

    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    if ( unlikely(!p) )
        /* An unmapped LPI does not need to be raised. */
        return;

    if ( v == current && list_empty(&v->arch.vgic.lr_pending) )
    {
        i = gic_find_unused_lr(v, p, 0);

        if ( i < nr_lrs )
        {
            set_bit(i, &this_cpu(lr_mask));
            gic_set_lr(i, p, GICH_LR_PENDING);
            return;
        }
    }

    gic_add_to_lr_pending(v, p);
}

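/*
 * Synchronise the state of LR @i back into the corresponding
 * pending_irq: record interrupts the guest has activated, make an IRQ
 * that became pending again while still active visible in the LR, and
 * recycle LRs whose interrupt the guest has fully EOIed, completing
 * any deferred migration of the physical IRQ at that point.
 */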
static void gic_update_one_lr(struct vcpu *v, int i)
{
    struct pending_irq *p;
    int irq;
    struct gic_lr lr_val;

    ASSERT(spin_is_locked(&v->arch.vgic.lock));
    ASSERT(!local_irq_is_enabled());

    gic_hw_ops->read_lr(i, &lr_val);
    irq = lr_val.virq;
    p = irq_to_pending(v, irq);
    /*
     * An LPI might have been unmapped, in which case we just clean up here.
     * If that LPI is marked as PRISTINE, the information in the LR is bogus,
     * as it belongs to a previous, already unmapped LPI. So we discard it
     * here as well.
     */
    if ( unlikely(!p ||
                  test_and_clear_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &p->status)) )
    {
        ASSERT(is_lpi(irq));

        gic_hw_ops->clear_lr(i);
        clear_bit(i, &this_cpu(lr_mask));

        return;
    }

    if ( lr_val.state & GICH_LR_ACTIVE )
    {
        set_bit(GIC_IRQ_GUEST_ACTIVE, &p->status);
        if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
             test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status) )
        {
            if ( p->desc == NULL )
            {
                lr_val.state |= GICH_LR_PENDING;
                gic_hw_ops->write_lr(i, &lr_val);
            }
            else
                gdprintk(XENLOG_WARNING, "unable to inject hw irq=%d into d%dv%d: already active in LR%d\n",
                         irq, v->domain->domain_id, v->vcpu_id, i);
        }
    }
    else if ( lr_val.state & GICH_LR_PENDING )
    {
        int q __attribute__ ((unused)) = test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
#ifdef GIC_DEBUG
        if ( q )
            gdprintk(XENLOG_DEBUG, "trying to inject irq=%d into d%dv%d, when it is already pending in LR%d\n",
                     irq, v->domain->domain_id, v->vcpu_id, i);
#endif
    }
    else
    {
        gic_hw_ops->clear_lr(i);
        clear_bit(i, &this_cpu(lr_mask));

        if ( p->desc != NULL )
            clear_bit(_IRQ_INPROGRESS, &p->desc->status);
        clear_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
        clear_bit(GIC_IRQ_GUEST_ACTIVE, &p->status);
        p->lr = GIC_INVALID_LR;
        if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
             test_bit(GIC_IRQ_GUEST_QUEUED, &p->status) &&
             !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
            gic_raise_guest_irq(v, irq, p->priority);
        else
        {
            list_del_init(&p->inflight);
            /*
             * Remove from inflight, then change physical affinity. It
             * makes sure that when a new interrupt is received on the
             * next pcpu, inflight is already cleared. No concurrent
             * accesses to inflight.
             */
            smp_wmb();
            if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
            {
                struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
                irq_set_affinity(p->desc, cpumask_of(v_target->processor));
                clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);
            }
        }
    }
}

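/*
 * Fold the state of every in-use LR on this pCPU back into the vGIC
 * via gic_update_one_lr(), so that Xen's view of the LRs is up to date
 * before the vGIC state is inspected.
 */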
void gic_clear_lrs(struct vcpu *v)
{
    int i = 0;
    unsigned long flags;
    unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;

    /* The idle domain has no LRs to be cleared. Since gic_restore_state
     * doesn't write any LR registers for the idle domain they could be
     * non-zero. */
    if ( is_idle_vcpu(v) )
        return;

    gic_hw_ops->update_hcr_status(GICH_HCR_UIE, false);

    spin_lock_irqsave(&v->arch.vgic.lock, flags);

    while ( (i = find_next_bit((const unsigned long *) &this_cpu(lr_mask),
                               nr_lrs, i)) < nr_lrs )
    {
        gic_update_one_lr(v, i);
        i++;
    }

    spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
}

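/*
 * Fill free LRs from the (priority-sorted) lr_pending queue on the way
 * back into the guest. If no LR is free, evict an interrupt that is
 * visible to the guest but not yet active, provided its priority is
 * strictly lower than that of the interrupt being inserted.
 */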
static void gic_restore_pending_irqs(struct vcpu *v)
{
    int lr = 0;
    struct pending_irq *p, *t, *p_r;
    struct list_head *inflight_r;
    unsigned long flags;
    unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;
    int lrs = nr_lrs;

    spin_lock_irqsave(&v->arch.vgic.lock, flags);

    if ( list_empty(&v->arch.vgic.lr_pending) )
        goto out;

    inflight_r = &v->arch.vgic.inflight_irqs;
    list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue )
    {
        lr = gic_find_unused_lr(v, p, lr);
        if ( lr >= nr_lrs )
        {
            /* No more free LRs: find a lower priority irq to evict */
            list_for_each_entry_reverse( p_r, inflight_r, inflight )
            {
                if ( p_r->priority == p->priority )
                    goto out;
                if ( test_bit(GIC_IRQ_GUEST_VISIBLE, &p_r->status) &&
                     !test_bit(GIC_IRQ_GUEST_ACTIVE, &p_r->status) )
                    goto found;
            }
            /* We didn't find a victim this time, and we won't next
             * time, so quit */
            goto out;

 found:
            lr = p_r->lr;
            p_r->lr = GIC_INVALID_LR;
            set_bit(GIC_IRQ_GUEST_QUEUED, &p_r->status);
            clear_bit(GIC_IRQ_GUEST_VISIBLE, &p_r->status);
            gic_add_to_lr_pending(v, p_r);
            inflight_r = &p_r->inflight;
        }

        gic_set_lr(lr, p, GICH_LR_PENDING);
        list_del_init(&p->lr_queue);
        set_bit(lr, &this_cpu(lr_mask));

        /* We can only evict nr_lrs entries */
        lrs--;
        if ( lrs == 0 )
            break;
    }

 out:
    spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
}

void gic_clear_pending_irqs(struct vcpu *v)
{
    struct pending_irq *p, *t;

    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    v->arch.lr_mask = 0;
    list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue )
        gic_remove_from_lr_pending(v, p);
}

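/*
 * Return nonzero if at least one inflight interrupt is enabled and of
 * high enough priority to be delivered now, i.e. it beats both the
 * guest's priority mask and the highest currently active priority.
 */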
int gic_events_need_delivery(void)
{
    struct vcpu *v = current;
    struct pending_irq *p;
    unsigned long flags;
    const unsigned long apr = gic_hw_ops->read_apr(0);
    int mask_priority;
    int active_priority;
    int rc = 0;

    mask_priority = gic_hw_ops->read_vmcr_priority();
    active_priority = find_next_bit(&apr, 32, 0);

    spin_lock_irqsave(&v->arch.vgic.lock, flags);

    /* TODO: We order the guest irqs by priority, but we don't change
     * the priority of host irqs. */

    /* find the first enabled non-active irq, the queue is already
     * ordered by priority */
    list_for_each_entry( p, &v->arch.vgic.inflight_irqs, inflight )
    {
        if ( GIC_PRI_TO_GUEST(p->priority) >= mask_priority )
            goto out;
        if ( GIC_PRI_TO_GUEST(p->priority) >= active_priority )
            goto out;
        if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
        {
            rc = 1;
            goto out;
        }
    }

 out:
    spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
    return rc;
}

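/*
 * Called on the return-to-guest path: move queued interrupts into the
 * LRs. If interrupts are still queued with every LR in use, enable the
 * underflow maintenance interrupt (GICH_HCR_UIE) so we are notified
 * once the guest has drained the LRs.
 */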
void gic_inject(void)
{
    ASSERT(!local_irq_is_enabled());

    gic_restore_pending_irqs(current);

    if ( !list_empty(&current->arch.vgic.lr_pending) && lr_all_full() )
        gic_hw_ops->update_hcr_status(GICH_HCR_UIE, true);
}

static void do_sgi(struct cpu_user_regs *regs, enum gic_sgi sgi)
{
    struct irq_desc *desc = irq_to_desc(sgi);

    perfc_incr(ipis);

    /* Lower the priority */
    gic_hw_ops->eoi_irq(desc);

    switch ( sgi )
    {
    case GIC_SGI_EVENT_CHECK:
        /* Nothing to do, will check for events on return path */
        break;
    case GIC_SGI_DUMP_STATE:
        dump_execstate(regs);
        break;
    case GIC_SGI_CALL_FUNCTION:
        smp_call_function_interrupt();
        break;
    default:
        panic("Unhandled SGI %d on CPU%d", sgi, smp_processor_id());
        break;
    }

    /* Deactivate */
    gic_hw_ops->deactivate_irq(desc);
}

/* Accept an interrupt from the GIC and dispatch its handler */
void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
{
    unsigned int irq;

    do {
        /* Reading IRQ will ACK it */
        irq = gic_hw_ops->read_irq();

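        /*
         * INTIDs 0-15 are SGIs, 16-31 PPIs and 32-1019 SPIs; 1020-1023
         * are special (1023 means spurious). LPIs, when implemented,
         * start at 8192 and take their own path below.
         */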
        if ( likely(irq >= 16 && irq < 1020) )
        {
            local_irq_enable();
            do_IRQ(regs, irq, is_fiq);
            local_irq_disable();
        }
        else if ( is_lpi(irq) )
        {
            local_irq_enable();
            gic_hw_ops->do_LPI(irq);
            local_irq_disable();
        }
        else if ( unlikely(irq < 16) )
        {
            do_sgi(regs, irq);
        }
        else
        {
            local_irq_disable();
            break;
        }
    } while (1);
}

static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
{
    /*
     * This is a dummy interrupt handler.
     * Receiving the interrupt is going to cause gic_inject to be called
     * on return to guest, which will clear the old LRs and inject new
     * interrupts.
     *
     * Do not add code here: maintenance interrupts caused by setting
     * GICH_HCR_UIE might read as spurious interrupts (1023) because
     * GICH_HCR_UIE is cleared before reading GICC_IAR. As a consequence
     * this handler is not called.
     */
    perfc_incr(maintenance_irqs);
}

void gic_dump_info(struct vcpu *v)
{
    struct pending_irq *p;

    printk("GICH_LRs (vcpu %d) mask=%"PRIx64"\n", v->vcpu_id, v->arch.lr_mask);
    gic_hw_ops->dump_state(v);

    list_for_each_entry ( p, &v->arch.vgic.inflight_irqs, inflight )
    {
        printk("Inflight irq=%u lr=%u\n", p->irq, p->lr);
    }

    list_for_each_entry ( p, &v->arch.vgic.lr_pending, lr_queue )
    {
        printk("Pending irq=%d\n", p->irq);
    }
}

void init_maintenance_interrupt(void)
{
    request_irq(gic_hw_ops->info->maintenance_irq, 0, maintenance_interrupt,
                "irq-maintenance", NULL);
}

int gic_make_hwdom_dt_node(const struct domain *d,
                           const struct dt_device_node *gic,
                           void *fdt)
{
    ASSERT(gic == dt_interrupt_controller);

    return gic_hw_ops->make_hwdom_dt_node(d, gic, fdt);
}

int gic_make_hwdom_madt(const struct domain *d, u32 offset)
{
    return gic_hw_ops->make_hwdom_madt(d, offset);
}

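/*
 * Size of the MADT built for the hardware domain: the table header,
 * one GICC entry per vCPU and one GICD entry, plus whatever extra
 * space the GIC driver requests (e.g. redistributor or ITS entries).
 */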
unsigned long gic_get_hwdom_madt_size(const struct domain *d)
{
    unsigned long madt_size;

    madt_size = sizeof(struct acpi_table_madt)
                + sizeof(struct acpi_madt_generic_interrupt) * d->max_vcpus
                + sizeof(struct acpi_madt_generic_distributor)
                + gic_hw_ops->get_hwdom_extra_madt_size(d);

    return madt_size;
}

int gic_iomem_deny_access(const struct domain *d)
{
    return gic_hw_ops->iomem_deny_access(d);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */