// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <config.h>
#include <drivers/gic.h>
#include <keep.h>
#include <kernel/dt.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <libfdt.h>
#include <util.h>
#include <io.h>
#include <trace.h>

/* Offsets from gic.gicc_base */
#define GICC_CTLR		(0x000)
#define GICC_PMR		(0x004)
#define GICC_IAR		(0x00C)
#define GICC_EOIR		(0x010)

#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
#define GICD_CTLR_ENABLEGRP1S	(1 << 2)
#define GICC_CTLR_FIQEN		(1 << 3)
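/*
 * GICC_CTLR_FIQEN makes Group 0 (secure) interrupts signal as FIQ, so the
 * secure world takes them while the normal world handles IRQs. Note that
 * GICD_CTLR_ENABLEGRP1S is a GICv3 distributor (GICD_CTLR) bit, listed
 * here alongside the CPU interface bits.
 */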

/* Offsets from gic.gicd_base */
#define GICD_CTLR		(0x000)
#define GICD_TYPER		(0x004)
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
#define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
#define GICD_SGIR		(0xF00)

#define GICD_CTLR_ENABLEGRP0	(1 << 0)
#define GICD_CTLR_ENABLEGRP1	(1 << 1)

/* Number of Private Peripheral Interrupts */
#define NUM_PPI			32

/* Number of Software Generated Interrupts */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupts */
#define NUM_NS_SGI		8

/* Number of interrupts in one register */
#define NUM_INTS_PER_REG	32

/* Number of targets in one register */
#define NUM_TARGETS_PER_REG	4

/* Accessors to access ITARGETSRn */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

/* Maximum number of interrupts a GIC can support */
#define GIC_MAX_INTS		1020

#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10
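/*
 * GICC_IAR layout (GICv2): bits [9:0] hold the interrupt ID and, for
 * SGIs, bits [12:10] hold the ID of the CPU that requested the SGI.
 */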

static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
		       uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint8_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
				uint8_t cpu_mask);

static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);
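/* gic_ops must stay unpaged: it is dereferenced from the interrupt path */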

static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	const size_t max_regs = ((GIC_MAX_INTS + NUM_INTS_PER_REG - 1) /
				 NUM_INTS_PER_REG) - 1;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
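	/*
	 * Scan the ISENABLER registers from the top: bits for
	 * unimplemented interrupt IDs are RAZ/WI, so after writing all
	 * ones the highest bit that reads back as set identifies the
	 * largest implemented ID. The original enable state is restored
	 * through ICENABLER afterwards.
	 */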
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}

void gic_cpu_init(struct gic_data *gd)
{
#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/* Per-CPU interrupt configuration:
	 * ID0-ID7 (SGI) for Non-secure interrupts
	 * ID8-ID15 (SGI) for Secure interrupts
	 * All PPIs are configured as Non-secure interrupts.
	 */
	io_write32(gd->gicd_base + GICD_IGROUPR(0), 0xffff00ff);

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}

void gic_init(struct gic_data *gd, paddr_t gicc_base_pa, paddr_t gicd_base_pa)
{
	size_t n;

	gic_init_base_addr(gd, gicc_base_pa, gicd_base_pa);

	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/* Per-CPU interrupt configuration:
			 * ID0-ID7 (SGI) for Non-secure interrupts
			 * ID8-ID15 (SGI) for Secure interrupts
			 * All PPIs are configured as Non-secure interrupts.
			 */
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffff00ff);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1);
#endif
}

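/*
 * Parse a GIC interrupt specifier from the device tree: the first cell
 * selects the interrupt type (0 = SPI, 1 = PPI) and the second cell is
 * the interrupt number relative to that type's base, so SPIs are offset
 * by 32 and PPIs by 16 to obtain the absolute interrupt ID.
 */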
static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
			  uint32_t *prio)
{
	int it_num = DT_INFO_INVALID_INTERRUPT;

	if (type)
		*type = IRQ_TYPE_NONE;

	if (prio)
		*prio = 0;

	if (!properties || count < 2)
		return DT_INFO_INVALID_INTERRUPT;

	it_num = fdt32_to_cpu(properties[1]);

	switch (fdt32_to_cpu(properties[0])) {
	case 1:
		it_num += 16;
		break;
	case 0:
		it_num += 32;
		break;
	default:
		it_num = DT_INFO_INVALID_INTERRUPT;
	}

	return it_num;
}

void gic_init_base_addr(struct gic_data *gd,
			paddr_t gicc_base_pa __maybe_unused,
			paddr_t gicd_base_pa)
{
	vaddr_t gicc_base = 0;
	vaddr_t gicd_base = 0;

	assert(cpu_mmu_enabled());

	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
				    GIC_DIST_REG_SIZE);
	if (!gicd_base)
		panic();

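	/*
	 * With GICv3 the CPU interface is accessed through system
	 * registers (ICC_*) rather than MMIO, so no GICC mapping is
	 * needed in that configuration.
	 */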
	if (!IS_ENABLED(CFG_ARM_GICV3)) {
		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
					    GIC_CPU_REG_SIZE);
		if (!gicc_base)
			panic();
	}

	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
	gd->chip.ops = &gic_ops;

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}

static void gic_it_add(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
	/* Make it non-pending */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to group0 */
	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	/* Assign it to group1S */
	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
}

static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
				uint8_t cpu_mask)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
	uint32_t target, target_shift;
	vaddr_t itargetsr = gd->gicd_base +
			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);

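	/*
	 * ITARGETSR holds one byte-wide CPU target field per interrupt,
	 * four interrupts per 32-bit register, hence the field shift
	 * derived from it % NUM_TARGETS_PER_REG below.
	 */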
	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Route it to selected CPUs */
	target = io_read32(itargetsr);
	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
	target |= cpu_mask << target_shift;
	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
	io_write32(itargetsr, target);
	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
}

static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Set the priority of the interrupt: one byte per interrupt ID */
	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
	     prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
}

static void gic_it_enable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
	vaddr_t base = gd->gicd_base;

	/* Assigned to group0 */
	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));

	/* Enable the interrupt */
	io_write32(base + GICD_ISENABLER(idx), mask);
}

static void gic_it_disable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
}

static void gic_it_set_pending(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	/* Should be a Peripheral Interrupt */
	assert(it >= NUM_SGI);

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
}

static void gic_it_raise_sgi(struct gic_data *gd, size_t it,
			     uint8_t cpu_mask, uint8_t group)
{
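	/*
	 * GICD_SGIR layout: bits [3:0] hold the SGI ID, bit [15] is
	 * NSATT (the group for which the SGI is generated) and bits
	 * [23:16] form the CPU target list.
	 */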
	uint32_t mask_id = it & 0xf;
	uint32_t mask_group = group & 0x1;
	uint32_t mask_cpu = cpu_mask & 0xff;
	uint32_t mask = (mask_id | SHIFT_U32(mask_group, 15) |
			 SHIFT_U32(mask_cpu, 16));

	/* Should be a Software Generated Interrupt */
	assert(it < NUM_SGI);

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_SGIR, mask);
}

static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}

static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}

static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
}

static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
}

static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
{
	size_t reg_idx = it / NUM_TARGETS_PER_REG;
	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
				ITARGETSR_FIELD_BITS;
	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));

	return (target & target_mask) >> target_shift;
}

void gic_dump_state(struct gic_data *gd)
{
	int i;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}

void gic_it_handle(struct gic_data *gd)
{
	uint32_t iar;
	uint32_t id;

	iar = gic_read_iar(gd);
	id = iar & GICC_IAR_IT_ID_MASK;

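	/*
	 * IDs above max_it (including the spurious interrupt ID 1023
	 * returned when no interrupt is pending) are not dispatched to
	 * a handler.
	 */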
	if (id <= gd->max_it)
		itr_handle(id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	gic_write_eoir(gd, iar);
}

static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t type __unused,
		       uint32_t prio __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it > gd->max_it)
		panic();

	gic_it_add(gd, it);
	/* Set the CPU mask to deliver interrupts to any online core */
	gic_it_set_cpu_mask(gd, it, 0xff);
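	/*
	 * Priority 0x1 is close to the highest priority (lower value
	 * means higher priority) and well below the priority mask of
	 * 0x80 programmed at init, so the interrupt is not masked.
	 */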
	gic_it_set_prio(gd, it, 0x1);
}

static void gic_op_enable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it > gd->max_it)
		panic();

	gic_it_enable(gd, it);
}

static void gic_op_disable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it > gd->max_it)
		panic();

	gic_it_disable(gd, it);
}

static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it > gd->max_it)
		panic();

	gic_it_set_pending(gd, it);
}

static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint8_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it > gd->max_it)
		panic();

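	/* SGIs 0-7 are Non-secure (group 1), SGIs 8-15 Secure (group 0) */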
	if (it < NUM_NS_SGI)
		gic_it_raise_sgi(gd, it, cpu_mask, 1);
	else
		gic_it_raise_sgi(gd, it, cpu_mask, 0);
}

static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
				uint8_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it > gd->max_it)
		panic();

	gic_it_set_cpu_mask(gd, it, cpu_mask);
}