/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-20     Bernard      first version
 * 2014-04-03     Grissiom     many enhancements
 * 2018-11-22     Jesven       add rt_hw_ipi_send()
 *                             add rt_hw_ipi_handler_install()
 * 2022-08-24     GuEe-GUI     add pic support
 * 2022-11-07     GuEe-GUI     add v2m support
 */

#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>

#define DBG_TAG "pic.gicv2"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <cpuport.h>

#include <ioremap.h>

#include "pic-gicv2.h"
#include "pic-gic-common.h"

#define GIC_CPU_IMAX 8

#define raw_to_gicv2(raw) rt_container_of(raw, struct gicv2, parent)

static rt_bool_t needs_rmw_access = RT_FALSE;
static int _gicv2_nr = 0, _init_cpu_id = 0;
static struct gicv2 _gicv2_list[RT_PIC_ARM_GIC_MAX_NR] = {};
static rt_bool_t _gicv2_eoi_mode_ns = RT_FALSE;
static rt_uint8_t _gicv2_cpumask_map[GIC_CPU_IMAX] =
{
    [0 ... GIC_CPU_IMAX - 1] = 0xff,
};

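/*
 * Read the banked GIC_DIST_TARGET registers of the SGI/PPI range to
 * discover the CPU interface mask of the calling processor.
 */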
static rt_uint8_t gicv2_cpumask_map(struct gicv2 *gic)
{
    rt_uint32_t mask, i;

    for (i = mask = 0; i < 32; i += 4)
    {
        mask = HWREG32(gic->dist_base + GIC_DIST_TARGET + i);
        mask |= mask >> 16;
        mask |= mask >> 8;

        if (mask)
        {
            break;
        }
    }

    return mask;
}

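/*
 * Probe the supported interrupt count and initialize the distributor:
 * route every SPI to the boot CPU, apply the common distributor defaults
 * and re-enable the distributor (skipped when "skip-init" is set).
 */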
static void gicv2_dist_init(struct gicv2 *gic)
{
    void *base = gic->dist_base;
    rt_uint32_t i;
    rt_uint32_t cpumask = gicv2_cpumask_map(gic);

    _init_cpu_id = rt_hw_cpu_id();

    gic->max_irq = HWREG32(base + GIC_DIST_TYPE) & 0x1f;
    gic->max_irq = (gic->max_irq + 1) * 32;

    /*
     * The GIC only supports up to 1020 interrupt sources.
     * Limit this to either the architected maximum, or the
     * platform maximum.
     */
    if (gic->max_irq > 1020)
    {
        gic->max_irq = 1020;
    }

    LOG_D("Max irq = %d", gic->max_irq);

    if (gic->skip_init)
    {
        return;
    }

    HWREG32(base + GIC_DIST_CTRL) = GICD_DISABLE;

    /* Set all global (unused) interrupts to this CPU only. */
    cpumask |= cpumask << 8;
    cpumask |= cpumask << 16;

    for (i = 32; i < gic->max_irq; i += 4)
    {
        HWREG32(base + GIC_DIST_TARGET + i * 4 / 4) = cpumask;
    }

    gic_common_dist_config(base, gic->max_irq, RT_NULL, RT_NULL);

    HWREG32(base + GIC_DIST_CTRL) = GICD_ENABLE;
}

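/*
 * Per-CPU interface setup: record this CPU's interface mask, configure the
 * banked SGI/PPI registers, program the priority mask and binary point,
 * then enable the CPU interface (with EOImodeNS when requested).
 */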
static void gicv2_cpu_init(struct gicv2 *gic)
{
    rt_uint32_t cpumask;
    void *base = gic->cpu_base;
    rt_uint32_t config = GICC_ENABLE;
    int cpu_id = rt_hw_cpu_id();

    cpumask = gicv2_cpumask_map(gic);
    _gicv2_cpumask_map[cpu_id] = cpumask;

    /*
     * Clear our mask from the other map entries in case they're
     * still undefined.
     */
    for (int i = 0; i < RT_ARRAY_SIZE(_gicv2_cpumask_map); ++i)
    {
        if (i != cpu_id)
        {
            _gicv2_cpumask_map[i] &= ~cpumask;
        }
    }

    gic_common_cpu_config(gic->dist_base, 32, RT_NULL, RT_NULL);

    HWREG32(base + GIC_CPU_PRIMASK) = GICC_INT_PRI_THRESHOLD;
    HWREG32(base + GIC_CPU_BINPOINT) = 0x7;

#ifdef ARCH_SUPPORT_HYP
    _gicv2_eoi_mode_ns = RT_TRUE;
#else
    _gicv2_eoi_mode_ns = !!rt_ofw_bootargs_select("pic.gicv2_eoimode", 0);
#endif

    if (_gicv2_eoi_mode_ns)
    {
        config |= GIC_CPU_CTRL_EOI_MODE_NS;
    }

    HWREG32(base + GIC_CPU_CTRL) = config;
}

static rt_err_t gicv2_irq_init(struct rt_pic *pic)
{
    gicv2_cpu_init(rt_container_of(pic, struct gicv2, parent));

    return RT_EOK;
}

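/*
 * Acknowledge an interrupt: clear its pending bit in legacy EOI mode, then
 * write GIC_CPU_EOI. With EOImodeNS the EOI write only drops the running
 * priority and the deactivation is deferred to gicv2_irq_eoi().
 */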
static void gicv2_irq_ack(struct rt_pic_irq *pirq)
{
    int hwirq = pirq->hwirq;
    struct gicv2 *gic = raw_to_gicv2(pirq->pic);

    if (!_gicv2_eoi_mode_ns)
    {
        HWREG32(gic->dist_base + GIC_DIST_PENDING_CLEAR + hwirq / 32 * 4) = 1U << (hwirq % 32);
    }

    HWREG32(gic->cpu_base + GIC_CPU_EOI) = hwirq;
}

static void gicv2_irq_mask(struct rt_pic_irq *pirq)
{
    int hwirq = pirq->hwirq;
    struct gicv2 *gic = raw_to_gicv2(pirq->pic);

    HWREG32(gic->dist_base + GIC_DIST_ENABLE_CLEAR + hwirq / 32 * 4) = 1U << (hwirq % 32);
}

static void gicv2_irq_unmask(struct rt_pic_irq *pirq)
{
    int hwirq = pirq->hwirq;
    struct gicv2 *gic = raw_to_gicv2(pirq->pic);

    HWREG32(gic->dist_base + GIC_DIST_ENABLE_SET + hwirq / 32 * 4) = 1U << (hwirq % 32);
}

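/* Deactivate the interrupt through GIC_CPU_DIR when EOImodeNS is in use. */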
static void gicv2_irq_eoi(struct rt_pic_irq *pirq)
{
    struct gicv2 *gic = raw_to_gicv2(pirq->pic);

    if (_gicv2_eoi_mode_ns)
    {
        HWREG32(gic->cpu_base + GIC_CPU_DIR) = pirq->hwirq;
    }
}

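/* Update the 8-bit priority field of hwirq within its GIC_DIST_PRI word. */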
static rt_err_t gicv2_irq_set_priority(struct rt_pic_irq *pirq, rt_uint32_t priority)
{
    rt_uint32_t mask;
    int hwirq = pirq->hwirq;
    struct gicv2 *gic = raw_to_gicv2(pirq->pic);

    mask = HWREG32(gic->dist_base + GIC_DIST_PRI + hwirq / 4 * 4);
    mask &= ~(0xffU << ((hwirq % 4) * 8));
    mask |= ((priority & 0xffU) << ((hwirq % 4) * 8));
    HWREG32(gic->dist_base + GIC_DIST_PRI + hwirq / 4 * 4) = mask;

    return RT_EOK;
}

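/*
 * Retarget an interrupt: take the first CPU set in the affinity bitmap and
 * write its interface mask into the per-interrupt GIC_DIST_TARGET byte,
 * falling back to a locked read-modify-write on controllers that cannot
 * handle byte accesses.
 */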
static rt_err_t gicv2_irq_set_affinity(struct rt_pic_irq *pirq, rt_bitmap_t *affinity)
{
    int hwirq = pirq->hwirq;
    struct gicv2 *gic = raw_to_gicv2(pirq->pic);
    rt_uint32_t target_list = ((rt_uint8_t *)affinity)[gic - &_gicv2_list[0]];
    rt_uint8_t valb = _gicv2_cpumask_map[__rt_ffs(target_list) - 1];
    void *io_addr = gic->dist_base + GIC_DIST_TARGET + hwirq;

    if (valb == 0xfe)
    {
        return -RT_EIO;
    }

    if (needs_rmw_access)
    {
        /* RMW write byte */
        rt_uint32_t val;
        rt_ubase_t level;
        rt_ubase_t offset = (rt_ubase_t)io_addr & 3UL, shift = offset * 8;
        static RT_DEFINE_SPINLOCK(rmw_lock);

        level = rt_spin_lock_irqsave(&rmw_lock);

        io_addr -= offset;
        val = HWREG32(io_addr);
        val &= ~RT_GENMASK(shift + 7, shift);
        val |= valb << shift;
        HWREG32(io_addr) = val;

        rt_spin_unlock_irqrestore(&rmw_lock, level);
    }
    else
    {
        HWREG8(io_addr) = valb;
    }

    return RT_EOK;
}

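/* Configure edge/level trigger for PPIs and SPIs; SGIs are not configurable. */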
static rt_err_t gicv2_irq_set_triger_mode(struct rt_pic_irq *pirq, rt_uint32_t mode)
{
    rt_err_t err = RT_EOK;
    int hwirq = pirq->hwirq;
    struct gicv2 *gic = raw_to_gicv2(pirq->pic);

    if (hwirq >= GIC_SGI_NR)
    {
        err = gic_common_configure_irq(gic->dist_base + GIC_DIST_CONFIG, pirq->hwirq, mode, RT_NULL, RT_NULL);
    }
    else
    {
        err = -RT_ENOSYS;
    }

    return err;
}

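/*
 * Raise a software generated interrupt (SGI): for every controller with a
 * non-empty target byte in the CPU mask, write the target list and SGI id
 * into GIC_DIST_SOFTINT.
 */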
static void gicv2_irq_send_ipi(struct rt_pic_irq *pirq, rt_bitmap_t *cpumask)
{
    struct gicv2 *gic;
    int sgi = pirq->hwirq;
    rt_uint8_t *target_list = (rt_uint8_t *)cpumask;

    for (int i = 0; i < _gicv2_nr; ++i)
    {
        if (*target_list)
        {
            gic = &_gicv2_list[i];

            HWREG32(gic->dist_base + GIC_DIST_SOFTINT) = ((*target_list & 0xffU) << 16) | (sgi & 0xf);

            rt_hw_dsb();
        }

        ++target_list;
    }
}

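/*
 * Force the pending/active/enable state of hwirq by writing the matching
 * set or clear distributor register (one bit per interrupt).
 */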
static rt_err_t gicv2_irq_set_state(struct rt_pic *pic, int hwirq, int type, rt_bool_t state)
{
    rt_err_t err = RT_EOK;
    rt_uint32_t offset = 0;
    struct gicv2 *gic = raw_to_gicv2(pic);

    switch (type)
    {
    case RT_IRQ_STATE_PENDING:
        offset = state ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
        break;
    case RT_IRQ_STATE_ACTIVE:
        offset = state ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
        break;
    case RT_IRQ_STATE_MASKED:
        offset = state ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
        break;
    default:
        err = -RT_EINVAL;
        break;
    }

    if (!err)
    {
        rt_uint32_t mask = 1U << (hwirq % 32);

        HWREG32(gic->dist_base + offset + (hwirq / 32) * 4) = mask;
    }

    return err;
}

static rt_err_t gicv2_irq_get_state(struct rt_pic *pic, int hwirq, int type, rt_bool_t *out_state)
{
    rt_err_t err = RT_EOK;
    rt_uint32_t offset = 0;
    struct gicv2 *gic = raw_to_gicv2(pic);

    switch (type)
    {
    case RT_IRQ_STATE_PENDING:
        offset = GIC_DIST_PENDING_SET;
        break;
    case RT_IRQ_STATE_ACTIVE:
        offset = GIC_DIST_ACTIVE_SET;
        break;
    case RT_IRQ_STATE_MASKED:
        offset = GIC_DIST_ENABLE_SET;
        break;
    default:
        err = -RT_EINVAL;
        break;
    }

    if (!err)
    {
        rt_uint32_t mask = 1U << (hwirq % 32);

        *out_state = !!(HWREG32(gic->dist_base + offset + (hwirq / 32) * 4) & mask);
    }

    return err;
}

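/*
 * Bind a hardware interrupt to its PIC descriptor: assign the default
 * priority and affinity (PPIs target every CPU, SPIs the boot CPU) and
 * program any non-default trigger mode.
 */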
static int gicv2_irq_map(struct rt_pic *pic, int hwirq, rt_uint32_t mode)
{
    int irq, irq_index = hwirq - GIC_SGI_NR;
    struct rt_pic_irq *pirq = rt_pic_find_irq(pic, irq_index);

    if (pirq && hwirq >= GIC_SGI_NR)
    {
        pirq->mode = mode;
        pirq->priority = GICD_INT_DEF_PRI;

        if (hwirq < 32)
        {
            gic_fill_ppi_affinity(pirq->affinity);
        }
        else
        {
            RT_IRQ_AFFINITY_SET(pirq->affinity, _init_cpu_id);
        }

        irq = rt_pic_config_irq(pic, irq_index, hwirq);

        if (irq >= 0 && mode != RT_IRQ_MODE_LEVEL_HIGH)
        {
            gicv2_irq_set_triger_mode(pirq, mode);
        }
    }
    else
    {
        irq = -1;
    }

    return irq;
}

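/*
 * Translate an OFW interrupt specifier of the form <type number flags>:
 * type 0 is an SPI (hwirq = number + 32), type 1 a PPI (hwirq = number + 16),
 * and the last cell carries the trigger flags.
 */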
static rt_err_t gicv2_irq_parse(struct rt_pic *pic, struct rt_ofw_cell_args *args, struct rt_pic_irq *out_pirq)
{
    rt_err_t err = RT_EOK;

    if (args->args_count == 3)
    {
        out_pirq->mode = args->args[2] & RT_IRQ_MODE_MASK;

        switch (args->args[0])
        {
        case 0:
            /* SPI */
            out_pirq->hwirq = args->args[1] + 32;
            break;
        case 1:
            /* PPI */
            out_pirq->hwirq = args->args[1] + 16;
            break;
        default:
            err = -RT_ENOSYS;
            break;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

const static struct rt_pic_ops gicv2_ops =
{
    .name = "GICv2",
    .irq_init = gicv2_irq_init,
    .irq_ack = gicv2_irq_ack,
    .irq_mask = gicv2_irq_mask,
    .irq_unmask = gicv2_irq_unmask,
    .irq_eoi = gicv2_irq_eoi,
    .irq_set_priority = gicv2_irq_set_priority,
    .irq_set_affinity = gicv2_irq_set_affinity,
    .irq_set_triger_mode = gicv2_irq_set_triger_mode,
    .irq_send_ipi = gicv2_irq_send_ipi,
    .irq_set_state = gicv2_irq_set_state,
    .irq_get_state = gicv2_irq_get_state,
    .irq_map = gicv2_irq_map,
    .irq_parse = gicv2_irq_parse,
};

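/*
 * Top-level interrupt trap: read GIC_CPU_INTACK, dispatch the IRQ or IPI
 * through the PIC framework and complete it, ignoring the special
 * interrupt IDs 1020-1023.
 */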
static rt_bool_t gicv2_handler(void *data)
{
    rt_bool_t res = RT_FALSE;
    int hwirq;
    struct gicv2 *gic = data;

    hwirq = HWREG32(gic->cpu_base + GIC_CPU_INTACK) & 0x3ffUL;

    if (!(hwirq >= 1020 && hwirq <= 1023))
    {
        struct rt_pic_irq *pirq;

        if (hwirq < GIC_SGI_NR)
        {
            rt_hw_rmb();

            pirq = rt_pic_find_ipi(&gic->parent, hwirq);
        }
        else
        {
            pirq = rt_pic_find_irq(&gic->parent, hwirq - GIC_SGI_NR);
        }

        gicv2_irq_ack(pirq);

        rt_pic_handle_isr(pirq);

        gicv2_irq_eoi(pirq);

        res = RT_TRUE;
    }

    return res;
}

static rt_err_t gicv2_enable_rmw_access(void *data)
{
    if (rt_ofw_machine_is_compatible("renesas,emev2"))
    {
        needs_rmw_access = RT_TRUE;
        return RT_EOK;
    }

    return -RT_EINVAL;
}

static const struct gic_quirk _gicv2_quirks[] =
{
    {
        .desc       = "GICv2: Broken byte access",
        .compatible = "arm,pl390",
        .init       = gicv2_enable_rmw_access,
    },
    { /* sentinel */ }
};

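/*
 * Map the GICD/GICC (plus GICH/GICV on hypervisor builds) register regions
 * described by "regs" and read the architecture revision from
 * GIC_DIST_ICPIDR2.
 */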
static rt_err_t gicv2_iomap_init(struct gicv2 *gic, rt_uint64_t *regs)
{
    rt_err_t err = RT_EOK;
    int idx;
    const char *name[] =
    {
        "Distributor",
        "CPU interfaces",
        "Virtual interface control",
        "Virtual CPU interface",
    };

    do {
        /* GICD->GICC->GICH->GICV */
        gic->dist_size = regs[1];
        gic->dist_base = rt_ioremap((void *)regs[0], gic->dist_size);
        if (!gic->dist_base)
        {
            idx = 0;
            err = -RT_ERROR;
            break;
        }

        gic->cpu_size = regs[3];
        gic->cpu_base = rt_ioremap((void *)regs[2], gic->cpu_size);
        if (!gic->cpu_base)
        {
            idx = 1;
            err = -RT_ERROR;
            break;
        }

        /* ArchRev[4:7] */
        gic->version = HWREG32(gic->dist_base + GIC_DIST_ICPIDR2) >> 4;

    #ifdef ARCH_SUPPORT_HYP
        if (gic->version == 1)
        {
            break;
        }

        gic->hyp_size = regs[5];
        gic->hyp_base = rt_ioremap((void *)regs[4], gic->hyp_size);
        if (!gic->hyp_base)
        {
            idx = 2;
            err = -RT_ERROR;
            break;
        }

        gic->vcpu_size = regs[7];
        gic->vcpu_base = rt_ioremap((void *)regs[6], gic->vcpu_size);
        if (!gic->vcpu_base)
        {
            idx = 3;
            err = -RT_ERROR;
            break;
        }
    #endif /* ARCH_SUPPORT_HYP */
    } while (0);

    if (err)
    {
        RT_UNUSED(idx);
        RT_UNUSED(name);

        LOG_E("gic[%d] %s IO[%p, %p] map fail", _gicv2_nr, name[idx], regs[idx * 2], regs[idx * 2 + 1]);
    }

    return err;
}

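/*
 * Bring up one controller: initialize the distributor, register the linear
 * IRQ range and SGIs with the PIC core, then install the trap handler.
 */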
static void gicv2_init(struct gicv2 *gic)
{
    gicv2_dist_init(gic);

    gic->parent.priv_data = gic;
    gic->parent.ops = &gicv2_ops;

    rt_pic_linear_irq(&gic->parent, gic->max_irq + 1 - GIC_SGI_NR);
    gic_common_sgi_config(gic->dist_base, &gic->parent, _gicv2_nr * GIC_SGI_NR);

    rt_pic_add_traps(gicv2_handler, gic);

    rt_pic_user_extends(&gic->parent);
}

static void gicv2_init_fail(struct gicv2 *gic)
{
    if (gic->dist_base)
    {
        rt_iounmap(gic->dist_base);
    }
    if (gic->cpu_base)
    {
        rt_iounmap(gic->cpu_base);
    }
    if (gic->hyp_base)
    {
        rt_iounmap(gic->hyp_base);
    }
    if (gic->vcpu_base)
    {
        rt_iounmap(gic->vcpu_base);
    }
    rt_memset(gic, 0, sizeof(*gic));
}

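/*
 * Devicetree probe: map the controller registers, validate the GIC version,
 * apply quirks, initialize the controller and, for GICv2, probe an optional
 * v2m MSI frame.
 */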
static rt_err_t gicv2_ofw_init(struct rt_ofw_node *np, const struct rt_ofw_node_id *id)
{
    rt_err_t err = RT_EOK;
    struct gicv2 *gic = RT_NULL;

    do {
        rt_uint64_t regs[8];

        if (_gicv2_nr >= RT_PIC_ARM_GIC_MAX_NR)
        {
            LOG_W("GICv2/v1 table is full");
            err = -RT_EFULL;
            break;
        }

        gic = &_gicv2_list[_gicv2_nr];

        rt_ofw_get_address_array(np, RT_ARRAY_SIZE(regs), regs);

        if ((err = gicv2_iomap_init(gic, regs)))
        {
            break;
        }

        if (gic->version != 1 && gic->version != 2)
        {
            LOG_E("Version = %d is not supported", gic->version);
            err = -RT_EINVAL;
            break;
        }

        gic->skip_init = rt_ofw_prop_read_bool(np, "skip-init");

        gic_common_init_quirk_ofw(np, _gicv2_quirks, gic);
        gicv2_init(gic);

        rt_ofw_data(np) = &gic->parent;

        if (gic->version == 2)
        {
        #ifdef RT_PIC_ARM_GIC_V2M
            gicv2m_ofw_probe(np, id);
        #endif
        }

        ++_gicv2_nr;
    } while (0);

    if (err && gic)
    {
        gicv2_init_fail(gic);
    }

    return err;
}

static const struct rt_ofw_node_id gicv2_ofw_ids[] =
{
    { .compatible = "arm,gic-400" },
    { .compatible = "arm,arm11mp-gic" },
    { .compatible = "arm,arm1176jzf-devchip-gic" },
    { .compatible = "arm,cortex-a15-gic" },
    { .compatible = "arm,cortex-a9-gic" },
    { .compatible = "arm,cortex-a7-gic" },
    { .compatible = "qcom,msm-8660-qgic" },
    { .compatible = "qcom,msm-qgic2" },
    { .compatible = "arm,pl390" },
    { /* sentinel */ }
};
RT_PIC_OFW_DECLARE(gicv2, gicv2_ofw_ids, gicv2_ofw_init);