/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-08-24     GuEe-GUI     first version
 */

#include <rthw.h>
#include <rtthread.h>

#define DBG_TAG "rtdm.pic"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <drivers/pic.h>
#ifdef RT_USING_PIC_STATISTICS
#include <ktime.h>
#endif

struct irq_traps
{
    rt_list_t list;

    void *data;
    rt_bool_t (*handler)(void *);
};

/* the first RT_MAX_IPI slots are reserved for IPIs */
static int _pirq_hash_idx = RT_MAX_IPI;
static struct rt_pic_irq _pirq_hash[MAX_HANDLERS] =
{
    [0 ... MAX_HANDLERS - 1] =
    {
        .irq = -1,
        .hwirq = -1,
        .mode = RT_IRQ_MODE_NONE,
        .priority = RT_UINT32_MAX,
        .rw_lock = { },
    }
};

static RT_DEFINE_SPINLOCK(_pic_lock);
static rt_size_t _pic_name_max = sizeof("PIC");
static rt_list_t _pic_nodes = RT_LIST_OBJECT_INIT(_pic_nodes);
static rt_list_t _traps_nodes = RT_LIST_OBJECT_INIT(_traps_nodes);

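/*
 * Map a logical IRQ number to its descriptor in the global pirq table.
 * Returns RT_NULL (and logs an error) if the slot was never configured.
 */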
static struct rt_pic_irq *irq2pirq(int irq)
{
    struct rt_pic_irq *pirq = RT_NULL;

    if ((irq >= 0) && (irq < MAX_HANDLERS))
    {
        pirq = &_pirq_hash[irq];

        if (pirq->irq < 0)
        {
            pirq = RT_NULL;
        }
    }

    if (!pirq)
    {
        LOG_E("irq = %d is invalid", irq);
    }

    return pirq;
}

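/* Link a registered PIC into the global list and remember the longest controller name for list_irq formatting. */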
static void append_pic(struct rt_pic *pic)
{
    int pic_name_len = rt_strlen(pic->ops->name);

    rt_list_insert_before(&_pic_nodes, &pic->list);

    if (pic_name_len > _pic_name_max)
    {
        _pic_name_max = pic_name_len;
    }
}

void rt_pic_default_name(struct rt_pic *pic)
{
    if (pic)
    {
    #if RT_NAME_MAX > 0
        rt_strncpy(pic->parent.name, "PIC", RT_NAME_MAX - 1);
        pic->parent.name[RT_NAME_MAX - 1] = '\0';
    #else
        pic->parent.name = "PIC";
    #endif
    }
}

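/*
 * Try to interpret 'ptr' as a struct rt_pic: the PIC may sit behind a bare
 * rt_object or a full rt_device header, so step over that header and accept
 * the result only if it still carries the default "PIC" name.
 */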
struct rt_pic *rt_pic_dynamic_cast(void *ptr)
{
    struct rt_pic *pic = RT_NULL, *tmp = RT_NULL;

    if (ptr)
    {
        struct rt_object *obj = ptr;

        if (obj->type == RT_Object_Class_Unknown)
        {
            tmp = (void *)obj;
        }
        else if (obj->type == RT_Object_Class_Device)
        {
            tmp = (void *)obj + sizeof(struct rt_device);
        }
        else
        {
            tmp = (void *)obj + sizeof(struct rt_object);
        }

        if (tmp && !rt_strcmp(tmp->parent.name, "PIC"))
        {
            pic = tmp;
        }
    }

    return pic;
}

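/*
 * Reserve a linear range of 'irq_nr' logical IRQs from the global pirq table
 * for this controller; the first RT_MAX_IPI slots stay reserved for IPIs.
 */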
rt_err_t rt_pic_linear_irq(struct rt_pic *pic, rt_size_t irq_nr)
{
    rt_err_t err = RT_EOK;

    if (pic && pic->ops && pic->ops->name)
    {
        rt_ubase_t level = rt_spin_lock_irqsave(&_pic_lock);

        if (_pirq_hash_idx + irq_nr <= RT_ARRAY_SIZE(_pirq_hash))
        {
            rt_list_init(&pic->list);

            rt_pic_default_name(pic);
            pic->parent.type = RT_Object_Class_Unknown;

            pic->irq_start = _pirq_hash_idx;
            pic->irq_nr = irq_nr;
            pic->pirqs = &_pirq_hash[_pirq_hash_idx];

            _pirq_hash_idx += irq_nr;

            append_pic(pic);

            LOG_D("%s alloc irqs ranges [%d, %d]", pic->ops->name,
                    pic->irq_start, pic->irq_start + pic->irq_nr);
        }
        else
        {
            LOG_E("%s alloc %d irqs overflows the IRQ table", pic->ops->name, irq_nr);

            err = -RT_EEMPTY;
        }

        rt_spin_unlock_irqrestore(&_pic_lock, level);
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_pic_cancel_irq(struct rt_pic *pic)
{
    rt_err_t err = RT_EOK;

    if (pic && pic->pirqs)
    {
        rt_ubase_t level = rt_spin_lock_irqsave(&_pic_lock);

        /*
         * This is only to keep the system running safely;
         * unregistering a PIC is not recommended.
         */
        rt_list_remove(&pic->list);

        rt_spin_unlock_irqrestore(&_pic_lock, level);
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

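/* Bind a pirq slot to a controller and hardware IRQ, initializing its lists on first use. */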
static void config_pirq(struct rt_pic *pic, struct rt_pic_irq *pirq, int irq, int hwirq)
{
    rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

    if (pirq->irq < 0)
    {
        rt_list_init(&pirq->list);
        rt_list_init(&pirq->children_nodes);
        rt_list_init(&pirq->isr.list);
    }
    else if (pirq->pic != pic)
    {
        RT_ASSERT(rt_list_isempty(&pirq->list) == RT_TRUE);
        RT_ASSERT(rt_list_isempty(&pirq->children_nodes) == RT_TRUE);
        RT_ASSERT(rt_list_isempty(&pirq->isr.list) == RT_TRUE);
    }

    pirq->irq = irq;
    pirq->hwirq = hwirq;

    pirq->pic = pic;

    rt_spin_unlock_irqrestore(&pirq->rw_lock, level);
}

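/*
 * Map an IPI (index below RT_MAX_IPI) to a hardware IRQ and mark it as
 * targeting every CPU; returns the logical IPI number or -RT_EINVAL.
 */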
int rt_pic_config_ipi(struct rt_pic *pic, int ipi_index, int hwirq)
{
    int ipi = ipi_index;
    struct rt_pic_irq *pirq;

    if (pic && ipi < RT_MAX_IPI && hwirq >= 0 && pic->ops->irq_send_ipi)
    {
        pirq = &_pirq_hash[ipi];
        config_pirq(pic, pirq, ipi, hwirq);

        for (int cpuid = 0; cpuid < RT_CPUS_NR; ++cpuid)
        {
            RT_IRQ_AFFINITY_SET(pirq->affinity, cpuid);
        }

        LOG_D("%s config %s %d to hwirq %d", pic->ops->name, "ipi", ipi, hwirq);
    }
    else
    {
        ipi = -RT_EINVAL;
    }

    return ipi;
}

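/* Map a controller-relative IRQ index to a hardware IRQ; returns the logical IRQ number on success. */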
int rt_pic_config_irq(struct rt_pic *pic, int irq_index, int hwirq)
{
    int irq;

    if (pic && hwirq >= 0)
    {
        irq = pic->irq_start + irq_index;

        if (irq >= 0 && irq < MAX_HANDLERS)
        {
            config_pirq(pic, &_pirq_hash[irq], irq, hwirq);

            LOG_D("%s config %s %d to hwirq %d", pic->ops->name, "irq", irq, hwirq);
        }
        else
        {
            irq = -RT_ERROR;
        }
    }
    else
    {
        irq = -RT_EINVAL;
    }

    return irq;
}

struct rt_pic_irq *rt_pic_find_ipi(struct rt_pic *pic, int ipi_index)
{
    struct rt_pic_irq *pirq = &_pirq_hash[ipi_index];

    RT_ASSERT(ipi_index < RT_MAX_IPI);
    RT_ASSERT(pirq->pic == pic);

    return pirq;
}

struct rt_pic_irq *rt_pic_find_pirq(struct rt_pic *pic, int irq)
{
    if (pic && irq >= pic->irq_start && irq < pic->irq_start + pic->irq_nr)
    {
        return &pic->pirqs[irq - pic->irq_start];
    }

    return RT_NULL;
}

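/*
 * Chain this IRQ onto a parent IRQ for cascaded controllers: the child
 * inherits the parent's priority and affinity, and if the controller routes
 * IRQs (RT_PIC_F_IRQ_ROUTING) the child is linked into the parent's children
 * list so rt_pic_handle_isr can dispatch it.
 */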
rt_err_t rt_pic_cascade(struct rt_pic_irq *pirq, int parent_irq)
{
    rt_err_t err = RT_EOK;

    if (pirq && !pirq->parent && parent_irq >= 0)
    {
        struct rt_pic_irq *parent;

        rt_spin_lock(&pirq->rw_lock);

        parent = irq2pirq(parent_irq);

        if (parent)
        {
            pirq->parent = parent;
            pirq->priority = parent->priority;
            rt_memcpy(&pirq->affinity, &parent->affinity, sizeof(pirq->affinity));
        }

        rt_spin_unlock(&pirq->rw_lock);

        if (parent && pirq->pic->ops->flags & RT_PIC_F_IRQ_ROUTING)
        {
            rt_spin_lock(&parent->rw_lock);

            rt_list_insert_before(&parent->children_nodes, &pirq->list);

            rt_spin_unlock(&parent->rw_lock);
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_pic_uncascade(struct rt_pic_irq *pirq)
{
    rt_err_t err = RT_EOK;

    if (pirq && pirq->parent)
    {
        struct rt_pic_irq *parent;

        rt_spin_lock(&pirq->rw_lock);

        parent = pirq->parent;
        pirq->parent = RT_NULL;

        rt_spin_unlock(&pirq->rw_lock);

        if (parent && pirq->pic->ops->flags & RT_PIC_F_IRQ_ROUTING)
        {
            rt_spin_lock(&parent->rw_lock);

            rt_list_remove(&pirq->list);

            rt_spin_unlock(&parent->rw_lock);
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

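/*
 * Attach an ISR to a logical IRQ. The first handler is stored inline in
 * pirq->isr; any further handlers are allocated and chained on pirq->isr.list
 * so several drivers can share one IRQ line.
 *
 * A minimal usage sketch (the handler name, uid and irq value below are
 * hypothetical; flags = 0 means no special attach flags):
 *
 *   static void demo_isr(int irq, void *param)
 *   {
 *       rt_kprintf("irq %d fired\n", irq);
 *   }
 *
 *   rt_pic_attach_irq(irq, demo_isr, RT_NULL, "demo", 0);
 *   rt_pic_irq_unmask(irq);
 */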
rt_err_t rt_pic_attach_irq(int irq, rt_isr_handler_t handler, void *uid, const char *name, int flags)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (handler && name && (pirq = irq2pirq(irq)))
    {
        struct rt_pic_isr *isr = RT_NULL;
        rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

        err = RT_EOK;

        if (!pirq->isr.action.handler)
        {
            /* first attach */
            isr = &pirq->isr;
            rt_list_init(&isr->list);
        }
        else
        {
            rt_spin_unlock_irqrestore(&pirq->rw_lock, level);

            if ((isr = rt_malloc(sizeof(*isr))))
            {
                rt_list_init(&isr->list);

                level = rt_spin_lock_irqsave(&pirq->rw_lock);

                rt_list_insert_after(&pirq->isr.list, &isr->list);
            }
            else
            {
                LOG_E("No memory to save '%s' isr", name);
                err = -RT_ERROR;
            }
        }

        if (!err)
        {
            isr->flags = flags;
            isr->action.handler = handler;
            isr->action.param = uid;
        #ifdef RT_USING_INTERRUPT_INFO
            isr->action.counter = 0;
            rt_strncpy(isr->action.name, name, RT_NAME_MAX - 1);
            isr->action.name[RT_NAME_MAX - 1] = '\0';
        #ifdef RT_USING_SMP
            rt_memset(isr->action.cpu_counter, 0, sizeof(isr->action.cpu_counter));
        #endif
        #endif

            rt_spin_unlock_irqrestore(&pirq->rw_lock, level);
        }
    }

    return err;
}

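/*
 * Detach the ISR identified by 'uid' (the parameter passed at attach time).
 * If it was the inline ISR, the next chained handler (if any) is promoted
 * into pirq->isr and its now-unused node is freed.
 */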
rt_err_t rt_pic_detach_irq(int irq, void *uid)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_bool_t will_free = RT_FALSE;
        struct rt_pic_isr *isr = RT_NULL;
        rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

        isr = &pirq->isr;

        if (isr->action.param == uid)
        {
            if (rt_list_isempty(&isr->list))
            {
                isr->action.handler = RT_NULL;
                isr->action.param = RT_NULL;
            }
            else
            {
                struct rt_pic_isr *next_isr = rt_list_first_entry(&isr->list, struct rt_pic_isr, list);

                rt_list_remove(&next_isr->list);

                isr->action.handler = next_isr->action.handler;
                isr->action.param = next_isr->action.param;
            #ifdef RT_USING_INTERRUPT_INFO
                isr->action.counter = next_isr->action.counter;
                rt_strncpy(isr->action.name, next_isr->action.name, RT_NAME_MAX);
            #ifdef RT_USING_SMP
                rt_memcpy(isr->action.cpu_counter, next_isr->action.cpu_counter, sizeof(next_isr->action.cpu_counter));
            #endif
            #endif

                isr = next_isr;
                will_free = RT_TRUE;
            }

            err = RT_EOK;
        }
        else
        {
            rt_list_for_each_entry(isr, &pirq->isr.list, list)
            {
                if (isr->action.param == uid)
                {
                    err = RT_EOK;

                    will_free = RT_TRUE;
                    rt_list_remove(&isr->list);
                    break;
                }
            }
        }

        rt_spin_unlock_irqrestore(&pirq->rw_lock, level);

        if (will_free)
        {
            rt_free(isr);
        }
    }

    return err;
}

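/* Register a trap handler polled by rt_pic_do_traps(); the first handler returning RT_TRUE claims the event. */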
rt_err_t rt_pic_add_traps(rt_bool_t (*handler)(void *), void *data)
{
    rt_err_t err = -RT_EINVAL;

    if (handler)
    {
        struct irq_traps *traps = rt_malloc(sizeof(*traps));

        if (traps)
        {
            rt_ubase_t level = rt_hw_interrupt_disable();

            rt_list_init(&traps->list);

            traps->data = data;
            traps->handler = handler;

            rt_list_insert_before(&_traps_nodes, &traps->list);
            err = RT_EOK;

            rt_hw_interrupt_enable(level);
        }
        else
        {
            LOG_E("No memory to save '%p' handler", handler);
            err = -RT_ENOMEM;
        }
    }

    return err;
}

rt_err_t rt_pic_do_traps(void)
{
    rt_err_t err = -RT_ERROR;
    struct irq_traps *traps;

    rt_interrupt_enter();

    rt_list_for_each_entry(traps, &_traps_nodes, list)
    {
        if (traps->handler(traps->data))
        {
            err = RT_EOK;

            break;
        }
    }

    rt_interrupt_leave();

    return err;
}

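/*
 * Dispatch one interrupt: first recurse into any cascaded child IRQs
 * (with ack/eoi around each), then run the inline ISR and every chained
 * ISR, updating the optional counters and per-IRQ timing statistics.
 */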
rt_err_t rt_pic_handle_isr(struct rt_pic_irq *pirq)
{
    rt_err_t err = -RT_EEMPTY;
    rt_list_t *handler_nodes;
    struct rt_irq_desc *action;
#ifdef RT_USING_PIC_STATISTICS
    struct timespec ts;
    rt_ubase_t irq_time_ns;
    rt_ubase_t current_irq_begin;
#endif

    RT_ASSERT(pirq != RT_NULL);
    RT_ASSERT(pirq->pic != RT_NULL);

#ifdef RT_USING_PIC_STATISTICS
    rt_ktime_boottime_get_ns(&ts);
    current_irq_begin = ts.tv_sec * (1000UL * 1000 * 1000) + ts.tv_nsec;
#endif

    handler_nodes = &pirq->isr.list;
    action = &pirq->isr.action;

    if (!rt_list_isempty(&pirq->children_nodes))
    {
        struct rt_pic_irq *child;

        rt_list_for_each_entry(child, &pirq->children_nodes, list)
        {
            if (child->pic->ops->irq_ack)
            {
                child->pic->ops->irq_ack(child);
            }

            err = rt_pic_handle_isr(child);

            if (child->pic->ops->irq_eoi)
            {
                child->pic->ops->irq_eoi(child);
            }
        }
    }

    if (action->handler)
    {
        action->handler(pirq->irq, action->param);
    #ifdef RT_USING_INTERRUPT_INFO
        action->counter++;
    #ifdef RT_USING_SMP
        action->cpu_counter[rt_hw_cpu_id()]++;
    #endif
    #endif

        if (!rt_list_isempty(handler_nodes))
        {
            struct rt_pic_isr *isr;

            rt_list_for_each_entry(isr, handler_nodes, list)
            {
                action = &isr->action;

                RT_ASSERT(action->handler != RT_NULL);

                action->handler(pirq->irq, action->param);
            #ifdef RT_USING_INTERRUPT_INFO
                action->counter++;
            #ifdef RT_USING_SMP
                action->cpu_counter[rt_hw_cpu_id()]++;
            #endif
            #endif
            }
        }

        err = RT_EOK;
    }

#ifdef RT_USING_PIC_STATISTICS
    rt_ktime_boottime_get_ns(&ts);
    irq_time_ns = ts.tv_sec * (1000UL * 1000 * 1000) + ts.tv_nsec - current_irq_begin;
    pirq->stat.sum_irq_time_ns += irq_time_ns;
    if (irq_time_ns < pirq->stat.min_irq_time_ns || pirq->stat.min_irq_time_ns == 0)
    {
        pirq->stat.min_irq_time_ns = irq_time_ns;
    }
    if (irq_time_ns > pirq->stat.max_irq_time_ns)
    {
        pirq->stat.max_irq_time_ns = irq_time_ns;
    }
#endif

    return err;
}

rt_weak rt_err_t rt_pic_user_extends(struct rt_pic *pic)
{
    return -RT_ENOSYS;
}

rt_err_t rt_pic_irq_init(void)
{
    rt_err_t err = RT_EOK;
    struct rt_pic *pic;

    rt_list_for_each_entry(pic, &_pic_nodes, list)
    {
        if (pic->ops->irq_init)
        {
            err = pic->ops->irq_init(pic);

            if (err)
            {
                LOG_E("PIC = %s init fail", pic->ops->name);
                break;
            }
        }
    }

    return err;
}

rt_err_t rt_pic_irq_finit(void)
{
    rt_err_t err = RT_EOK;
    struct rt_pic *pic;

    rt_list_for_each_entry(pic, &_pic_nodes, list)
    {
        if (pic->ops->irq_finit)
        {
            err = pic->ops->irq_finit(pic);

            if (err)
            {
                LOG_E("PIC = %s finit fail", pic->ops->name);
                break;
            }
        }
    }

    return err;
}

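/*
 * Per-IRQ operation wrappers: translate a logical IRQ number into its pirq
 * and forward enable/disable/ack/mask/unmask/eoi to the owning controller,
 * holding the pirq lock around the hardware access.
 */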
void rt_pic_irq_enable(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_enable)
    {
        pirq->pic->ops->irq_enable(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_disable(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_disable)
    {
        pirq->pic->ops->irq_disable(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_ack(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_ack)
    {
        pirq->pic->ops->irq_ack(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_mask(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_mask)
    {
        pirq->pic->ops->irq_mask(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_unmask(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_unmask)
    {
        pirq->pic->ops->irq_unmask(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_eoi(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_eoi)
    {
        pirq->pic->ops->irq_eoi(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

rt_err_t rt_pic_irq_set_priority(int irq, rt_uint32_t priority)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_set_priority)
        {
            err = pirq->pic->ops->irq_set_priority(pirq, priority);

            if (!err)
            {
                pirq->priority = priority;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_uint32_t rt_pic_irq_get_priority(int irq)
{
    rt_uint32_t priority = RT_UINT32_MAX;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        priority = pirq->priority;

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return priority;
}

rt_err_t rt_pic_irq_set_affinity(int irq, rt_bitmap_t *affinity)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (affinity && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_set_affinity)
        {
            err = pirq->pic->ops->irq_set_affinity(pirq, affinity);

            if (!err)
            {
                rt_memcpy(pirq->affinity, affinity, sizeof(pirq->affinity));
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_err_t rt_pic_irq_get_affinity(int irq, rt_bitmap_t *out_affinity)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (out_affinity && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        rt_memcpy(out_affinity, pirq->affinity, sizeof(pirq->affinity));
        err = RT_EOK;

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_err_t rt_pic_irq_set_triger_mode(int irq, rt_uint32_t mode)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if ((~mode & RT_IRQ_MODE_MASK) && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_set_triger_mode)
        {
            err = pirq->pic->ops->irq_set_triger_mode(pirq, mode);

            if (!err)
            {
                pirq->mode = mode;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_uint32_t rt_pic_irq_get_triger_mode(int irq)
{
    rt_uint32_t mode = RT_UINT32_MAX;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        mode = pirq->mode;

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return mode;
}

void rt_pic_irq_send_ipi(int irq, rt_bitmap_t *cpumask)
{
    struct rt_pic_irq *pirq;

    if (cpumask && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_send_ipi)
        {
            pirq->pic->ops->irq_send_ipi(pirq, cpumask);
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }
}

rt_err_t rt_pic_irq_set_state_raw(struct rt_pic *pic, int hwirq, int type, rt_bool_t state)
{
    rt_err_t err;

    if (pic && hwirq >= 0)
    {
        if (pic->ops->irq_set_state)
        {
            err = pic->ops->irq_set_state(pic, hwirq, type, state);
        }
        else
        {
            err = -RT_ENOSYS;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_pic_irq_get_state_raw(struct rt_pic *pic, int hwirq, int type, rt_bool_t *out_state)
{
    rt_err_t err;

    if (pic && hwirq >= 0)
    {
        if (pic->ops->irq_get_state)
        {
            rt_bool_t state;

            if (!(err = pic->ops->irq_get_state(pic, hwirq, type, &state)) && out_state)
            {
                *out_state = state;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_pic_irq_set_state(int irq, int type, rt_bool_t state)
{
    rt_err_t err;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);
    err = rt_pic_irq_set_state_raw(pirq->pic, pirq->hwirq, type, state);
    rt_hw_spin_unlock(&pirq->rw_lock.lock);

    return err;
}

rt_err_t rt_pic_irq_get_state(int irq, int type, rt_bool_t *out_state)
{
    rt_err_t err;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);
    err = rt_pic_irq_get_state_raw(pirq->pic, pirq->hwirq, type, out_state);
    rt_hw_spin_unlock(&pirq->rw_lock.lock);

    return err;
}

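/*
 * Parent helpers for cascaded controllers: forward an operation to the
 * controller that owns pirq->parent. Callers must ensure the parent exists.
 */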
void rt_pic_irq_parent_enable(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_enable)
    {
        pirq->pic->ops->irq_enable(pirq);
    }
}

void rt_pic_irq_parent_disable(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_disable)
    {
        pirq->pic->ops->irq_disable(pirq);
    }
}

void rt_pic_irq_parent_ack(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_ack)
    {
        pirq->pic->ops->irq_ack(pirq);
    }
}

void rt_pic_irq_parent_mask(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_mask)
    {
        pirq->pic->ops->irq_mask(pirq);
    }
}

void rt_pic_irq_parent_unmask(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_unmask)
    {
        pirq->pic->ops->irq_unmask(pirq);
    }
}

void rt_pic_irq_parent_eoi(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_eoi)
    {
        pirq->pic->ops->irq_eoi(pirq);
    }
}

rt_err_t rt_pic_irq_parent_set_priority(struct rt_pic_irq *pirq, rt_uint32_t priority)
{
    rt_err_t err = -RT_ENOSYS;

    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_set_priority)
    {
        if (!(err = pirq->pic->ops->irq_set_priority(pirq, priority)))
        {
            pirq->priority = priority;
        }
    }

    return err;
}

rt_err_t rt_pic_irq_parent_set_affinity(struct rt_pic_irq *pirq, rt_bitmap_t *affinity)
{
    rt_err_t err = -RT_ENOSYS;

    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_set_affinity)
    {
        if (!(err = pirq->pic->ops->irq_set_affinity(pirq, affinity)))
        {
            rt_memcpy(pirq->affinity, affinity, sizeof(pirq->affinity));
        }
    }

    return err;
}

rt_err_t rt_pic_irq_parent_set_triger_mode(struct rt_pic_irq *pirq, rt_uint32_t mode)
{
    rt_err_t err = -RT_ENOSYS;

    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_set_triger_mode)
    {
        if (!(err = pirq->pic->ops->irq_set_triger_mode(pirq, mode)))
        {
            pirq->mode = mode;
        }
    }

    return err;
}

#ifdef RT_USING_OFW
RT_OFW_STUB_RANGE_EXPORT(pic, _pic_ofw_start, _pic_ofw_end);

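/*
 * Probe every device tree node carrying an "interrupt-controller" property
 * against the exported PIC OFW stubs so matching drivers register themselves.
 */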
static rt_err_t ofw_pic_init(void)
{
    struct rt_ofw_node *ic_np;

    rt_ofw_foreach_node_by_prop(ic_np, "interrupt-controller")
    {
        rt_ofw_stub_probe_range(ic_np, &_pic_ofw_start, &_pic_ofw_end);
    }

    return RT_EOK;
}
#else
static rt_err_t ofw_pic_init(void)
{
    return RT_EOK;
}
#endif /* !RT_USING_OFW */

rt_err_t rt_pic_init(void)
{
    rt_err_t err;

    LOG_D("init start");

    err = ofw_pic_init();

    LOG_D("init end");

    return err;
}

#if defined(RT_USING_CONSOLE) && defined(RT_USING_MSH)
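/* msh command: print configured IRQs; pass "all" to include IRQs that have no handler attached. */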
static int list_irq(int argc, char **argv)
{
    rt_size_t irq_nr = 0;
    rt_bool_t dump_all = RT_FALSE;
    const char *const irq_modes[] =
    {
        [RT_IRQ_MODE_NONE] = "None",
        [RT_IRQ_MODE_EDGE_RISING] = "Edge-Rising",
        [RT_IRQ_MODE_EDGE_FALLING] = "Edge-Falling",
        [RT_IRQ_MODE_EDGE_BOTH] = "Edge-Both",
        [RT_IRQ_MODE_LEVEL_HIGH] = "Level-High",
        [RT_IRQ_MODE_LEVEL_LOW] = "Level-Low",
    };
    static char info[RT_CONSOLEBUF_SIZE];
#ifdef RT_USING_SMP
    static char cpumask[RT_CPUS_NR + 1] = { [RT_CPUS_NR] = '\0' };
#endif

    if (argc > 1)
    {
        if (!rt_strcmp(argv[1], "all"))
        {
            dump_all = RT_TRUE;
        }
    }

    rt_kprintf("%-*.s %-*.s %s %-*.s %-*.s %-*.s %-*.sUsers%-*.s",
            6, "IRQ",
            6, "HW-IRQ",
            "MSI",
            _pic_name_max, "PIC",
            12, "Mode",
        #ifdef RT_USING_SMP
            RT_CPUS_NR, "CPUs",
        #else
            0, 0,
        #endif
        #ifdef RT_USING_INTERRUPT_INFO
            11, "Count",
            5, ""
        #else
            0, 0,
            10, "-Number"
        #endif
            );

#if defined(RT_USING_SMP) && defined(RT_USING_INTERRUPT_INFO)
    for (int i = 0; i < RT_CPUS_NR; i++)
    {
        rt_kprintf(" cpu%2d     ", i);
    }
#endif

#ifdef RT_USING_PIC_STATISTICS
    rt_kprintf(" max/ns      avg/ns      min/ns");
#endif

    rt_kputs("\n");

    for (int i = 0; i < RT_ARRAY_SIZE(_pirq_hash); ++i)
    {
        struct rt_pic_irq *pirq = &_pirq_hash[i];

        if (!pirq->pic || !(dump_all || pirq->isr.action.handler))
        {
            continue;
        }

        rt_snprintf(info, sizeof(info), "%-6d %-6d %c   %-*.s %-*.s ",
                pirq->irq,
                pirq->hwirq,
                pirq->msi_desc ? 'Y' : 'N',
                _pic_name_max, pirq->pic->ops->name,
                12, irq_modes[pirq->mode]);

    #ifdef RT_USING_SMP
        for (int group = 0, id = 0; group < RT_ARRAY_SIZE(pirq->affinity); ++group)
        {
            rt_bitmap_t mask = pirq->affinity[group];

            for (int idx = 0; id < RT_CPUS_NR && idx < RT_BITMAP_BIT_LEN(1); ++idx, ++id)
            {
                cpumask[RT_ARRAY_SIZE(cpumask) - id - 2] = '0' + ((mask >> idx) & 1);
            }
        }
    #endif /* RT_USING_SMP */

        rt_kputs(info);
    #ifdef RT_USING_SMP
        rt_kputs(cpumask);
    #endif

    #ifdef RT_USING_INTERRUPT_INFO
        rt_kprintf(" %-10d ", pirq->isr.action.counter);
        rt_kprintf("%-*.s", 10, pirq->isr.action.name);
    #ifdef RT_USING_SMP
        for (int cpuid = 0; cpuid < RT_CPUS_NR; cpuid++)
        {
            rt_kprintf(" %-10d", pirq->isr.action.cpu_counter[cpuid]);
        }
    #endif
    #ifdef RT_USING_PIC_STATISTICS
        rt_kprintf(" %-10d  %-10d  %-10d", pirq->stat.max_irq_time_ns,
                pirq->isr.action.counter ? pirq->stat.sum_irq_time_ns / pirq->isr.action.counter : 0,
                pirq->stat.min_irq_time_ns);
    #endif
        rt_kputs("\n");

        if (!rt_list_isempty(&pirq->isr.list))
        {
            struct rt_pic_isr *repeat_isr;

            rt_list_for_each_entry(repeat_isr, &pirq->isr.list, list)
            {
                rt_kputs(info);
            #ifdef RT_USING_SMP
                rt_kputs(cpumask);
            #endif
                rt_kprintf(" %-10d ", repeat_isr->action.counter);
                rt_kprintf("%-*.s", 10, repeat_isr->action.name);
            #ifdef RT_USING_SMP
                for (int cpuid = 0; cpuid < RT_CPUS_NR; cpuid++)
                {
                    rt_kprintf(" %-10d", repeat_isr->action.cpu_counter[cpuid]);
                }
            #endif
            #ifdef RT_USING_PIC_STATISTICS
                rt_kprintf(" ---         ---         ---");
            #endif
                rt_kputs("\n");
            }
        }
    #else
        rt_kprintf(" %d\n", rt_list_len(&pirq->isr.list));
    #endif

        ++irq_nr;
    }

    rt_kprintf("%d IRQs found\n", irq_nr);

    return 0;
}
MSH_CMD_EXPORT(list_irq, list irq information or pass the argument all to dump every irq);
#endif /* RT_USING_CONSOLE && RT_USING_MSH */