/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdbool.h>

#define NR_CPUS       1
#define NR_IRQS       (207)
#define BITS_PER_BYTE 8
#define __KERNEL_DIV_ROUND_UP(n, d)     (((n) + (d) - 1) / (d))
#define BITS_PER_TYPE(type)         (sizeof(type) * BITS_PER_BYTE)
//#define DIV_ROUND_UP              __KERNEL_DIV_ROUND_UP
#ifndef BITS_TO_LONGS
#define BITS_TO_LONGS(nr)           __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#endif
#define DECLARE_BITMAP(name,bits) \
    unsigned long name[BITS_TO_LONGS(bits)]

/* Don't assign or return these: may not be this big! */
typedef struct cpumask
{
    DECLARE_BITMAP(bits, NR_CPUS);
} cpumask_t;
/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE   0x00000000
#define IRQF_TRIGGER_RISING 0x00000001
#define IRQF_TRIGGER_FALLING    0x00000002
#define IRQF_TRIGGER_HIGH   0x00000004
#define IRQF_TRIGGER_LOW    0x00000008
#define IRQF_TRIGGER_MASK   (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
                             IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE  0x00000010
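
/*
 * Usage sketch (illustrative, not part of the original header): a driver
 * that knows its line is falling-edge triggered passes the trigger flag
 * at request time. "edge_handler" and "edge_dev" are hypothetical names.
 */
#if 0
static irqreturn_t edge_handler(int irq, void *dev_id)
{
    /* acknowledge the device here, then report the interrupt as handled */
    return IRQ_HANDLED;
}

static int edge_setup(unsigned int irq, void *edge_dev)
{
    /* omitting IRQF_TRIGGER_* would keep the line "as already configured" */
    return request_irq(irq, edge_handler, IRQF_TRIGGER_FALLING,
                       "edge-dev", edge_dev);
}
#endif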

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state.  See Documentation/power/suspend-and-interrupts.rst
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                interrupt handler after suspending interrupts. For system
 *                wakeup devices users need to implement wakeup detection in
 *                their interrupt handlers.
 *
 * (A combination sketch follows the flag definitions below.)
 */
#define IRQF_SHARED     0x00000080
#define IRQF_PROBE_SHARED   0x00000100
#define __IRQF_TIMER        0x00000200
#define IRQF_PERCPU     0x00000400
#define IRQF_NOBALANCING    0x00000800
#define IRQF_IRQPOLL        0x00001000
#define IRQF_ONESHOT        0x00002000
#define IRQF_NO_SUSPEND     0x00004000
#define IRQF_FORCE_RESUME   0x00008000
#define IRQF_NO_THREAD      0x00010000
#define IRQF_EARLY_RESUME   0x00020000
#define IRQF_COND_SUSPEND   0x00040000

#define IRQF_TIMER      (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
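
/*
 * Combination sketch (illustrative, not part of the original header):
 * IRQF_SHARED lets several devices hang off one line (each passing a
 * unique dev_id), and IRQF_ONESHOT keeps the line masked until the
 * threaded handler has run. "i2c_quick_check", "i2c_thread_fn" and
 * "i2c_dev" are hypothetical names.
 */
#if 0
ret = request_threaded_irq(irq, i2c_quick_check, i2c_thread_fn,
                           IRQF_SHARED | IRQF_ONESHOT, "i2c-dev", i2c_dev);
#endif
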
#define __must_check            __attribute__((__warn_unused_result__))

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum
{
    IRQC_IS_HARDIRQ = 0,
    IRQC_IS_NESTED,
};

/**
 * enum irqreturn
 * @IRQ_NONE            interrupt was not from this device or was not handled
 * @IRQ_HANDLED         interrupt was handled by this device
 * @IRQ_WAKE_THREAD     handler requests to wake the handler thread
 */
enum irqreturn
{
    IRQ_NONE                = (0 << 0),
    IRQ_HANDLED             = (1 << 0),
    IRQ_WAKE_THREAD         = (1 << 1),
};

#define IRQACTION_NAME_MAX 16

typedef enum irqreturn irqreturn_t;
#define IRQ_RETVAL(x)   ((x) ? IRQ_HANDLED : IRQ_NONE)

typedef irqreturn_t (*irq_handler_t)(int, void *);
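
/*
 * Handler sketch (illustrative, not part of the original header): a
 * shared-line handler must check whether its device actually raised the
 * interrupt; IRQ_RETVAL() maps that truth value to IRQ_HANDLED/IRQ_NONE.
 * "struct my_dev" and "device_raised_irq" are hypothetical.
 */
#if 0
static irqreturn_t my_handler(int irq, void *dev_id)
{
    struct my_dev *dev = dev_id;
    int handled = device_raised_irq(dev);   /* poke the hardware status */

    return IRQ_RETVAL(handled);
}
#endif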

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:    interrupt handler function
 * @dev_id: cookie to identify the device
 * @next:   pointer to the next irqaction for shared interrupts
 * @secondary:  pointer to secondary irqaction (force threading)
 * @irq:    interrupt number
 * @flags:  flags (see IRQF_* above)
 * @thread_flags:   flags related to the handler thread
 * @thread_mask:    bitmask for keeping track of handler thread activity
 * @irq_nums:   number of interrupt lines associated with this action
 * @name:   name of the device
 */
struct irqaction
{
    irq_handler_t       handler;
    void                *dev_id;
    struct irqaction    *next;
    struct irqaction    *secondary;
    unsigned int        irq;
    unsigned int        flags;
    unsigned long       thread_flags;
    unsigned long       thread_mask;
    unsigned long       irq_nums;
    const char          name[IRQACTION_NAME_MAX];
};

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED    (1U << 31)
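
/*
 * Sketch (illustrative): a PCI-style driver can mark a not-routed line so
 * that a later request_irq() fails cleanly with -ENOTCONN.
 * "line_is_routed" and "pdev" are hypothetical.
 */
#if 0
if (!line_is_routed(pdev))          /* hypothetical probe-time check */
    pdev->irq = IRQ_NOTCONNECTED;   /* request_irq() returns -ENOTCONN */
#endif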

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
                     irq_handler_t thread_fn,
                     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
            const char *name, void *dev)
{
    return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
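
/*
 * Request/free sketch (illustrative): the dev_id passed to request_irq()
 * must be handed back to free_irq(), which is how a shared line knows
 * which action to remove. "card" is a hypothetical device cookie.
 */
#if 0
ret = request_irq(irq, my_handler, IRQF_SHARED, "my-card", card);
if (ret)
    goto err;
/* ... later, on teardown ... */
free_irq(irq, card);
#endif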

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
                        unsigned long flags, const char *name, void *dev_id);
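
/*
 * Context-check sketch (illustrative): on success the return value tells
 * the caller whether the handler will run in hardirq or nested-thread
 * context; negative values are plain errors.
 */
#if 0
ret = request_any_context_irq(irq, my_handler, 0, "my-dev", dev);
if (ret < 0)
    return ret;
if (ret == IRQC_IS_NESTED)
    ;   /* handler will run in a threaded (sleepable) context */
#endif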

/*
 *extern int __must_check
 *__request_percpu_irq(unsigned int irq, irq_handler_t handler,
 *                     unsigned long flags, const char *devname,
 *                     void __percpu *percpu_dev_id);
 */

extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
            const char *name, void *dev);

/*
 *static inline int __must_check
 *request_percpu_irq(unsigned int irq, irq_handler_t handler,
 *                   const char *devname, void __percpu *percpu_dev_id)
 *{
 *        return __request_percpu_irq(irq, handler, 0,
 *                                    devname, percpu_dev_id);
 *}
 *
 */
/*
 *extern int __must_check
 *request_percpu_nmi(unsigned int irq, irq_handler_t handler,
 *                   const char *devname, void __percpu *dev);
 */

extern const void *free_irq(unsigned int, void *);
//extern void free_percpu_irq(unsigned int, void __percpu *);

extern const void *free_nmi(unsigned int irq, void *dev_id);
//extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

//struct device;

/*
 *extern int __must_check
 *devm_request_threaded_irq(struct device *dev, unsigned int irq,
 *                          irq_handler_t handler, irq_handler_t thread_fn,
 *                          unsigned long irqflags, const char *devname,
 *                          void *dev_id);
 */

/*
 *static inline int __must_check
 *devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
 *                 unsigned long irqflags, const char *devname, void *dev_id)
 *{
 *        return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
 *                                         devname, dev_id);
 *}
 */

/*
 *extern int __must_check
 *devm_request_any_context_irq(struct device *dev, unsigned int irq,
 *                 irq_handler_t handler, unsigned long irqflags,
 *                 const char *devname, void *dev_id);
 */

//extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()  do { } while (0)
#else
# define local_irq_enable_in_hardirq()  local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);

/* The following functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:        Interrupt to which notification applies
 * @kref:       Reference count, for internal use
 * @work:       Work item, for internal use
 * @notify:     Function to be called on change.  This will be
 *          called in process context.
 * @release:        Function to be called on release.  This will be
 *          called in process context.  Once registered, the
 *          structure must only be freed when this function is
 *          called or later.
 */
struct irq_affinity_notify
{
    unsigned int irq;
    //struct kref kref;
    //struct work_struct work;
    //void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
    //void (*release)(struct kref *ref);
};

#define IRQ_AFFINITY_MAX_SETS  4

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:    Don't apply affinity to @pre_vectors at beginning of
 *          the MSI(-X) vector space
 * @post_vectors:   Don't apply affinity to @post_vectors at end of
 *          the MSI(-X) vector space
 * @nr_sets:        The number of interrupt sets for which affinity
 *          spreading is required
 * @set_size:       Array holding the size of each interrupt set
 * @calc_sets:      Callback for calculating the number and size
 *          of interrupt sets
 * @priv:       Private data for usage by @calc_sets, usually a
 *          pointer to driver/device specific data.
 */
struct irq_affinity
{
    unsigned int    pre_vectors;
    unsigned int    post_vectors;
    unsigned int    nr_sets;
    //unsigned int  set_size[IRQ_AFFINITY_MAX_SETS];
    //void      (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
    void        *priv;
};
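
/*
 * Descriptor sketch (illustrative): reserve one non-spread vector at each
 * end of an MSI-X range (e.g. a config interrupt before the queue vectors
 * and an error interrupt after them) and spread affinity over the rest.
 */
#if 0
struct irq_affinity affd = {
    .pre_vectors  = 1,   /* first vector: not affinity-spread */
    .post_vectors = 1,   /* last vector: not affinity-spread */
};
#endif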

/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:   cpumask to hold the affinity assignment
 * @is_managed: 1 if the interrupt is managed internally
 */
/*
 *struct irq_affinity_desc {
 *        struct cpumask    mask;
 *        unsigned int  is_managed : 1;
 *};
 */

#if (0) /*defined(CONFIG_SMP)*/

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
                              bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:    Interrupt to set affinity
 * @cpumask:    cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
    return __irq_set_affinity(irq, cpumask, false);
}

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:    Interrupt to set affinity
 * @cpumask:    cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
    return __irq_set_affinity(irq, cpumask, true);
}

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);

unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
                                       const struct irq_affinity *affd);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
    return -1;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
    return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
    return 0;
}

static inline int irq_select_affinity(unsigned int irq)
{
    return 0;
}

static inline int irq_set_affinity_hint(unsigned int irq,
                                        const struct cpumask *m)
{
    return -1;
}

/*
 *static inline int
 *irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 *{
 *        return 0;
 *}
 */

/*
 *static inline struct irq_affinity_desc *
 *irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
 *{
 *        return NULL;
 *}
 */

/*
 *static inline unsigned int
 *irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
 *                          const struct irq_affinity *affd)
 *{
 *        return maxvec;
 *}
 */

#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that a
 * particular irq context is disabled, and is the only irq-context
 * user of a lock, so that it's safe to take the lock in the
 * irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
    disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
    local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
    disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
    local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
    disable_irq(irq);
#ifdef CONFIG_LOCKDEP
    local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
    local_irq_enable();
#endif
    enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
    local_irq_restore(*flags);
#endif
    enable_irq(irq);
}
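
/*
 * Pairing sketch (illustrative): the _irqsave/_irqrestore variants keep a
 * flags word so the lockdep build can disable and restore local interrupts
 * around the critical section; on !CONFIG_LOCKDEP they collapse to plain
 * disable_irq_nosync()/enable_irq().
 */
#if 0
unsigned long flags;

disable_irq_nosync_lockdep_irqsave(irq, &flags);
/* ... touch state shared with the handler ... */
enable_irq_lockdep_irqrestore(irq, &flags);
#endif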

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
    return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
    return irq_set_irq_wake(irq, 0);
}
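
/*
 * Wakeup sketch (illustrative): a driver's suspend/resume hooks typically
 * bracket the low-power period with a wake enable/disable pair.
 * "wake_irq" is a hypothetical per-device interrupt number.
 */
#if 0
static int my_suspend(void *dev)
{
    return enable_irq_wake(wake_irq);   /* arm the line as a wake source */
}

static int my_resume(void *dev)
{
    return disable_irq_wake(wake_irq);  /* disarm it again */
}
#endif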

/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state
{
    IRQCHIP_STATE_PENDING,      /* Is interrupt pending? */
    IRQCHIP_STATE_ACTIVE,       /* Is interrupt in progress? */
    IRQCHIP_STATE_MASKED,       /* Is interrupt masked? */
    IRQCHIP_STATE_LINE_LEVEL,   /* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
                                 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
                                 bool state);
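
/*
 * State-query sketch (illustrative): peek at the controller to see whether
 * a line is currently pending without servicing it.
 */
#if 0
bool pending;

if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending) && pending)
    ;   /* the interrupt is latched at the irqchip but not yet delivered */
#endif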

#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads    (0)
#endif

#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
//#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

//#define local_softirq_pending()   (__this_cpu_read(local_softirq_pending_ref))
//#define set_softirq_pending(x)    (__this_cpu_write(local_softirq_pending_ref, (x)))
//#define or_softirq_pending(x) (__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()  do { } while(0)
#endif

/* Please avoid allocating new softirqs unless you really need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough; e.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
    HI_SOFTIRQ = 0,
    TIMER_SOFTIRQ,
    NET_TX_SOFTIRQ,
    NET_RX_SOFTIRQ,
    BLOCK_SOFTIRQ,
    IRQ_POLL_SOFTIRQ,
    TASKLET_SOFTIRQ,
    SCHED_SOFTIRQ,
    HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
                numbering. Sigh! */
    RCU_SOFTIRQ,    /* RCU should preferably always be the last softirq */

    NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char *const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
    void (*action)(struct softirq_action *);
};

//asmlinkage void do_softirq(void);
//asmlinkage void __do_softirq(void);

#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
    //__do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
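
/*
 * Softirq sketch (illustrative): an action is registered once against a
 * fixed index and later raised from interrupt context; the pending bit is
 * serviced when softirqs next run. "my_tx_action" is a hypothetical name.
 */
#if 0
static void my_tx_action(struct softirq_action *a)
{
    /* drain the per-cpu work queued before raise_softirq() */
}

/* during subsystem init: */
open_softirq(NET_TX_SOFTIRQ, my_tx_action);
/* ... later, from a hardirq handler: */
raise_softirq(NET_TX_SOFTIRQ);
#endif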

//DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 *static inline struct task_struct *this_cpu_ksoftirqd(void)
 *{
 *        return this_cpu_read(ksoftirqd);
 *}
 */

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, the tasklet is guaranteed
     to be executed on some cpu at least once afterwards.
   * If the tasklet is already scheduled but its execution has not yet
     started, it will be executed only once.
   * If the tasklet is already running on another CPU (or if schedule is
     called from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets. If a client needs inter-task
     synchronization, it must provide it with spinlocks.
   (A usage sketch follows this comment.)
 */
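
/*
 * Tasklet sketch (illustrative; the tasklet API is commented out in this
 * trimmed header, so this mirrors the full kernel interface): a hardirq
 * handler defers the bulk of its work to a tasklet, relying on the
 * guarantee that a given tasklet runs on one CPU at a time.
 */
#if 0
static void my_bottom_half(unsigned long data)
{
    /* heavy lifting runs here, outside the hardirq handler */
}

DECLARE_TASKLET(my_tasklet, my_bottom_half, 0);

static irqreturn_t my_handler(int irq, void *dev_id)
{
    tasklet_schedule(&my_tasklet);  /* at most one pending instance */
    return IRQ_HANDLED;
}
#endif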

/*
 *struct tasklet_struct
 *{
 *        struct tasklet_struct *next;
 *        unsigned long state;
 *        atomic_t count;
 *        void (*func)(unsigned long);
 *        unsigned long data;
 *};
 */

/*
 *#define DECLARE_TASKLET(name, func, data) \
 *struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
 */

/*
 *#define DECLARE_TASKLET_DISABLED(name, func, data) \
 *struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
 */


enum
{
    TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
    TASKLET_STATE_RUN   /* Tasklet is running (SMP only) */
};

//#ifdef CONFIG_SMP
#if 0
static inline int tasklet_trylock(struct tasklet_struct *t)
{
    return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
    smp_mb__before_atomic();
    clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
    while (test_bit(TASKLET_STATE_RUN, &(t)->state))
    {
        barrier();
    }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

//extern void __tasklet_schedule(struct tasklet_struct *t);

/*
 *static inline void tasklet_schedule(struct tasklet_struct *t)
 *{
 *        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
 *                __tasklet_schedule(t);
 *}
 *
 *extern void __tasklet_hi_schedule(struct tasklet_struct *t);
 *
 *static inline void tasklet_hi_schedule(struct tasklet_struct *t)
 *{
 *        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
 *                __tasklet_hi_schedule(t);
 *}
 *
 *static inline void tasklet_disable_nosync(struct tasklet_struct *t)
 *{
 *        atomic_inc(&t->count);
 *        smp_mb__after_atomic();
 *}
 *
 *static inline void tasklet_disable(struct tasklet_struct *t)
 *{
 *        tasklet_disable_nosync(t);
 *        tasklet_unlock_wait(t);
 *        smp_mb();
 *}
 *
 *static inline void tasklet_enable(struct tasklet_struct *t)
 *{
 *        smp_mb__before_atomic();
 *        atomic_dec(&t->count);
 *}
 *
 *extern void tasklet_kill(struct tasklet_struct *t);
 *extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
 *extern void tasklet_init(struct tasklet_struct *t,
 *                         void (*func)(unsigned long), unsigned long data);
 */

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * A sketch following these steps appears below.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
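
/*
 * Probe sketch (illustrative), following the numbered recipe above;
 * "device_force_interrupt", "device_ack" and "udelay" usage here are
 * hypothetical helpers, not declared by this header.
 */
#if 0
unsigned long mask;
int irq;

mask = probe_irq_on();          /* step 3: claim idle IRQs */
device_force_interrupt(dev);    /* step 4: make the device fire */
udelay(100);                    /* step 5: give it time to latch */
irq = probe_irq_off(mask);      /* step 6: 0 = none, <0 = multiple */
device_ack(dev);                /* step 7: clear the pending interrupt */
#endif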

#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
    return 0;
}
static inline int probe_irq_off(unsigned long val)
{
    return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
    return 0;
}
#else
extern unsigned long probe_irq_on(void);    /* returns 0 on failure */
extern int probe_irq_off(unsigned long);    /* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);  /* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
uint64_t irq_timings_next_event(uint64_t now);
#endif

/*
 *struct seq_file;
 *int show_interrupts(struct seq_file *p, void *v);
 *int arch_show_interrupts(struct seq_file *p, int prec);
 *
 */
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#define __irq_entry      __attribute__((__section__(".irqentry.text")))
#define __softirq_entry  \
    __attribute__((__section__(".softirqentry.text")))

#endif /* _LINUX_INTERRUPT_H */