#ifndef __ASM_SOFTIRQ_H__
#define __ASM_SOFTIRQ_H__

#include <asm/system.h>

#define NMI_SOFTIRQ            (NR_COMMON_SOFTIRQS + 0)
#define TIME_CALIBRATE_SOFTIRQ (NR_COMMON_SOFTIRQS + 1)
#define VCPU_KICK_SOFTIRQ      (NR_COMMON_SOFTIRQS + 2)

#define MACHINE_CHECK_SOFTIRQ  (NR_COMMON_SOFTIRQS + 3)
#define HVM_DPCI_SOFTIRQ       (NR_COMMON_SOFTIRQS + 4)
#define NR_ARCH_SOFTIRQS       5
/*
 * Ensure softirq @nr is pending on @cpu.  Return true if an IPI can be
 * skipped, false if the IPI cannot be skipped.
 *
 * We use a CMPXCHG covering both __softirq_pending and in_mwait, in order to
 * set softirq @nr while also observing in_mwait in a race-free way.
 */
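/*
 * A sketch of the expected call pattern in the common softirq code (an
 * illustration only; the exact caller may differ):
 *
 *     if ( !arch_set_softirq(nr, cpu) && cpu != smp_processor_id() )
 *         smp_send_event_check_cpu(cpu);
 */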
static always_inline bool arch_set_softirq(unsigned int nr, unsigned int cpu)
{
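    /*
     * softirq_mwait_raw gives a single 64-bit view of __softirq_pending
     * (the low 32 bits) and in_mwait (starting at byte 4), as cross-checked
     * by the BUILD_BUG_ON()s below.
     */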
    uint64_t *ptr = &irq_stat[cpu].softirq_mwait_raw;
    uint64_t prev, old, new;
    unsigned int softirq = 1U << nr;

    old = ACCESS_ONCE(*ptr);

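    /*
     * Standard CMPXCHG loop: retry with the updated value on failure, and
     * bail out early if another CPU has already set the softirq bit.
     */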
    for ( ;; )
    {
        if ( old & softirq )
            /* Softirq already pending, nothing to do. */
            return true;

        new = old | softirq;

        prev = cmpxchg(ptr, old, new);
        if ( prev == old )
            break;

        old = prev;
    }

    /*
     * We have caused the softirq to become pending.  If in_mwait was set, the
     * target CPU will notice the modification and act on it.
     *
     * We can't access the in_mwait field nicely, so use some BUILD_BUG_ON()s
     * to cross-check the (1UL << 32) opencoding.
     */
    BUILD_BUG_ON(sizeof(irq_stat[0].softirq_mwait_raw) != 8);
    BUILD_BUG_ON((offsetof(irq_cpustat_t, in_mwait) -
                  offsetof(irq_cpustat_t, softirq_mwait_raw)) != 4);

    return new & (1UL << 32) /* in_mwait */;
}
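/* Tell common code that this architecture provides its own arch_set_softirq(). */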
#define arch_set_softirq arch_set_softirq

#endif /* __ASM_SOFTIRQ_H__ */