/******************************************************************************
 * common/softirq.c
 *
 * Softirqs in Xen are only executed in an outermost activation (e.g., never
 * within an interrupt activation). This simplifies some things and generally
 * seems a good thing.
 *
 * Copyright (c) 2003, K A Fraser
 * Copyright (c) 1992, Linus Torvalds
 */

#include <xen/init.h>
#include <xen/mm.h>
#include <xen/preempt.h>
#include <xen/sched.h>
#include <xen/rcupdate.h>
#include <xen/softirq.h>

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS];
#endif

static softirq_handler softirq_handlers[NR_SOFTIRQS];

static DEFINE_PER_CPU(cpumask_t, batch_mask);
static DEFINE_PER_CPU(unsigned int, batching);

static void __do_softirq(unsigned long ignore_mask)
{
    unsigned int i, cpu;
    unsigned long pending;
    bool rcu_allowed = !(ignore_mask & (1UL << RCU_SOFTIRQ));

    ASSERT(!rcu_allowed || rcu_quiesce_allowed());

    for ( ; ; )
    {
        /*
         * Initialise @cpu on every iteration: SCHEDULE_SOFTIRQ or
         * SCHED_SLAVE_SOFTIRQ may move us to another processor.
         */
        cpu = smp_processor_id();

        if ( rcu_allowed && rcu_pending(cpu) )
            rcu_check_callbacks(cpu);

        if ( ((pending = (softirq_pending(cpu) & ~ignore_mask)) == 0)
             || cpu_is_offline(cpu) )
            break;

        /* Lower-numbered softirqs take precedence. */
        i = ffsl(pending) - 1;
        clear_bit(i, &softirq_pending(cpu));
        (*softirq_handlers[i])();
    }
}

void process_pending_softirqs(void)
{
    /* Do not enter scheduler as it can preempt the calling context. */
    unsigned long ignore_mask = (1UL << SCHEDULE_SOFTIRQ) |
                                (1UL << SCHED_SLAVE_SOFTIRQ);

    /* Block RCU processing in case rcu_read_lock() is being held. */
    if ( !rcu_quiesce_allowed() )
        ignore_mask |= 1UL << RCU_SOFTIRQ;

    ASSERT(!in_irq() && local_irq_is_enabled());
    __do_softirq(ignore_mask);
}
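
/*
 * Usage sketch (hypothetical caller, not from this file): long-running
 * hypervisor work can poll this periodically so pending softirqs are not
 * starved, without the risk of being preempted by the scheduler.
 * scrub_one_page() stands in for an arbitrary unit of work:
 *
 *     for ( i = 0; i < nr_pages; i++ )
 *     {
 *         scrub_one_page(pg[i]);
 *         if ( !(i & 0xff) )
 *             process_pending_softirqs();
 *     }
 */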

void do_softirq(void)
{
    ASSERT_NOT_IN_ATOMIC();
    __do_softirq(0);
}
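
/*
 * Sketch of a typical caller (assumed, for illustration; not part of this
 * file): an idle loop drains pending softirqs before halting, where
 * safe_halt() stands in for an arch-specific idle primitive:
 *
 *     for ( ; ; )
 *     {
 *         if ( softirq_pending(smp_processor_id()) )
 *             do_softirq();
 *         else
 *             safe_halt();
 *     }
 */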

void open_softirq(int nr, softirq_handler handler)
{
    ASSERT(nr < NR_SOFTIRQS);
    softirq_handlers[nr] = handler;
}
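
/*
 * Registration sketch: subsystems install their handler once at boot.
 * EXAMPLE_SOFTIRQ and example_action() are hypothetical names:
 *
 *     static void example_action(void)
 *     {
 *         ... process this CPU's deferred work ...
 *     }
 *
 *     open_softirq(EXAMPLE_SOFTIRQ, example_action);
 */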

/*
 * Raise softirq @nr on every CPU in @mask.  IPIs are sent immediately
 * unless this CPU has batching active (see cpu_raise_softirq_batch_begin()),
 * in which case target CPUs are accumulated in batch_mask and kicked at
 * batch-finish time.
 */
void cpumask_raise_softirq(const cpumask_t *mask, unsigned int nr)
{
    unsigned int cpu, this_cpu = smp_processor_id();
    cpumask_t send_mask, *raise_mask;

    if ( !per_cpu(batching, this_cpu) || in_irq() )
    {
        cpumask_clear(&send_mask);
        raise_mask = &send_mask;
    }
    else
        raise_mask = &per_cpu(batch_mask, this_cpu);

    for_each_cpu(cpu, mask)
        if ( !arch_set_softirq(nr, cpu) && cpu != this_cpu )
            __cpumask_set_cpu(cpu, raise_mask);

    if ( raise_mask == &send_mask )
        smp_send_event_check_mask(raise_mask);
}
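
/*
 * Usage sketch (illustrative only): ask every other online CPU to run
 * through the scheduler softirq:
 *
 *     cpumask_t others;
 *
 *     cpumask_andnot(&others, &cpu_online_map,
 *                    cpumask_of(smp_processor_id()));
 *     cpumask_raise_softirq(&others, SCHEDULE_SOFTIRQ);
 */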

void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
{
    unsigned int this_cpu = smp_processor_id();

    if ( arch_set_softirq(nr, cpu) || cpu == this_cpu )
        return;

    if ( !per_cpu(batching, this_cpu) || in_irq() )
        smp_send_event_check_cpu(cpu);
    else
        __cpumask_set_cpu(cpu, &per_cpu(batch_mask, this_cpu));
}

void cpu_raise_softirq_batch_begin(void)
{
    ++this_cpu(batching);
}

/*
 * Flush the batch: CPUs whose pending bits have already been consumed need
 * no IPI any more; kick the remainder with a single
 * smp_send_event_check_mask() call.
 */
void cpu_raise_softirq_batch_finish(void)
{
    unsigned int cpu, this_cpu = smp_processor_id();
    cpumask_t *mask = &per_cpu(batch_mask, this_cpu);

    ASSERT(per_cpu(batching, this_cpu));
    for_each_cpu ( cpu, mask )
        if ( !softirq_pending(cpu) )
            __cpumask_clear_cpu(cpu, mask);
    smp_send_event_check_mask(mask);
    cpumask_clear(mask);
    --per_cpu(batching, this_cpu);
}
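
/*
 * Batching sketch (hypothetical caller): many raises targeting several CPUs
 * can be coalesced into a single IPI broadcast.  @targets is an assumed,
 * caller-provided mask:
 *
 *     cpu_raise_softirq_batch_begin();
 *     for_each_cpu ( cpu, &targets )
 *         cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
 *     cpu_raise_softirq_batch_finish();
 */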

/*
 * Raise softirq @nr on the local CPU.  Setting the pending bit is
 * sufficient: no IPI is needed for this CPU to notice its own bit.
 */
void raise_softirq(unsigned int nr)
{
    unsigned int cpu = smp_processor_id();

    set_bit(nr, &softirq_pending(cpu));
}
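
/*
 * Typical use (sketch): defer work out of a context that must not do it
 * inline, e.g. an interrupt handler requesting a reschedule:
 *
 *     raise_softirq(SCHEDULE_SOFTIRQ);
 *
 * The registered handler then runs from the next outermost softirq
 * processing point on this CPU.
 */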

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */