/******************************************************************************
 * common/softirq.c
 *
 * Softirqs in Xen are only executed in an outermost activation (e.g., never
 * within an interrupt activation). This simplifies some things and generally
 * seems a good thing.
 *
 * Copyright (c) 2003, K A Fraser
 * Copyright (c) 1992, Linus Torvalds
 */

#include <xen/init.h>
#include <xen/mm.h>
#include <xen/preempt.h>
#include <xen/sched.h>
#include <xen/rcupdate.h>
#include <xen/softirq.h>

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS];
#endif

static softirq_handler softirq_handlers[NR_SOFTIRQS];

static DEFINE_PER_CPU(cpumask_t, batch_mask);
static DEFINE_PER_CPU(unsigned int, batching);

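/*
 * Core dispatch loop: repeatedly service the lowest-numbered pending softirq
 * on the current CPU, skipping any bits in @ignore_mask, until no
 * (non-ignored) work remains or the CPU has gone offline.
 */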
static void __do_softirq(unsigned long ignore_mask)
{
    unsigned int i, cpu;
    unsigned long pending;

    for ( ; ; )
    {
        /*
         * Initialise @cpu on every iteration: SCHEDULE_SOFTIRQ may move
         * us to another processor.
         */
        cpu = smp_processor_id();

        if ( rcu_pending(cpu) )
            rcu_check_callbacks(cpu);

        if ( ((pending = (softirq_pending(cpu) & ~ignore_mask)) == 0)
             || cpu_is_offline(cpu) )
            break;

        i = find_first_set_bit(pending);
        clear_bit(i, &softirq_pending(cpu));
        (*softirq_handlers[i])();
    }
}

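/*
 * Service pending softirqs on behalf of a voluntarily-polling caller, with
 * the exception of SCHEDULE_SOFTIRQ (see the comment below).
 */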
void process_pending_softirqs(void)
{
    ASSERT(!in_irq() && local_irq_is_enabled());
    /* Do not enter scheduler as it can preempt the calling context. */
    __do_softirq(1ul << SCHEDULE_SOFTIRQ);
}

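/* Service all pending softirqs, including SCHEDULE_SOFTIRQ. */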
void do_softirq(void)
{
    ASSERT_NOT_IN_ATOMIC();
    __do_softirq(0);
}

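/*
 * Register the handler for softirq @nr. Typically called once, early on, by
 * the subsystem owning that softirq number (e.g. the timer code for
 * TIMER_SOFTIRQ).
 */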
void open_softirq(int nr, softirq_handler handler)
{
    ASSERT(nr < NR_SOFTIRQS);
    softirq_handlers[nr] = handler;
}

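/*
 * Raise softirq @nr on every CPU in @mask. Remote CPUs whose bit was not
 * already pending are notified either immediately, or, when the caller is
 * batching (and not in IRQ context), by accumulating them in this CPU's
 * batch_mask for a single IPI at cpu_raise_softirq_batch_finish().
 */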
void cpumask_raise_softirq(const cpumask_t *mask, unsigned int nr)
{
    unsigned int cpu, this_cpu = smp_processor_id();
    cpumask_t send_mask, *raise_mask;

    if ( !per_cpu(batching, this_cpu) || in_irq() )
    {
        cpumask_clear(&send_mask);
        raise_mask = &send_mask;
    }
    else
        raise_mask = &per_cpu(batch_mask, this_cpu);

    for_each_cpu(cpu, mask)
        if ( !test_and_set_bit(nr, &softirq_pending(cpu)) &&
             cpu != this_cpu &&
             !arch_skip_send_event_check(cpu) )
            __cpumask_set_cpu(cpu, raise_mask);

    if ( raise_mask == &send_mask )
        smp_send_event_check_mask(raise_mask);
}

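/*
 * Raise softirq @nr on a single CPU. No notification is sent if the bit was
 * already pending, if the target is the local CPU, or if the architecture
 * indicates the event check can be skipped.
 */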
void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
{
    unsigned int this_cpu = smp_processor_id();

    if ( test_and_set_bit(nr, &softirq_pending(cpu))
         || (cpu == this_cpu)
         || arch_skip_send_event_check(cpu) )
        return;

    if ( !per_cpu(batching, this_cpu) || in_irq() )
        smp_send_event_check_cpu(cpu);
    else
        __cpumask_set_cpu(cpu, &per_cpu(batch_mask, this_cpu));
}

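/*
 * Begin batching softirq notifications on this CPU: subsequent
 * cpu_raise_softirq()/cpumask_raise_softirq() calls collect target CPUs in
 * batch_mask instead of sending one IPI per call.
 *
 * Illustrative usage sketch (@targets is an arbitrary caller-owned cpumask):
 *
 *     cpu_raise_softirq_batch_begin();
 *     for_each_cpu ( cpu, &targets )
 *         cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
 *     cpu_raise_softirq_batch_finish();
 */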
void cpu_raise_softirq_batch_begin(void)
{
    ++this_cpu(batching);
}

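/*
 * End a batch: send one event-check IPI covering the batched CPUs that still
 * have softirq work pending, then clear the batch mask.
 */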
void cpu_raise_softirq_batch_finish(void)
{
    unsigned int cpu, this_cpu = smp_processor_id();
    cpumask_t *mask = &per_cpu(batch_mask, this_cpu);

    ASSERT(per_cpu(batching, this_cpu));
    for_each_cpu ( cpu, mask )
        if ( !softirq_pending(cpu) )
            __cpumask_clear_cpu(cpu, mask);
    smp_send_event_check_mask(mask);
    cpumask_clear(mask);
    --per_cpu(batching, this_cpu);
}

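/*
 * Raise softirq @nr on the local CPU only; it is serviced the next time this
 * CPU runs do_softirq() or process_pending_softirqs().
 */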
void raise_softirq(unsigned int nr)
{
    set_bit(nr, &softirq_pending(smp_processor_id()));
}

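/* Nothing to initialise here; handlers are registered via open_softirq(). */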
void __init softirq_init(void)
{
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */