/******************************************************************************
 * tasklet.c
 *
 * Tasklets are dynamically-allocatable tasks run in either VCPU context
 * (specifically, the idle VCPU's context) or in softirq context, on at most
 * one CPU at a time. Softirq versus VCPU context execution is specified
 * during per-tasklet initialisation.
 *
 * Copyright (c) 2010, Citrix Systems, Inc.
 * Copyright (c) 1992, Linus Torvalds
 *
 * Authors:
 *    Keir Fraser <keir@xen.org>
 */
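
/*
 * Illustrative usage sketch (hypothetical names; the calls are the public
 * interface implemented below and declared in xen/tasklet.h). A subsystem
 * embeds a struct tasklet, initialises it once, schedules it whenever work
 * is pending, and kills it before freeing the containing structure:
 *
 *     static void my_work(unsigned long data);
 *     static struct tasklet my_tasklet;
 *
 *     tasklet_init(&my_tasklet, my_work, 0);
 *     tasklet_schedule(&my_tasklet);
 *     ...
 *     tasklet_kill(&my_tasklet);
 *
 * softirq_tasklet_init() selects softirq-context execution instead, and
 * tasklet_schedule_on_cpu() targets a specific CPU.
 */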

#include <xen/init.h>
#include <xen/sched.h>
#include <xen/softirq.h>
#include <xen/tasklet.h>
#include <xen/cpu.h>

/* Some subsystems call into us before we are initialised. We ignore them. */
static bool_t tasklets_initialised;

DEFINE_PER_CPU(unsigned long, tasklet_work_to_do);

static DEFINE_PER_CPU(struct list_head, tasklet_list);
static DEFINE_PER_CPU(struct list_head, softirq_tasklet_list);

/* Protects all lists and tasklet structures. */
static DEFINE_SPINLOCK(tasklet_lock);

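/*
 * Put a tasklet on the per-CPU list of the CPU it is scheduled on, raising
 * TASKLET_SOFTIRQ (softirq tasklets) or SCHEDULE_SOFTIRQ (ordinary tasklets,
 * which run from the idle VCPU) on that CPU when this is the first pending
 * item. The caller must hold tasklet_lock.
 */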
static void tasklet_enqueue(struct tasklet *t)
{
    unsigned int cpu = t->scheduled_on;

    if ( t->is_softirq )
    {
        struct list_head *list = &per_cpu(softirq_tasklet_list, cpu);
        bool_t was_empty = list_empty(list);

        list_add_tail(&t->list, list);
        if ( was_empty )
            cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
    }
    else
    {
        unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);

        list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
        if ( !test_and_set_bit(_TASKLET_enqueued, work_to_do) )
            cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
    }
}

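/*
 * (Re)schedule a tasklet to run on a particular CPU. If the tasklet is
 * currently running, only record the target CPU: do_tasklet_work() will
 * re-enqueue it there once the handler returns.
 */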
void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
{
    unsigned long flags;

    spin_lock_irqsave(&tasklet_lock, flags);

    if ( tasklets_initialised && !t->is_dead )
    {
        t->scheduled_on = cpu;
        if ( !t->is_running )
        {
            list_del(&t->list);
            tasklet_enqueue(t);
        }
    }

    spin_unlock_irqrestore(&tasklet_lock, flags);
}

void tasklet_schedule(struct tasklet *t)
{
    tasklet_schedule_on_cpu(t, smp_processor_id());
}

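/*
 * Run at most one tasklet from the given per-CPU list. Called with
 * tasklet_lock held; the lock is dropped around the handler invocation and
 * re-acquired afterwards.
 */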
static void do_tasklet_work(unsigned int cpu, struct list_head *list)
{
    struct tasklet *t;

    if ( unlikely(list_empty(list) || cpu_is_offline(cpu)) )
        return;

    t = list_entry(list->next, struct tasklet, list);
    list_del_init(&t->list);

    BUG_ON(t->is_dead || t->is_running || (t->scheduled_on != cpu));
    t->scheduled_on = -1;
    t->is_running = 1;

    spin_unlock_irq(&tasklet_lock);
    sync_local_execstate();
    t->func(t->data);
    spin_lock_irq(&tasklet_lock);

    t->is_running = 0;

    if ( t->scheduled_on >= 0 )
    {
        BUG_ON(t->is_dead || !list_empty(&t->list));
        tasklet_enqueue(t);
    }
}

/* VCPU context work */
void do_tasklet(void)
{
    unsigned int cpu = smp_processor_id();
    unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
    struct list_head *list = &per_cpu(tasklet_list, cpu);

    /*
     * We want to be sure any caller has checked that a tasklet is both
     * enqueued and scheduled, before calling this. And, if the caller has
     * actually checked, it's not an issue that we are outside of the
     * critical region, in fact:
     * - TASKLET_enqueued is cleared only here,
     * - TASKLET_scheduled is only cleared when schedule() finds it set,
     *   without TASKLET_enqueued being set as well.
     */
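
    /*
     * Caller-side sketch (illustrative only; the real call site is in the
     * per-arch idle loop):
     *
     *     if ( tasklet_work_to_do(cpu) )
     *         do_tasklet();
     */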
    ASSERT(tasklet_work_to_do(cpu));

    spin_lock_irq(&tasklet_lock);

    do_tasklet_work(cpu, list);

    if ( list_empty(list) )
    {
        clear_bit(_TASKLET_enqueued, work_to_do);
        raise_softirq(SCHEDULE_SOFTIRQ);
    }

    spin_unlock_irq(&tasklet_lock);
}

/* Softirq context work */
static void tasklet_softirq_action(void)
{
    unsigned int cpu = smp_processor_id();
    struct list_head *list = &per_cpu(softirq_tasklet_list, cpu);

    spin_lock_irq(&tasklet_lock);

    do_tasklet_work(cpu, list);

    if ( !list_empty(list) && !cpu_is_offline(cpu) )
        raise_softirq(TASKLET_SOFTIRQ);

    spin_unlock_irq(&tasklet_lock);
}

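/*
 * Take a tasklet out of service: dequeue it if pending, mark it dead so it
 * cannot be rescheduled, and spin until any in-flight invocation on another
 * CPU has completed.
 */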
void tasklet_kill(struct tasklet *t)
{
    unsigned long flags;

    spin_lock_irqsave(&tasklet_lock, flags);

    if ( !list_empty(&t->list) )
    {
        BUG_ON(t->is_dead || t->is_running || (t->scheduled_on < 0));
        list_del_init(&t->list);
    }

    t->scheduled_on = -1;
    t->is_dead = 1;

    while ( t->is_running )
    {
        spin_unlock_irqrestore(&tasklet_lock, flags);
        cpu_relax();
        spin_lock_irqsave(&tasklet_lock, flags);
    }

    spin_unlock_irqrestore(&tasklet_lock, flags);
}

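/*
 * Move all tasklets still queued for a CPU that has gone offline (or whose
 * bring-up was cancelled) onto the current CPU, so no queued work is lost.
 */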
static void migrate_tasklets_from_cpu(unsigned int cpu, struct list_head *list)
{
    unsigned long flags;
    struct tasklet *t;

    spin_lock_irqsave(&tasklet_lock, flags);

    while ( !list_empty(list) )
    {
        t = list_entry(list->next, struct tasklet, list);
        BUG_ON(t->scheduled_on != cpu);
        t->scheduled_on = smp_processor_id();
        list_del(&t->list);
        tasklet_enqueue(t);
    }

    spin_unlock_irqrestore(&tasklet_lock, flags);
}

void tasklet_init(
    struct tasklet *t, void (*func)(unsigned long), unsigned long data)
{
    memset(t, 0, sizeof(*t));
    INIT_LIST_HEAD(&t->list);
    t->scheduled_on = -1;
    t->func = func;
    t->data = data;
}

void softirq_tasklet_init(
    struct tasklet *t, void (*func)(unsigned long), unsigned long data)
{
    tasklet_init(t, func, data);
    t->is_softirq = 1;
}

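/*
 * CPU hotplug notifier: initialise a CPU's tasklet lists before it comes up,
 * and reclaim any tasklets left behind when it dies or its bring-up is
 * cancelled.
 */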
static int cpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;

    switch ( action )
    {
    case CPU_UP_PREPARE:
        INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
        INIT_LIST_HEAD(&per_cpu(softirq_tasklet_list, cpu));
        break;
    case CPU_UP_CANCELED:
    case CPU_DEAD:
        migrate_tasklets_from_cpu(cpu, &per_cpu(tasklet_list, cpu));
        migrate_tasklets_from_cpu(cpu, &per_cpu(softirq_tasklet_list, cpu));
        break;
    default:
        break;
    }

    return NOTIFY_DONE;
}

static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback,
    .priority = 99
};

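/*
 * Boot-time initialisation: set up the boot CPU's lists, hook into CPU
 * hotplug notifications, register the TASKLET_SOFTIRQ handler, and start
 * accepting tasklet_schedule() calls.
 */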
void __init tasklet_subsys_init(void)
{
    void *hcpu = (void *)(long)smp_processor_id();
    cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
    register_cpu_notifier(&cpu_nfb);
    open_softirq(TASKLET_SOFTIRQ, tasklet_softirq_action);
    tasklets_initialised = 1;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */