/*
 * Xen event channels (FIFO-based ABI)
 *
 * Copyright (C) 2013 Citrix Systems R&D ltd.
 *
 * This source code is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * Or, when distributed separately from the Linux kernel or
 * incorporated into other software packages, subject to the following
 * license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

#include <asm/barrier.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/page.h>

#include "events_internal.h"

#define EVENT_WORDS_PER_PAGE (XEN_PAGE_SIZE / sizeof(event_word_t))
#define MAX_EVENT_ARRAY_PAGES (EVTCHN_FIFO_NR_CHANNELS / EVENT_WORDS_PER_PAGE)
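
/*
 * A note on the sizing above (added commentary, not in the original
 * source): event_word_t is a 32-bit word, so with the usual 4 KiB
 * XEN_PAGE_SIZE each array page holds 1024 event words, and the FIFO
 * ABI's 2^17 = 131072 channels need at most 128 array pages.
 */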

struct evtchn_fifo_queue {
	uint32_t head[EVTCHN_FIFO_MAX_QUEUES];
};

static DEFINE_PER_CPU(struct evtchn_fifo_control_block *, cpu_control_block);
static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue);
static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
static unsigned event_array_pages __read_mostly;

/*
 * sync_set_bit() and friends must be unsigned long aligned.
 */
#if BITS_PER_LONG > 32

#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL)
#define EVTCHN_FIFO_BIT(b, w) \
	(((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b)

#else

#define BM(w) ((unsigned long *)(w))
#define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b

#endif
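
/*
 * Worked example for the 64-bit case (added commentary, not in the
 * original source; little-endian layout assumed, as on x86 and ARM):
 * a 4-byte event word whose address has bit 2 set sits in the upper
 * half of the enclosing 8-byte long. BM() rounds the address down to
 * the 8-byte boundary, and EVTCHN_FIFO_BIT() compensates by adding 32
 * to the bit number, so the sync_*_bit() helpers always operate on an
 * aligned unsigned long.
 */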

static inline event_word_t *event_word_from_port(evtchn_port_t port)
{
	unsigned i = port / EVENT_WORDS_PER_PAGE;

	return event_array[i] + port % EVENT_WORDS_PER_PAGE;
}

static unsigned evtchn_fifo_max_channels(void)
{
	return EVTCHN_FIFO_NR_CHANNELS;
}

static unsigned evtchn_fifo_nr_channels(void)
{
	return event_array_pages * EVENT_WORDS_PER_PAGE;
}

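/*
 * Added commentary (not in the original source): EVTCHNOP_init_control
 * registers the page at init_control.control_gfn with Xen as the
 * control block for the given vCPU. The control block holds the
 * per-priority queue HEADs and the "ready" bitmap of queues that have
 * pending events.
 */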
static int init_control_block(int cpu,
			      struct evtchn_fifo_control_block *control_block)
{
	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
	struct evtchn_init_control init_control;
	unsigned int i;

	/* Reset the control block and the local HEADs. */
	clear_page(control_block);
	for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
		q->head[i] = 0;

	init_control.control_gfn = virt_to_gfn(control_block);
	init_control.offset      = 0;
	init_control.vcpu        = xen_vcpu_nr(cpu);

	return HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
}

static void free_unused_array_pages(void)
{
	unsigned i;

	for (i = event_array_pages; i < MAX_EVENT_ARRAY_PAGES; i++) {
		if (!event_array[i])
			break;
		free_page((unsigned long)event_array[i]);
		event_array[i] = NULL;
	}
}

static void init_array_page(event_word_t *array_page)
{
	unsigned i;

	for (i = 0; i < EVENT_WORDS_PER_PAGE; i++)
		array_page[i] = 1 << EVTCHN_FIFO_MASKED;
}

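/*
 * Added commentary (not in the original source): the event array is
 * grown one page at a time. Each EVTCHNOP_expand_array call hands the
 * page at expand_array.array_gfn to Xen as the next page of the
 * array, so pages are registered strictly in order.
 */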
static int evtchn_fifo_setup(evtchn_port_t port)
{
	unsigned new_array_pages;
	int ret;

	new_array_pages = port / EVENT_WORDS_PER_PAGE + 1;

	if (new_array_pages > MAX_EVENT_ARRAY_PAGES)
		return -EINVAL;

	while (event_array_pages < new_array_pages) {
		void *array_page;
		struct evtchn_expand_array expand_array;

		/* Might already have a page if we've resumed. */
		array_page = event_array[event_array_pages];
		if (!array_page) {
			array_page = (void *)__get_free_page(GFP_KERNEL);
			if (array_page == NULL) {
				ret = -ENOMEM;
				goto error;
			}
			event_array[event_array_pages] = array_page;
		}

		/* Mask all events in this page before adding it. */
		init_array_page(array_page);

		expand_array.array_gfn = virt_to_gfn(array_page);

		ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array);
		if (ret < 0)
			goto error;

		event_array_pages++;
	}
	return 0;

 error:
	if (event_array_pages == 0)
		panic("xen: unable to expand event array with initial page (%d)\n", ret);
	else
		pr_err("unable to expand event array (%d)\n", ret);
	free_unused_array_pages();
	return ret;
}

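/*
 * Added commentary (not in the original source): unlike the 2-level
 * ABI, the FIFO ABI keeps no guest-side per-CPU tracking structure
 * for channel affinity, so there is nothing to update here; Xen
 * itself queues the event on the target vCPU's control block.
 */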
static void evtchn_fifo_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
				    unsigned int old_cpu)
{
	/* no-op */
}

static void evtchn_fifo_clear_pending(evtchn_port_t port)
{
	event_word_t *word = event_word_from_port(port);
	sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}

static void evtchn_fifo_set_pending(evtchn_port_t port)
{
	event_word_t *word = event_word_from_port(port);
	sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}

static bool evtchn_fifo_is_pending(evtchn_port_t port)
{
	event_word_t *word = event_word_from_port(port);
	return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}

static void evtchn_fifo_mask(evtchn_port_t port)
{
	event_word_t *word = event_word_from_port(port);
	sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
}

static bool evtchn_fifo_is_masked(evtchn_port_t port)
{
	event_word_t *word = event_word_from_port(port);
	return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
}

/*
 * Clear MASKED if not PENDING, spinning if BUSY is set.
 * Return true if mask was cleared.
 */
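/*
 * Added commentary (not in the original source): the expected value
 * passed to sync_cmpxchg() below deliberately has BUSY cleared, so
 * the exchange can only succeed while BUSY is unset. If the
 * hypervisor currently holds BUSY, the cmpxchg fails and the loop
 * re-reads the word and retries; this is the spin the comment above
 * refers to.
 */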
static bool clear_masked_cond(volatile event_word_t *word)
{
	event_word_t new, old, w;

	w = *word;

	do {
		if (!(w & (1 << EVTCHN_FIFO_MASKED)))
			return true;

		if (w & (1 << EVTCHN_FIFO_PENDING))
			return false;

		old = w & ~(1 << EVTCHN_FIFO_BUSY);
		new = old & ~(1 << EVTCHN_FIFO_MASKED);
		w = sync_cmpxchg(word, old, new);
	} while (w != old);

	return true;
}

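/*
 * Added commentary (not in the original source): if the channel is
 * both masked and pending, clearing MASKED locally is not enough;
 * the event still has to be (re)linked onto a queue, which only Xen
 * can do, so we fall back to the EVTCHNOP_unmask hypercall.
 */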
static void evtchn_fifo_unmask(evtchn_port_t port)
{
	event_word_t *word = event_word_from_port(port);

	BUG_ON(!irqs_disabled());

	if (!clear_masked_cond(word)) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	}
}

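/*
 * Added commentary (not in the original source): the low bits of an
 * event word form the LINK field, holding the port number of the
 * next event on the queue, and the LINKED flag marks the word as
 * being queued. Both are cleared in a single atomic step so the
 * returned link is consistent with the word having been dequeued.
 */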
static uint32_t clear_linked(volatile event_word_t *word)
{
	event_word_t new, old, w;

	w = *word;

	do {
		old = w;
		new = (w & ~((1 << EVTCHN_FIFO_LINKED)
			     | EVTCHN_FIFO_LINK_MASK));
	} while ((w = sync_cmpxchg(word, old, new)) != old);

	return w & EVTCHN_FIFO_LINK_MASK;
}

static void consume_one_event(unsigned cpu, struct evtchn_loop_ctrl *ctrl,
			      struct evtchn_fifo_control_block *control_block,
			      unsigned priority, unsigned long *ready)
{
	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
	uint32_t head;
	evtchn_port_t port;
	event_word_t *word;

	head = q->head[priority];

	/*
	 * Reached the tail last time? Read the new HEAD from the
	 * control block.
	 */
	if (head == 0) {
		virt_rmb(); /* Ensure word is up-to-date before reading head. */
		head = control_block->head[priority];
	}

	port = head;
	word = event_word_from_port(port);
	head = clear_linked(word);

	/*
	 * If the link is non-zero, there are more events in the
	 * queue, otherwise the queue is empty.
	 *
	 * If the queue is empty, clear this priority from our local
	 * copy of the ready word.
	 */
	if (head == 0)
		clear_bit(priority, ready);

	if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
		if (unlikely(!ctrl))
			pr_warn("Dropping pending event for port %u\n", port);
		else
			handle_irq_for_port(port, ctrl);
	}

	q->head[priority] = head;
}

static void __evtchn_fifo_handle_events(unsigned cpu,
					struct evtchn_loop_ctrl *ctrl)
{
	struct evtchn_fifo_control_block *control_block;
	unsigned long ready;
	unsigned q;

	control_block = per_cpu(cpu_control_block, cpu);

	ready = xchg(&control_block->ready, 0);

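	/*
	 * Added commentary (not in the original source): xchg()
	 * atomically snapshots and clears the ready bitmap. After
	 * each consumed event the bitmap is re-read the same way, so
	 * queues that became ready while we were handling events are
	 * folded into this pass instead of waiting for the next
	 * upcall.
	 */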
	while (ready) {
		q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
		consume_one_event(cpu, ctrl, control_block, q, &ready);
		ready |= xchg(&control_block->ready, 0);
	}
}

static void evtchn_fifo_handle_events(unsigned cpu,
				      struct evtchn_loop_ctrl *ctrl)
{
	__evtchn_fifo_handle_events(cpu, ctrl);
}

static void evtchn_fifo_resume(void)
{
	unsigned cpu;

	for_each_possible_cpu(cpu) {
		void *control_block = per_cpu(cpu_control_block, cpu);
		int ret;

		if (!control_block)
			continue;

		/*
		 * If this CPU is offline, take the opportunity to
		 * free the control block while it is not being
		 * used.
		 */
		if (!cpu_online(cpu)) {
			free_page((unsigned long)control_block);
			per_cpu(cpu_control_block, cpu) = NULL;
			continue;
		}

		ret = init_control_block(cpu, control_block);
		BUG_ON(ret < 0);
	}

	/*
	 * The event array starts out as empty again and is extended
	 * as normal when events are bound. The existing pages will
	 * be reused.
	 */
	event_array_pages = 0;
}

static int evtchn_fifo_alloc_control_block(unsigned cpu)
{
	void *control_block = NULL;
	int ret = -ENOMEM;

	control_block = (void *)__get_free_page(GFP_KERNEL);
	if (control_block == NULL)
		goto error;

	ret = init_control_block(cpu, control_block);
	if (ret < 0)
		goto error;

	per_cpu(cpu_control_block, cpu) = control_block;

	return 0;

 error:
	free_page((unsigned long)control_block);
	return ret;
}

static int evtchn_fifo_percpu_init(unsigned int cpu)
{
	if (!per_cpu(cpu_control_block, cpu))
		return evtchn_fifo_alloc_control_block(cpu);
	return 0;
}

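/*
 * Added commentary (not in the original source): on CPU teardown the
 * queues are drained with a NULL ctrl, so consume_one_event() unlinks
 * each remaining event and warns that it is being dropped rather
 * than handling it on the dying CPU.
 */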
static int evtchn_fifo_percpu_deinit(unsigned int cpu)
{
	__evtchn_fifo_handle_events(cpu, NULL);
	return 0;
}

static const struct evtchn_ops evtchn_ops_fifo = {
	.max_channels      = evtchn_fifo_max_channels,
	.nr_channels       = evtchn_fifo_nr_channels,
	.setup             = evtchn_fifo_setup,
	.bind_to_cpu       = evtchn_fifo_bind_to_cpu,
	.clear_pending     = evtchn_fifo_clear_pending,
	.set_pending       = evtchn_fifo_set_pending,
	.is_pending        = evtchn_fifo_is_pending,
	.mask              = evtchn_fifo_mask,
	.unmask            = evtchn_fifo_unmask,
	.handle_events     = evtchn_fifo_handle_events,
	.resume            = evtchn_fifo_resume,
	.percpu_init       = evtchn_fifo_percpu_init,
	.percpu_deinit     = evtchn_fifo_percpu_deinit,
};

int __init xen_evtchn_fifo_init(void)
{
	int cpu = smp_processor_id();
	int ret;

	ret = evtchn_fifo_alloc_control_block(cpu);
	if (ret < 0)
		return ret;

	pr_info("Using FIFO-based ABI\n");

	evtchn_ops = &evtchn_ops_fifo;

	return ret;
}