/*
 * FIFO event channel management.
 *
 * Copyright (C) 2013 Citrix Systems R&D Ltd.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2 or later.  See the file COPYING for more details.
 */

#include "event_channel.h"

#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/paging.h>
#include <xen/mm.h>
#include <xen/domain_page.h>

#include <asm/guest_atomics.h>

#include <public/event_channel.h>

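/*
 * Per-vCPU, per-priority event queue.  The queue head lives in the guest
 * visible control block (set up by map_control_block()); the tail is
 * private to Xen and protected by the queue lock.
 */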
struct evtchn_fifo_queue {
    uint32_t *head; /* points into control block */
    uint32_t tail;
    uint8_t priority;
    spinlock_t lock;
};

struct evtchn_fifo_vcpu {
    struct evtchn_fifo_control_block *control_block;
    struct evtchn_fifo_queue queue[EVTCHN_FIFO_MAX_QUEUES];
};

#define EVTCHN_FIFO_EVENT_WORDS_PER_PAGE (PAGE_SIZE / sizeof(event_word_t))
#define EVTCHN_FIFO_MAX_EVENT_ARRAY_PAGES \
    (EVTCHN_FIFO_NR_CHANNELS / EVTCHN_FIFO_EVENT_WORDS_PER_PAGE)
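/*
 * With 4 KiB pages and 32-bit event words each array page holds 1024
 * event words, so the 2^17 ports of EVTCHN_FIFO_NR_CHANNELS fit in 128
 * array pages.
 */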

struct evtchn_fifo_domain {
    event_word_t *event_array[EVTCHN_FIFO_MAX_EVENT_ARRAY_PAGES];
    unsigned int num_evtchns;
};

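/*
 * The (vCPU, priority) pair an event was last queued on, packed into a
 * single 32-bit value so evtchn->fifo_lastq can be read and updated with
 * one atomic access.
 */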
union evtchn_fifo_lastq {
    uint32_t raw;
    struct {
        uint8_t last_priority;
        uint16_t last_vcpu_id;
    };
};

static inline event_word_t *evtchn_fifo_word_from_port(const struct domain *d,
                                                       unsigned int port)
{
    unsigned int p, w;

    /*
     * Callers aren't required to hold d->event_lock, so we need to synchronize
     * with evtchn_fifo_init_control() setting d->evtchn_port_ops /after/
     * d->evtchn_fifo.
     */
    smp_rmb();

    if ( unlikely(port >= d->evtchn_fifo->num_evtchns) )
        return NULL;

    /*
     * Callers aren't required to hold d->event_lock, so we need to synchronize
     * with add_page_to_event_array().
     */
    smp_rmb();

    p = array_index_nospec(port / EVTCHN_FIFO_EVENT_WORDS_PER_PAGE,
                           d->evtchn_fifo->num_evtchns);
    w = port % EVTCHN_FIFO_EVENT_WORDS_PER_PAGE;

    return d->evtchn_fifo->event_array[p] + w;
}

static void cf_check evtchn_fifo_init(struct domain *d, struct evtchn *evtchn)
{
    event_word_t *word;

    evtchn->priority = EVTCHN_FIFO_PRIORITY_DEFAULT;

    /*
     * If this event is still linked, the first event may be delivered
     * on the wrong VCPU or with an unexpected priority.
     */
    word = evtchn_fifo_word_from_port(d, evtchn->port);
    if ( word && guest_test_bit(d, EVTCHN_FIFO_LINKED, word) )
        gdprintk(XENLOG_WARNING, "domain %d, port %d already on a queue\n",
                 d->domain_id, evtchn->port);
}

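/*
 * Try to atomically replace the LINK field of *word with @link, keeping
 * a local snapshot of the word in *w.  Returns 1 if the cmpxchg()
 * succeeded, 0 if the word is no longer LINKED (nothing to do), or
 * -EAGAIN if the word changed under our feet and the caller should retry
 * with the refreshed *w.
 */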
static int try_set_link(event_word_t *word, event_word_t *w, uint32_t link)
{
    event_word_t new, old;

    if ( !(*w & (1 << EVTCHN_FIFO_LINKED)) )
        return 0;

    old = *w;
    new = (old & ~((1 << EVTCHN_FIFO_BUSY) | EVTCHN_FIFO_LINK_MASK)) | link;
    *w = cmpxchg(word, old, new);
    if ( *w == old )
        return 1;

    return -EAGAIN;
}

/*
 * Atomically set the LINK field iff it is still LINKED.
 *
 * The guest is only permitted to make the following changes to a
 * LINKED event.
 *
 * - set MASKED
 * - clear MASKED
 * - clear PENDING
 * - clear LINKED (and LINK)
 *
 * We block unmasking by the guest by marking the tail word as BUSY,
 * therefore, the cmpxchg() may fail at most 4 times.
 */
static bool evtchn_fifo_set_link(struct domain *d, event_word_t *word,
                                 uint32_t link)
{
    event_word_t w;
    unsigned int try;
    int ret;

    w = read_atomic(word);

    ret = try_set_link(word, &w, link);
    if ( ret >= 0 )
        return ret;

    /* Lock the word to prevent guest unmasking. */
    guest_set_bit(d, EVTCHN_FIFO_BUSY, word);

    w = read_atomic(word);

    for ( try = 0; try < 4; try++ )
    {
        ret = try_set_link(word, &w, link);
        if ( ret >= 0 )
        {
            if ( ret == 0 )
                guest_clear_bit(d, EVTCHN_FIFO_BUSY, word);
            return ret;
        }
    }
    gdprintk(XENLOG_WARNING, "domain %d, port %d not linked\n",
             d->domain_id, link);
    guest_clear_bit(d, EVTCHN_FIFO_BUSY, word);
    return 1;
}

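/*
 * Deliver an event: lock the (up to two) queues involved, set the
 * PENDING bit, and, if the event is unmasked and not already LINKED,
 * append it to the tail of its per-vCPU, per-priority queue and mark
 * that queue ready.
 */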
static void cf_check evtchn_fifo_set_pending(
    struct vcpu *v, struct evtchn *evtchn)
{
    struct domain *d = v->domain;
    unsigned int port;
    event_word_t *word;
    unsigned long flags;
    bool check_pollers = false;
    struct evtchn_fifo_queue *q, *old_q;
    unsigned int try;
    bool linked = true;

    port = evtchn->port;
    word = evtchn_fifo_word_from_port(d, port);

    /*
     * Event array page may not exist yet, save the pending state for
     * when the page is added.
     */
    if ( unlikely(!word) )
    {
        evtchn->pending = true;
        return;
    }

    /*
     * Lock all queues related to the event channel (in case of a queue change
     * this might be two).
     * It is mandatory to do that before setting and testing the PENDING bit
     * and to hold the current queue lock until the event has been put into the
     * list of pending events in order to avoid waking up a guest without the
     * event being visibly pending in the guest.
     */
    for ( try = 0; try < 3; try++ )
    {
        union evtchn_fifo_lastq lastq;
        const struct vcpu *old_v;

        lastq.raw = read_atomic(&evtchn->fifo_lastq);
        old_v = d->vcpu[lastq.last_vcpu_id];

        q = &v->evtchn_fifo->queue[evtchn->priority];
        old_q = &old_v->evtchn_fifo->queue[lastq.last_priority];

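        /*
         * Take the two locks in a fixed (ascending address) order to
         * avoid an ABBA deadlock against a concurrent caller locking
         * the same pair of queues.
         */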
        if ( q == old_q )
            spin_lock_irqsave(&q->lock, flags);
        else if ( q < old_q )
        {
            spin_lock_irqsave(&q->lock, flags);
            spin_lock(&old_q->lock);
        }
        else
        {
            spin_lock_irqsave(&old_q->lock, flags);
            spin_lock(&q->lock);
        }

        lastq.raw = read_atomic(&evtchn->fifo_lastq);
        old_v = d->vcpu[lastq.last_vcpu_id];
        if ( q == &v->evtchn_fifo->queue[evtchn->priority] &&
             old_q == &old_v->evtchn_fifo->queue[lastq.last_priority] )
            break;

        if ( q != old_q )
            spin_unlock(&old_q->lock);
        spin_unlock_irqrestore(&q->lock, flags);
    }

    /* If we couldn't settle on a stable queue pair, drop the event. */
    if ( try == 3 )
    {
        gprintk(XENLOG_WARNING,
                "%pd port %u lost event (too many queue changes)\n",
                d, evtchn->port);
        goto done;
    }

    /*
     * Control block not mapped.  The guest must not unmask an
     * event until the control block is initialized, so we can
     * just drop the event.
     */
    if ( unlikely(!v->evtchn_fifo->control_block) )
    {
        printk(XENLOG_G_WARNING
               "%pv has no FIFO event channel control block\n", v);
        goto unlock;
    }

    check_pollers = !guest_test_and_set_bit(d, EVTCHN_FIFO_PENDING, word);

    /*
     * Link the event if it is unmasked and not already linked.
     */
    if ( !guest_test_bit(d, EVTCHN_FIFO_MASKED, word) &&
         /*
          * This also acts as the read counterpart of the smp_wmb() in
          * map_control_block().
          */
         !guest_test_and_set_bit(d, EVTCHN_FIFO_LINKED, word) )
    {
        /*
         * If this event was a tail, the old queue is now empty and
         * its tail must be invalidated to prevent adding an event to
         * the old queue from corrupting the new queue.
         */
        if ( old_q->tail == port )
            old_q->tail = 0;

        /* Moved to a different queue? */
        if ( old_q != q )
        {
            union evtchn_fifo_lastq lastq = { };

            lastq.last_vcpu_id = v->vcpu_id;
            lastq.last_priority = q->priority;
            write_atomic(&evtchn->fifo_lastq, lastq.raw);

            spin_unlock(&old_q->lock);
            old_q = q;
        }

        /*
         * Atomically link the tail to port iff the tail is linked.
         * If the tail is unlinked the queue is empty.
         *
         * If port is the same as tail, the queue is empty but q->tail
         * will appear linked as we just set LINKED above.
         *
         * If the queue is empty (i.e., we haven't linked to the new
         * event), head must be updated.
         */
        linked = false;
        if ( q->tail )
        {
            event_word_t *tail_word;

            tail_word = evtchn_fifo_word_from_port(d, q->tail);
            linked = evtchn_fifo_set_link(d, tail_word, port);
        }
        if ( !linked )
            write_atomic(q->head, port);
        q->tail = port;
    }

 unlock:
    if ( q != old_q )
        spin_unlock(&old_q->lock);
    spin_unlock_irqrestore(&q->lock, flags);

 done:
    if ( !linked &&
         !guest_test_and_set_bit(d, q->priority,
                                 &v->evtchn_fifo->control_block->ready) )
        vcpu_mark_events_pending(v);

    if ( check_pollers )
        evtchn_check_pollers(d, port);
}

static void cf_check evtchn_fifo_clear_pending(
    struct domain *d, struct evtchn *evtchn)
{
    event_word_t *word;

    word = evtchn_fifo_word_from_port(d, evtchn->port);
    if ( unlikely(!word) )
        return;

    /*
     * Just clear the P bit.
     *
     * No need to unlink as the guest will unlink and ignore
     * non-pending events.
     */
    guest_clear_bit(d, EVTCHN_FIFO_PENDING, word);
}

static void cf_check evtchn_fifo_unmask(struct domain *d, struct evtchn *evtchn)
{
    struct vcpu *v = d->vcpu[evtchn->notify_vcpu_id];
    event_word_t *word;

    word = evtchn_fifo_word_from_port(d, evtchn->port);
    if ( unlikely(!word) )
        return;

    guest_clear_bit(d, EVTCHN_FIFO_MASKED, word);

    /* Relink if pending. */
    if ( guest_test_bit(d, EVTCHN_FIFO_PENDING, word) )
        evtchn_fifo_set_pending(v, evtchn);
}

static bool cf_check evtchn_fifo_is_pending(
    const struct domain *d, const struct evtchn *evtchn)
{
    const event_word_t *word = evtchn_fifo_word_from_port(d, evtchn->port);

    return word && guest_test_bit(d, EVTCHN_FIFO_PENDING, word);
}

static bool cf_check evtchn_fifo_is_masked(
    const struct domain *d, const struct evtchn *evtchn)
{
    const event_word_t *word = evtchn_fifo_word_from_port(d, evtchn->port);

    return !word || guest_test_bit(d, EVTCHN_FIFO_MASKED, word);
}

static bool cf_check evtchn_fifo_is_busy(
    const struct domain *d, const struct evtchn *evtchn)
{
    const event_word_t *word = evtchn_fifo_word_from_port(d, evtchn->port);

    return word && guest_test_bit(d, EVTCHN_FIFO_LINKED, word);
}

static int cf_check evtchn_fifo_set_priority(
    struct domain *d, struct evtchn *evtchn, unsigned int priority)
{
    if ( priority > EVTCHN_FIFO_PRIORITY_MIN )
        return -EINVAL;

    /*
     * Only need to switch to the new queue for future events. If the
     * event is already pending or in the process of being linked it
     * will be on the old queue -- this is fine.
     */
    evtchn->priority = priority;

    return 0;
}

static void cf_check evtchn_fifo_print_state(
    struct domain *d, const struct evtchn *evtchn)
{
    event_word_t *word;

    word = evtchn_fifo_word_from_port(d, evtchn->port);
    if ( !word )
        printk("?     ");
    else if ( guest_test_bit(d, EVTCHN_FIFO_LINKED, word) )
        printk("%c %-4u", guest_test_bit(d, EVTCHN_FIFO_BUSY, word) ? 'B' : ' ',
               *word & EVTCHN_FIFO_LINK_MASK);
    else
        printk("%c -   ", guest_test_bit(d, EVTCHN_FIFO_BUSY, word) ? 'B' : ' ');
}

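/*
 * The per-port hooks installed via d->evtchn_port_ops once
 * evtchn_fifo_init_control() has switched the domain to the FIFO ABI.
 */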
static const struct evtchn_port_ops evtchn_port_ops_fifo =
{
    .init          = evtchn_fifo_init,
    .set_pending   = evtchn_fifo_set_pending,
    .clear_pending = evtchn_fifo_clear_pending,
    .unmask        = evtchn_fifo_unmask,
    .is_pending    = evtchn_fifo_is_pending,
    .is_masked     = evtchn_fifo_is_masked,
    .is_busy       = evtchn_fifo_is_busy,
    .set_priority  = evtchn_fifo_set_priority,
    .print_state   = evtchn_fifo_print_state,
};

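/*
 * Map a guest page so Xen can write to it.  Takes a writable type
 * reference and a global mapping; both are dropped again by
 * unmap_guest_page().
 */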
static int map_guest_page(struct domain *d, uint64_t gfn, void **virt)
{
    struct page_info *p;

    p = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
    if ( !p )
        return -EINVAL;

    if ( !get_page_type(p, PGT_writable_page) )
    {
        put_page(p);
        return -EINVAL;
    }

    *virt = __map_domain_page_global(p);
    if ( !*virt )
    {
        put_page_and_type(p);
        return -ENOMEM;
    }
    return 0;
}

static void unmap_guest_page(void *virt)
{
    struct page_info *page;

    if ( !virt )
        return;

    virt = (void *)((unsigned long)virt & PAGE_MASK);
    page = mfn_to_page(domain_page_map_to_mfn(virt));

    unmap_domain_page_global(virt);
    put_page_and_type(page);
}

static void init_queue(struct vcpu *v, struct evtchn_fifo_queue *q,
                       unsigned int i)
{
    spin_lock_init(&q->lock);
    q->priority = i;
}

static int setup_control_block(struct vcpu *v)
{
    struct evtchn_fifo_vcpu *efv;
    unsigned int i;

    efv = xzalloc(struct evtchn_fifo_vcpu);
    if ( !efv )
        return -ENOMEM;

    for ( i = 0; i <= EVTCHN_FIFO_PRIORITY_MIN; i++ )
        init_queue(v, &efv->queue[i], i);

    v->evtchn_fifo = efv;

    return 0;
}

static int map_control_block(struct vcpu *v, uint64_t gfn, uint32_t offset)
{
    void *virt;
    struct evtchn_fifo_control_block *control_block;
    unsigned int i;
    int rc;

    if ( v->evtchn_fifo->control_block )
        return -EINVAL;

    rc = map_guest_page(v->domain, gfn, &virt);
    if ( rc < 0 )
        return rc;

    control_block = virt + offset;

    for ( i = 0; i <= EVTCHN_FIFO_PRIORITY_MIN; i++ )
        v->evtchn_fifo->queue[i].head = &control_block->head[i];

    /* All queue heads must have been set before setting the control block. */
    smp_wmb();

    v->evtchn_fifo->control_block = control_block;

    return 0;
}

static void cleanup_control_block(struct vcpu *v)
{
    if ( !v->evtchn_fifo )
        return;

    unmap_guest_page(v->evtchn_fifo->control_block);
    xfree(v->evtchn_fifo);
    v->evtchn_fifo = NULL;
}

/*
 * Setup an event array with no pages.
 */
static int setup_event_array(struct domain *d)
{
    d->evtchn_fifo = xzalloc(struct evtchn_fifo_domain);
    if ( !d->evtchn_fifo )
        return -ENOMEM;

    return 0;
}

static void cleanup_event_array(struct domain *d)
{
    unsigned int i;

    if ( !d->evtchn_fifo )
        return;

    for ( i = 0; i < EVTCHN_FIFO_MAX_EVENT_ARRAY_PAGES; i++ )
        unmap_guest_page(d->evtchn_fifo->event_array[i]);
    xfree(d->evtchn_fifo);
    d->evtchn_fifo = NULL;
}

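/*
 * Called when switching a domain from the 2-level to the FIFO ABI: carry
 * over each bound port's pending state from the 2-level shared_info
 * bitmap and give it the default priority.
 */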
static void setup_ports(struct domain *d, unsigned int prev_evtchns)
{
    unsigned int port;

    /*
     * For each port that is already bound:
     *
     * - save its pending state.
     * - set default priority.
     */
    for ( port = 1; port < prev_evtchns; port++ )
    {
        struct evtchn *evtchn;

        if ( !port_is_valid(d, port) )
            break;

        evtchn = evtchn_from_port(d, port);

        if ( guest_test_bit(d, port, &shared_info(d, evtchn_pending)) )
            evtchn->pending = true;

        evtchn_fifo_set_priority(d, evtchn, EVTCHN_FIFO_PRIORITY_DEFAULT);
    }
}

int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
{
    struct domain *d = current->domain;
    uint32_t vcpu_id;
    uint64_t gfn;
    uint32_t offset;
    struct vcpu *v;
    int rc;

    init_control->link_bits = EVTCHN_FIFO_LINK_BITS;

    vcpu_id = init_control->vcpu;
    gfn     = init_control->control_gfn;
    offset  = init_control->offset;

    if ( (v = domain_vcpu(d, vcpu_id)) == NULL )
        return -ENOENT;

    /* Must not cross page boundary. */
    if ( offset > (PAGE_SIZE - sizeof(evtchn_fifo_control_block_t)) )
        return -EINVAL;
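
    /*
     * E.g. with 4 KiB pages and the 72-byte evtchn_fifo_control_block_t
     * (ready word, pad, and 16 queue heads) this allows offsets up to
     * 4024 bytes.
     */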

    /*
     * Make sure the guest controlled value offset is bounded even during
     * speculative execution.
     */
    offset = array_index_nospec(offset,
                           PAGE_SIZE - sizeof(evtchn_fifo_control_block_t) + 1);

    /* Must be 8-byte aligned. */
    if ( offset & (8 - 1) )
        return -EINVAL;

    write_lock(&d->event_lock);

    /*
     * If this is the first control block, setup an empty event array
     * and switch to the fifo port ops.
     */
    if ( !d->evtchn_fifo )
    {
        struct vcpu *vcb;
        /* Latch the value before it changes during setup_event_array(). */
        unsigned int prev_evtchns = max_evtchns(d);

        for_each_vcpu ( d, vcb )
        {
            rc = setup_control_block(vcb);
            if ( rc < 0 )
                goto error;
        }

        rc = setup_event_array(d);
        if ( rc < 0 )
            goto error;

        /*
         * This call, as a side effect, synchronizes with
         * evtchn_fifo_word_from_port().
         */
        rc = map_control_block(v, gfn, offset);
        if ( rc < 0 )
            goto error;

        d->evtchn_port_ops = &evtchn_port_ops_fifo;
        setup_ports(d, prev_evtchns);
    }
    else
        rc = map_control_block(v, gfn, offset);

    write_unlock(&d->event_lock);

    return rc;

 error:
    evtchn_fifo_destroy(d);
    write_unlock(&d->event_lock);
    return rc;
}

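/*
 * Append one guest page to the event array and replay any events that
 * were marked pending while their array page was missing.  The caller
 * must hold d->event_lock for writing.
 */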
static int add_page_to_event_array(struct domain *d, unsigned long gfn)
{
    void *virt;
    unsigned int slot;
    unsigned int port = d->evtchn_fifo->num_evtchns;
    int rc;

    slot = d->evtchn_fifo->num_evtchns / EVTCHN_FIFO_EVENT_WORDS_PER_PAGE;
    if ( slot >= EVTCHN_FIFO_MAX_EVENT_ARRAY_PAGES )
        return -ENOSPC;

    rc = map_guest_page(d, gfn, &virt);
    if ( rc < 0 )
        return rc;

    d->evtchn_fifo->event_array[slot] = virt;

    /* Synchronize with evtchn_fifo_word_from_port(). */
    smp_wmb();

    d->evtchn_fifo->num_evtchns += EVTCHN_FIFO_EVENT_WORDS_PER_PAGE;

    /*
     * Re-raise any events that were pending while this array page was
     * missing.
     */
    for ( ; port < d->evtchn_fifo->num_evtchns; port++ )
    {
        struct evtchn *evtchn;

        if ( !port_is_valid(d, port) )
            break;

        evtchn = evtchn_from_port(d, port);
        if ( evtchn->pending )
            evtchn_fifo_set_pending(d->vcpu[evtchn->notify_vcpu_id], evtchn);
    }

    return 0;
}

int evtchn_fifo_expand_array(const struct evtchn_expand_array *expand_array)
{
    struct domain *d = current->domain;
    int rc;

    if ( !d->evtchn_fifo )
        return -EOPNOTSUPP;

    write_lock(&d->event_lock);
    rc = add_page_to_event_array(d, expand_array->array_gfn);
    write_unlock(&d->event_lock);

    return rc;
}

void evtchn_fifo_destroy(struct domain *d)
{
    struct vcpu *v;

    for_each_vcpu ( d, v )
        cleanup_control_block(v);
    cleanup_event_array(d);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */