/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2006, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <xen/compat.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <xen/event_fifo.h>
#include <asm/current.h>

#include <public/xen.h>
#include <public/event_channel.h>
#include <xsm/xsm.h>

#define ERROR_EXIT(_errno)                                          \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                 "EVTCHNOP failure: error %d\n",                    \
                 (_errno));                                         \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )
#define ERROR_EXIT_DOM(_errno, _dom)                                \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                 "EVTCHNOP failure: domain %d, error %d\n",         \
                 (_dom)->domain_id, (_errno));                      \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )

#define consumer_is_xen(e) (!!(e)->xen_consumer)

/*
 * The function alloc_unbound_xen_event_channel() allows an arbitrary
 * notifier function to be specified. However, very few unique functions
 * are specified in practice, so to prevent bloating the evtchn structure
 * with a pointer, we stash them dynamically in a small lookup array which
 * can be indexed by a small integer.
 */
static xen_event_channel_notification_t xen_consumers[NR_XEN_CONSUMERS];

/* Default notification action: wake up from wait_on_xen_event_channel(). */
static void default_xen_notification_fn(struct vcpu *v, unsigned int port)
{
    /* Consumer needs notification only if blocked. */
    if ( test_and_clear_bit(_VPF_blocked_in_xen, &v->pause_flags) )
        vcpu_wake(v);
}

/*
 * Given a notification function, return the value to stash in
 * the evtchn->xen_consumer field.
 */
static uint8_t get_xen_consumer(xen_event_channel_notification_t fn)
{
    unsigned int i;

    if ( fn == NULL )
        fn = default_xen_notification_fn;

    for ( i = 0; i < ARRAY_SIZE(xen_consumers); i++ )
    {
        if ( xen_consumers[i] == NULL )
            xen_consumers[i] = fn;
        if ( xen_consumers[i] == fn )
            break;
    }

    BUG_ON(i >= ARRAY_SIZE(xen_consumers));
    return i+1;
}

/* Get the notification function for a given Xen-bound event channel. */
#define xen_notification_fn(e) (xen_consumers[(e)->xen_consumer-1])
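/*
 * Return 1 if the VIRQ is global (one binding per domain, taken out on
 * vCPU0 but re-bindable to any vCPU), 0 if it is per-vCPU (one binding
 * per vCPU, fixed to that vCPU). Architecture-defined VIRQs defer to
 * arch_virq_is_global().
 */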
static int virq_is_global(uint32_t virq)
{
    int rc;

    ASSERT(virq < NR_VIRQS);

    switch ( virq )
    {
    case VIRQ_TIMER:
    case VIRQ_DEBUG:
    case VIRQ_XENOPROF:
    case VIRQ_XENPMU:
        rc = 0;
        break;
    case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
        rc = arch_virq_is_global(virq);
        break;
    default:
        rc = 1;
        break;
    }

    return rc;
}

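/*
 * Allocate a zeroed bucket of EVTCHNS_PER_BUCKET event channels, giving
 * each an XSM security label and an initialised lock; on failure all
 * partial allocations are rolled back and NULL is returned.
 */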
static struct evtchn *alloc_evtchn_bucket(struct domain *d, unsigned int port)
{
    struct evtchn *chn;
    unsigned int i;

    chn = xzalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( !chn )
        return NULL;

    for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
    {
        if ( xsm_alloc_security_evtchn(&chn[i]) )
        {
            while ( i-- )
                xsm_free_security_evtchn(&chn[i]);
            xfree(chn);
            return NULL;
        }
        chn[i].port = port + i;
        spin_lock_init(&chn[i].lock);
    }
    return chn;
}

static void free_evtchn_bucket(struct domain *d, struct evtchn *bucket)
{
    unsigned int i;

    if ( !bucket )
        return;

    for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
        xsm_free_security_evtchn(bucket + i);

    xfree(bucket);
}

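/*
 * Make @port usable: if its bucket (and containing group) do not exist
 * yet, they are allocated here. Returns -EBUSY if the port is already in
 * use, -ENOSPC if it lies beyond the domain's limits, -ENOMEM on
 * allocation failure, or 0 on success.
 */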
int evtchn_allocate_port(struct domain *d, evtchn_port_t port)
{
    if ( port > d->max_evtchn_port || port >= d->max_evtchns )
        return -ENOSPC;

    if ( port_is_valid(d, port) )
    {
        if ( evtchn_from_port(d, port)->state != ECS_FREE ||
             evtchn_port_is_busy(d, port) )
            return -EBUSY;
    }
    else
    {
        struct evtchn *chn;
        struct evtchn **grp;

        if ( !group_from_port(d, port) )
        {
            grp = xzalloc_array(struct evtchn *, BUCKETS_PER_GROUP);
            if ( !grp )
                return -ENOMEM;
            group_from_port(d, port) = grp;
        }

        chn = alloc_evtchn_bucket(d, port);
        if ( !chn )
            return -ENOMEM;
        bucket_from_port(d, port) = chn;

        write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
    }

    return 0;
}

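/* Find and allocate the lowest free port in @d. */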
static int get_free_port(struct domain *d)
{
    int port;

    if ( d->is_dying )
        return -EINVAL;

    for ( port = 0; port <= d->max_evtchn_port; port++ )
    {
        int rc = evtchn_allocate_port(d, port);

        if ( rc == -EBUSY )
            continue;
        /* Propagate genuine failures (e.g. -ENOMEM) rather than the port. */
        if ( rc )
            return rc;

        return port;
    }

    return -ENOSPC;
}

void evtchn_free(struct domain *d, struct evtchn *chn)
{
    /* Clear pending event to avoid unexpected behavior on re-bind. */
    evtchn_port_clear_pending(d, chn);

    /* Reset binding to vcpu0 when the channel is freed. */
    chn->state = ECS_FREE;
    chn->notify_vcpu_id = 0;
    chn->xen_consumer = 0;

    xsm_evtchn_close_post(chn);
}

static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int port;
    domid_t dom = alloc->dom;
    long rc;

    d = rcu_lock_domain_by_any_id(dom);
    if ( d == NULL )
        return -ESRCH;

    spin_lock(&d->event_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT_DOM(port, d);
    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_unbound(XSM_TARGET, d, chn, alloc->remote_dom);
    if ( rc )
        goto out;

    spin_lock(&chn->lock);

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;
    evtchn_port_init(d, chn);

    spin_unlock(&chn->lock);

    alloc->port = port;

 out:
    spin_unlock(&d->event_lock);
    rcu_unlock_domain(d);

    return rc;
}

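/*
 * Lock a pair of event channels in ascending address order so that two
 * CPUs locking the same pair cannot deadlock. The two pointers may refer
 * to the same channel (loopback binding), which is then locked once.
 */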
static void double_evtchn_lock(struct evtchn *lchn, struct evtchn *rchn)
{
    if ( lchn < rchn )
    {
        spin_lock(&lchn->lock);
        spin_lock(&rchn->lock);
    }
    else
    {
        if ( lchn != rchn )
            spin_lock(&rchn->lock);
        spin_lock(&lchn->lock);
    }
}

static void double_evtchn_unlock(struct evtchn *lchn, struct evtchn *rchn)
{
    spin_unlock(&lchn->lock);
    if ( lchn != rchn )
        spin_unlock(&rchn->lock);
}

static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int lport, rport = bind->remote_port;
    domid_t rdom = bind->remote_dom;
    long rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock: lock the domain at the lower address first. */
    if ( ld < rd )
    {
        spin_lock(&ld->event_lock);
        spin_lock(&rd->event_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->event_lock);
        spin_lock(&ld->event_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT_DOM(-EINVAL, rd);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT_DOM(-EINVAL, rd);

    rc = xsm_evtchn_interdomain(XSM_HOOK, ld, lchn, rd, rchn);
    if ( rc )
        goto out;

    double_evtchn_lock(lchn, rchn);

    lchn->u.interdomain.remote_dom = rd;
    lchn->u.interdomain.remote_port = rport;
    lchn->state = ECS_INTERDOMAIN;
    evtchn_port_init(ld, lchn);

    rchn->u.interdomain.remote_dom = ld;
    rchn->u.interdomain.remote_port = lport;
    rchn->state = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_port_set_pending(ld, lchn->notify_vcpu_id, lchn);

    double_evtchn_unlock(lchn, rchn);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->event_lock);
    if ( ld != rd )
        spin_unlock(&rd->event_lock);

    rcu_unlock_domain(rd);

    return rc;
}

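/*
 * Bind a VIRQ to an event channel on @bind->vcpu. A non-zero @port asks
 * for that specific port; @port == 0 lets Xen pick a free one.
 */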
int evtchn_bind_virq(evtchn_bind_virq_t *bind, evtchn_port_t port)
{
    struct evtchn *chn;
    struct vcpu *v;
    struct domain *d = current->domain;
    int virq = bind->virq, vcpu = bind->vcpu;
    int rc = 0;

    if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
        return -EINVAL;

    if ( virq_is_global(virq) && (vcpu != 0) )
        return -EINVAL;

    if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
         ((v = d->vcpu[vcpu]) == NULL) )
        return -ENOENT;

    spin_lock(&d->event_lock);

    if ( v->virq_to_evtchn[virq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( port != 0 )
    {
        if ( (rc = evtchn_allocate_port(d, port)) != 0 )
            ERROR_EXIT(rc);
    }
    else
    {
        int alloc_port = get_free_port(d);

        if ( alloc_port < 0 )
            ERROR_EXIT(alloc_port);
        port = alloc_port;
    }

    chn = evtchn_from_port(d, port);

    spin_lock(&chn->lock);

    chn->state = ECS_VIRQ;
    chn->notify_vcpu_id = vcpu;
    chn->u.virq = virq;
    evtchn_port_init(d, chn);

    spin_unlock(&chn->lock);

    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->event_lock);

    return rc;
}


static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int port, vcpu = bind->vcpu;
    long rc = 0;

    if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
         (d->vcpu[vcpu] == NULL) )
        return -ENOENT;

    spin_lock(&d->event_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    spin_lock(&chn->lock);

    chn->state = ECS_IPI;
    chn->notify_vcpu_id = vcpu;
    evtchn_port_init(d, chn);

    spin_unlock(&chn->lock);

    bind->port = port;

 out:
    spin_unlock(&d->event_lock);

    return rc;
}

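/*
 * Each vCPU keeps its PIRQ-bound ports on a doubly-linked list threaded
 * through u.pirq.{prev,next}_port, headed at v->pirq_evtchn_head. Port 0
 * is reserved and so doubles as the list terminator.
 */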
static void link_pirq_port(int port, struct evtchn *chn, struct vcpu *v)
{
    chn->u.pirq.prev_port = 0;
    chn->u.pirq.next_port = v->pirq_evtchn_head;
    if ( v->pirq_evtchn_head )
        evtchn_from_port(v->domain, v->pirq_evtchn_head)
            ->u.pirq.prev_port = port;
    v->pirq_evtchn_head = port;
}

static void unlink_pirq_port(struct evtchn *chn, struct vcpu *v)
{
    struct domain *d = v->domain;

    if ( chn->u.pirq.prev_port )
        evtchn_from_port(d, chn->u.pirq.prev_port)->u.pirq.next_port =
            chn->u.pirq.next_port;
    else
        v->pirq_evtchn_head = chn->u.pirq.next_port;
    if ( chn->u.pirq.next_port )
        evtchn_from_port(d, chn->u.pirq.next_port)->u.pirq.prev_port =
            chn->u.pirq.prev_port;
}


static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    struct vcpu *v = d->vcpu[0];
    struct pirq *info;
    int port, pirq = bind->pirq;
    long rc;

    if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
        return -EINVAL;

    if ( !is_hvm_domain(d) && !pirq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->event_lock);

    if ( pirq_to_evtchn(d, pirq) != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    info = pirq_get_info(d, pirq);
    if ( !info )
        ERROR_EXIT(-ENOMEM);
    info->evtchn = port;
    rc = (!is_hvm_domain(d)
          ? pirq_guest_bind(v, info,
                            !!(bind->flags & BIND_PIRQ__WILL_SHARE))
          : 0);
    if ( rc != 0 )
    {
        info->evtchn = 0;
        pirq_cleanup_check(info, d);
        goto out;
    }

    spin_lock(&chn->lock);

    chn->state = ECS_PIRQ;
    chn->u.pirq.irq = pirq;
    link_pirq_port(port, chn, v);
    evtchn_port_init(d, chn);

    spin_unlock(&chn->lock);

    bind->port = port;

    arch_evtchn_bind_pirq(d, pirq);

 out:
    spin_unlock(&d->event_lock);

    return rc;
}

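/*
 * Close a port. @guest is true when the request originates from the guest
 * itself, in which case Xen-attached channels are refused. Closing an
 * interdomain channel requires both domains' event locks, which are taken
 * in address order (retrying if necessary) to avoid deadlock.
 */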
int evtchn_close(struct domain *d1, int port1, bool guest)
{
    struct domain *d2 = NULL;
    struct vcpu *v;
    struct evtchn *chn1, *chn2;
    int port2;
    long rc = 0;

 again:
    spin_lock(&d1->event_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);

    /* Guest cannot close a Xen-attached event channel. */
    if ( unlikely(consumer_is_xen(chn1)) && guest )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ: {
        struct pirq *pirq = pirq_info(d1, chn1->u.pirq.irq);

        if ( !pirq )
            break;
        if ( !is_hvm_domain(d1) )
            pirq_guest_unbind(d1, pirq);
        pirq->evtchn = 0;
        pirq_cleanup_check(pirq, d1);
        unlink_pirq_port(chn1, d1->vcpu[chn1->notify_vcpu_id]);
#ifdef CONFIG_X86
        if ( is_hvm_domain(d1) && domain_pirq_to_irq(d1, pirq->pirq) > 0 )
            unmap_domain_pirq_emuirq(d1, pirq->pirq);
#endif
        break;
    }

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
        {
            if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
                continue;
            v->virq_to_evtchn[chn1->u.virq] = 0;
            spin_barrier(&v->virq_lock);
        }
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
                BUG();

            if ( d1 < d2 )
            {
                spin_lock(&d2->event_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->event_lock);
                spin_lock(&d2->event_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        double_evtchn_lock(chn1, chn2);

        evtchn_free(d1, chn1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;

        double_evtchn_unlock(chn1, chn2);

        goto out;

    default:
        BUG();
    }

    spin_lock(&chn1->lock);
    evtchn_free(d1, chn1);
    spin_unlock(&chn1->lock);

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->event_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->event_lock);

    return rc;
}

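/*
 * Send an event on a local port. An interdomain send either invokes the
 * remote end's Xen consumer callback or marks the remote port pending; an
 * IPI marks the local port pending; a send on an unbound port is silently
 * dropped.
 */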
int evtchn_send(struct domain *ld, unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *rd;
    int rport, ret = 0;

    if ( !port_is_valid(ld, lport) )
        return -EINVAL;

    lchn = evtchn_from_port(ld, lport);

    spin_lock(&lchn->lock);

    /* Guest cannot send via a Xen-attached event channel. */
    if ( unlikely(consumer_is_xen(lchn)) )
    {
        ret = -EINVAL;
        goto out;
    }

    ret = xsm_evtchn_send(XSM_HOOK, ld, lchn);
    if ( ret )
        goto out;

    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn = evtchn_from_port(rd, rport);
        if ( consumer_is_xen(rchn) )
            xen_notification_fn(rchn)(rd->vcpu[rchn->notify_vcpu_id], rport);
        else
            evtchn_port_set_pending(rd, rchn->notify_vcpu_id, rchn);
        break;
    case ECS_IPI:
        evtchn_port_set_pending(ld, lchn->notify_vcpu_id, lchn);
        break;
    case ECS_UNBOUND:
        /* silently drop the notification */
        break;
    default:
        ret = -EINVAL;
    }

 out:
    spin_unlock(&lchn->lock);

    return ret;
}

int guest_enabled_event(struct vcpu *v, uint32_t virq)
{
    return ((v != NULL) && (v->virq_to_evtchn[virq] != 0));
}

void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq)
{
    unsigned long flags;
    int port;
    struct domain *d;

    ASSERT(!virq_is_global(virq));

    spin_lock_irqsave(&v->virq_lock, flags);

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        goto out;

    d = v->domain;
    evtchn_port_set_pending(d, v->vcpu_id, evtchn_from_port(d, port));

 out:
    spin_unlock_irqrestore(&v->virq_lock, flags);
}

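/*
 * Deliver a global VIRQ to a domain. Global VIRQ bindings always live in
 * vCPU0's virq_to_evtchn[] table, although the notification itself may
 * have been re-bound to another vCPU (chn->notify_vcpu_id).
 */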
static void send_guest_global_virq(struct domain *d, uint32_t virq)
{
    unsigned long flags;
    int port;
    struct vcpu *v;
    struct evtchn *chn;

    ASSERT(virq_is_global(virq));

    if ( unlikely(d == NULL) || unlikely(d->vcpu == NULL) )
        return;

    v = d->vcpu[0];
    if ( unlikely(v == NULL) )
        return;

    spin_lock_irqsave(&v->virq_lock, flags);

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        goto out;

    chn = evtchn_from_port(d, port);
    evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);

 out:
    spin_unlock_irqrestore(&v->virq_lock, flags);
}

void send_guest_pirq(struct domain *d, const struct pirq *pirq)
{
    int port;
    struct evtchn *chn;

    /*
     * PV guests: It should not be possible to race with __evtchn_close(). The
     * caller of this function must synchronise with pirq_guest_unbind().
     * HVM guests: Port is legitimately zero when the guest disables the
     * emulated interrupt/evtchn.
     */
    if ( pirq == NULL || (port = pirq->evtchn) == 0 )
    {
        BUG_ON(!is_hvm_domain(d));
        return;
    }

    chn = evtchn_from_port(d, port);
    evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
}

static struct domain *global_virq_handlers[NR_VIRQS] __read_mostly;

static DEFINE_SPINLOCK(global_virq_handlers_lock);

void send_global_virq(uint32_t virq)
{
    ASSERT(virq < NR_VIRQS);
    ASSERT(virq_is_global(virq));

    send_guest_global_virq(global_virq_handlers[virq] ?: hardware_domain, virq);
}

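/*
 * Make @d the handler of @virq in place of the hardware domain, taking a
 * domain reference that is dropped when the handler changes again or the
 * domain dies.
 */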
int set_global_virq_handler(struct domain *d, uint32_t virq)
{
    struct domain *old;

    if ( virq >= NR_VIRQS )
        return -EINVAL;
    if ( !virq_is_global(virq) )
        return -EINVAL;

    if ( global_virq_handlers[virq] == d )
        return 0;

    if ( unlikely(!get_domain(d)) )
        return -EINVAL;

    spin_lock(&global_virq_handlers_lock);
    old = global_virq_handlers[virq];
    global_virq_handlers[virq] = d;
    spin_unlock(&global_virq_handlers_lock);

    if ( old != NULL )
        put_domain(old);

    return 0;
}

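/* Drop a dying domain's global-VIRQ handler nominations and references. */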
static void clear_global_virq_handlers(struct domain *d)
{
    uint32_t virq;
    int put_count = 0;

    spin_lock(&global_virq_handlers_lock);

    for ( virq = 0; virq < NR_VIRQS; virq++ )
    {
        if ( global_virq_handlers[virq] == d )
        {
            global_virq_handlers[virq] = NULL;
            put_count++;
        }
    }

    spin_unlock(&global_virq_handlers_lock);

    while ( put_count )
    {
        put_domain(d);
        put_count--;
    }
}

int evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t dom = status->dom;
    int port = status->port;
    struct evtchn *chn;
    long rc = 0;

    d = rcu_lock_domain_by_any_id(dom);
    if ( d == NULL )
        return -ESRCH;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_status(XSM_TARGET, d, chn);
    if ( rc )
        goto out;

    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq.irq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->event_lock);
    rcu_unlock_domain(d);

    return rc;
}

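/*
 * Re-target notifications for an existing port at a different vCPU. Only
 * global VIRQs, unbound/interdomain channels, and PIRQs may be re-bound;
 * for PIRQs the hardware IRQ affinity moves along with the binding.
 */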
long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
    struct domain *d = current->domain;
    struct evtchn *chn;
    long rc = 0;

    if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) )
        return -ENOENT;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    /* Guest cannot re-bind a Xen-attached event channel. */
    if ( unlikely(consumer_is_xen(chn)) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn->state )
    {
    case ECS_VIRQ:
        if ( virq_is_global(chn->u.virq) )
            chn->notify_vcpu_id = vcpu_id;
        else
            rc = -EINVAL;
        break;
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
        chn->notify_vcpu_id = vcpu_id;
        break;
    case ECS_PIRQ:
        if ( chn->notify_vcpu_id == vcpu_id )
            break;
        unlink_pirq_port(chn, d->vcpu[chn->notify_vcpu_id]);
        chn->notify_vcpu_id = vcpu_id;
        pirq_set_affinity(d, chn->u.pirq.irq,
                          cpumask_of(d->vcpu[vcpu_id]->processor));
        link_pirq_port(port, chn, d->vcpu[vcpu_id]);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->event_lock);

    return rc;
}


int evtchn_unmask(unsigned int port)
{
    struct domain *d = current->domain;
    struct evtchn *evtchn;

    if ( unlikely(!port_is_valid(d, port)) )
        return -EINVAL;

    evtchn = evtchn_from_port(d, port);
    evtchn_port_unmask(d, evtchn);

    return 0;
}

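/*
 * Close every port of @d and, if the FIFO ABI was in use, revert to the
 * default 2-level ABI. Permitted only to the domain itself or to a
 * controller that has the domain paused.
 */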
int evtchn_reset(struct domain *d)
{
    unsigned int i;

    if ( d != current->domain && !d->controller_pause_count )
        return -EINVAL;

    for ( i = 0; port_is_valid(d, i); i++ )
        evtchn_close(d, i, 1);

    spin_lock(&d->event_lock);

    if ( d->evtchn_fifo )
    {
        /* Switching back to 2-level ABI. */
        evtchn_fifo_destroy(d);
        evtchn_2l_init(d);
    }

    spin_unlock(&d->event_lock);

    return 0;
}

static long evtchn_set_priority(const struct evtchn_set_priority *set_priority)
{
    struct domain *d = current->domain;
    unsigned int port = set_priority->port;
    long ret;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        spin_unlock(&d->event_lock);
        return -EINVAL;
    }

    ret = evtchn_port_set_priority(d, evtchn_from_port(d, port),
                                   set_priority->priority);

    spin_unlock(&d->event_lock);

    return ret;
}

long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( !rc && __copy_to_guest(arg, &alloc_unbound, 1) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain);
        if ( !rc && __copy_to_guest(arg, &bind_interdomain, 1) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq, 0);
        if ( !rc && __copy_to_guest(arg, &bind_virq, 1) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( !rc && __copy_to_guest(arg, &bind_ipi, 1) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( !rc && __copy_to_guest(arg, &bind_pirq, 1) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_close(current->domain, close.port, 1);
        break;
    }

    case EVTCHNOP_send: {
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(current->domain, send.port);
        break;
    }

    case EVTCHNOP_status: {
        struct evtchn_status status;
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( !rc && __copy_to_guest(arg, &status, 1) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(unmask.port);
        break;
    }

    case EVTCHNOP_reset: {
        struct evtchn_reset reset;
        struct domain *d;

        if ( copy_from_guest(&reset, arg, 1) != 0 )
            return -EFAULT;

        d = rcu_lock_domain_by_any_id(reset.dom);
        if ( d == NULL )
            return -ESRCH;

        rc = xsm_evtchn_reset(XSM_TARGET, current->domain, d);
        if ( !rc )
            rc = evtchn_reset(d);

        rcu_unlock_domain(d);
        break;
    }

    case EVTCHNOP_init_control: {
        struct evtchn_init_control init_control;
        if ( copy_from_guest(&init_control, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_fifo_init_control(&init_control);
        if ( !rc && __copy_to_guest(arg, &init_control, 1) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_expand_array: {
        struct evtchn_expand_array expand_array;
        if ( copy_from_guest(&expand_array, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_fifo_expand_array(&expand_array);
        break;
    }

    case EVTCHNOP_set_priority: {
        struct evtchn_set_priority set_priority;
        if ( copy_from_guest(&set_priority, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_set_priority(&set_priority);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

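/*
 * Allocate an unbound port whose consumer is Xen itself: @notification_fn
 * (or the default wakeup handler when NULL) is invoked instead of the
 * normal guest notification. Returns the port number or a negative errno.
 */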
int alloc_unbound_xen_event_channel(
    struct domain *ld, unsigned int lvcpu, domid_t remote_domid,
    xen_event_channel_notification_t notification_fn)
{
    struct evtchn *chn;
    int port, rc;

    spin_lock(&ld->event_lock);

    rc = get_free_port(ld);
    if ( rc < 0 )
        goto out;
    port = rc;
    chn = evtchn_from_port(ld, port);

    rc = xsm_evtchn_unbound(XSM_TARGET, ld, chn, remote_domid);
    if ( rc )
        goto out;

    spin_lock(&chn->lock);

    chn->state = ECS_UNBOUND;
    chn->xen_consumer = get_xen_consumer(notification_fn);
    chn->notify_vcpu_id = lvcpu;
    chn->u.unbound.remote_domid = remote_domid;

    spin_unlock(&chn->lock);

 out:
    spin_unlock(&ld->event_lock);

    return rc < 0 ? rc : port;
}

void free_xen_event_channel(struct domain *d, int port)
{
    BUG_ON(!port_is_valid(d, port));

    evtchn_close(d, port, 0);
}

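/*
 * Raise an event on a Xen-attached interdomain channel. If the port has
 * meanwhile been closed, the notification is silently dropped.
 */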
void notify_via_xen_event_channel(struct domain *ld, int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *rd;

    ASSERT(port_is_valid(ld, lport));
    lchn = evtchn_from_port(ld, lport);

    spin_lock(&lchn->lock);

    if ( likely(lchn->state == ECS_INTERDOMAIN) )
    {
        ASSERT(consumer_is_xen(lchn));
        rd = lchn->u.interdomain.remote_dom;
        rchn = evtchn_from_port(rd, lchn->u.interdomain.remote_port);
        evtchn_port_set_pending(rd, rchn->notify_vcpu_id, rchn);
    }

    spin_unlock(&lchn->lock);
}

void evtchn_check_pollers(struct domain *d, unsigned int port)
{
    struct vcpu *v;
    unsigned int vcpuid;

    /* Check if some VCPU might be polling for this event. */
    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
        return;

    /* Wake any interested (or potentially interested) pollers. */
    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
          vcpuid < d->max_vcpus;
          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
    {
        v = d->vcpu[vcpuid];
        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
             test_and_clear_bit(vcpuid, d->poll_mask) )
        {
            v->poll_evtchn = 0;
            vcpu_unblock(v);
        }
    }
}

int evtchn_init(struct domain *d)
{
    evtchn_2l_init(d);
    d->max_evtchn_port = INT_MAX;

    d->evtchn = alloc_evtchn_bucket(d, 0);
    if ( !d->evtchn )
        return -ENOMEM;
    d->valid_evtchns = EVTCHNS_PER_BUCKET;

    spin_lock_init_prof(d, event_lock);
    if ( get_free_port(d) != 0 )
    {
        free_evtchn_bucket(d, d->evtchn);
        return -EINVAL;
    }
    evtchn_from_port(d, 0)->state = ECS_RESERVED;

#if MAX_VIRT_CPUS > BITS_PER_LONG
    d->poll_mask = xzalloc_array(unsigned long,
                                 BITS_TO_LONGS(domain_max_vcpus(d)));
    if ( !d->poll_mask )
    {
        free_evtchn_bucket(d, d->evtchn);
        return -ENOMEM;
    }
#endif

    return 0;
}


void evtchn_destroy(struct domain *d)
{
    unsigned int i;

    /* After this barrier no new event-channel allocations can occur. */
    BUG_ON(!d->is_dying);
    spin_barrier(&d->event_lock);

    /* Close all existing event channels. */
    for ( i = 0; port_is_valid(d, i); i++ )
        evtchn_close(d, i, 0);

    clear_global_virq_handlers(d);

    evtchn_fifo_destroy(d);
}


void evtchn_destroy_final(struct domain *d)
{
    unsigned int i, j;

    /* Free all event-channel buckets. */
    for ( i = 0; i < NR_EVTCHN_GROUPS; i++ )
    {
        if ( !d->evtchn_group[i] )
            continue;
        for ( j = 0; j < BUCKETS_PER_GROUP; j++ )
            free_evtchn_bucket(d, d->evtchn_group[i][j]);
        xfree(d->evtchn_group[i]);
    }
    free_evtchn_bucket(d, d->evtchn);

#if MAX_VIRT_CPUS > BITS_PER_LONG
    xfree(d->poll_mask);
    d->poll_mask = NULL;
#endif
}

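/*
 * A vCPU has moved to a new pCPU: re-point the affinity of every hardware
 * IRQ whose PIRQ port is bound to this vCPU at the new processor.
 */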
void evtchn_move_pirqs(struct vcpu *v)
{
    struct domain *d = v->domain;
    const cpumask_t *mask = cpumask_of(v->processor);
    unsigned int port;
    struct evtchn *chn;

    spin_lock(&d->event_lock);
    for ( port = v->pirq_evtchn_head; port; port = chn->u.pirq.next_port )
    {
        chn = evtchn_from_port(d, port);
        pirq_set_affinity(d, chn->u.pirq.irq, mask);
    }
    spin_unlock(&d->event_lock);
}

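/* Dump every in-use port of @d for the 'e' debug key. */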
static void domain_dump_evtchn_info(struct domain *d)
{
    unsigned int port;
    int irq;

    bitmap_scnlistprintf(keyhandler_scratch, sizeof(keyhandler_scratch),
                         d->poll_mask, d->max_vcpus);
    printk("Event channel information for domain %d:\n"
           "Polling vCPUs: {%s}\n"
           "    port [p/m/s]\n", d->domain_id, keyhandler_scratch);

    spin_lock(&d->event_lock);

    for ( port = 1; port < d->max_evtchns; ++port )
    {
        const struct evtchn *chn;
        char *ssid;

        if ( !port_is_valid(d, port) )
            continue;
        chn = evtchn_from_port(d, port);
        if ( chn->state == ECS_FREE )
            continue;

        printk("    %4u [%d/%d/",
               port,
               evtchn_port_is_pending(d, port),
               evtchn_port_is_masked(d, port));
        evtchn_port_print_state(d, chn);
        printk("]: s=%d n=%d x=%d",
               chn->state, chn->notify_vcpu_id, chn->xen_consumer);

        switch ( chn->state )
        {
        case ECS_UNBOUND:
            printk(" d=%d", chn->u.unbound.remote_domid);
            break;
        case ECS_INTERDOMAIN:
            printk(" d=%d p=%d",
                   chn->u.interdomain.remote_dom->domain_id,
                   chn->u.interdomain.remote_port);
            break;
        case ECS_PIRQ:
            irq = domain_pirq_to_irq(d, chn->u.pirq.irq);
            printk(" p=%d i=%d", chn->u.pirq.irq, irq);
            break;
        case ECS_VIRQ:
            printk(" v=%d", chn->u.virq);
            break;
        }

        ssid = xsm_show_security_evtchn(d, chn);
        if ( ssid )
        {
            printk(" Z=%s\n", ssid);
            xfree(ssid);
        }
        else
            printk("\n");
    }

    spin_unlock(&d->event_lock);
}

static void dump_evtchn_info(unsigned char key)
{
    struct domain *d;

    printk("'%c' pressed -> dumping event-channel info\n", key);

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
        domain_dump_evtchn_info(d);

    rcu_read_unlock(&domlist_read_lock);
}

static int __init dump_evtchn_info_key_init(void)
{
    register_keyhandler('e', dump_evtchn_info, "dump evtchn info", 1);
    return 0;
}
__initcall(dump_evtchn_info_key_init);

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */