/*
 * Event channel port operations.
 *
 * Copyright (c) 2003-2006, K A Fraser.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2 or later. See the file COPYING for more details.
 */

#include "event_channel.h"

#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>

#include <asm/guest_atomics.h>

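/*
 * Mark @evtchn pending in the shared bitmap. In the 2-level ABI the pending
 * and mask bits live in bitmaps in the shared info page, and a per-vCPU
 * selector word (evtchn_pending_sel) flags which bitmap words may contain
 * pending, unmasked events. The vCPU only needs notifying if the port is
 * unmasked and its selector bit was not already set.
 */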
static void cf_check evtchn_2l_set_pending(
    struct vcpu *v, struct evtchn *evtchn)
{
    struct domain *d = v->domain;
    unsigned int port = evtchn->port;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( guest_test_and_set_bit(d, port, &shared_info(d, evtchn_pending)) )
        return;

    if ( !guest_test_bit(d, port, &shared_info(d, evtchn_mask)) &&
         !guest_test_and_set_bit(d, port / BITS_PER_EVTCHN_WORD(d),
                                 &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    evtchn_check_pollers(d, port);
}

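/* Clear a port's pending bit in the shared bitmap. */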
static void cf_check evtchn_2l_clear_pending(
    struct domain *d, struct evtchn *evtchn)
{
    guest_clear_bit(d, evtchn->port, &shared_info(d, evtchn_pending));
}

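/*
 * Clear a port's mask bit and, if the port was pending while masked,
 * propagate it to the notified vCPU's selector word and kick that vCPU.
 */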
static void cf_check evtchn_2l_unmask(
    struct domain *d, struct evtchn *evtchn)
{
    struct vcpu *v = d->vcpu[evtchn->notify_vcpu_id];
    unsigned int port = evtchn->port;

    /*
     * These operations must happen in strict order. Based on
     * evtchn_2l_set_pending() above.
     */
    if ( guest_test_and_clear_bit(d, port, &shared_info(d, evtchn_mask)) &&
         guest_test_bit(d, port, &shared_info(d, evtchn_pending)) &&
         !guest_test_and_set_bit(d, port / BITS_PER_EVTCHN_WORD(d),
                                 &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }
}

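/*
 * Query a port's pending bit. Ports beyond the 2-level limit of
 * BITS_PER_EVTCHN_WORD(d) squared cannot be represented and read as
 * not pending.
 */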
static bool cf_check evtchn_2l_is_pending(
    const struct domain *d, const struct evtchn *evtchn)
{
    evtchn_port_t port = evtchn->port;
    unsigned int max_ports = BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d);

    ASSERT(port < max_ports);
    return (port < max_ports &&
            guest_test_bit(d, port, &shared_info(d, evtchn_pending)));
}

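/* Query a port's mask bit. Ports beyond the 2-level limit read as masked. */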
static bool cf_check evtchn_2l_is_masked(
    const struct domain *d, const struct evtchn *evtchn)
{
    evtchn_port_t port = evtchn->port;
    unsigned int max_ports = BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d);

    ASSERT(port < max_ports);
    return (port >= max_ports ||
            guest_test_bit(d, port, &shared_info(d, evtchn_mask)));
}

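/*
 * For debug dumps: print whether the notified vCPU's selector bit covering
 * this port is set.
 */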
static void cf_check evtchn_2l_print_state(
    struct domain *d, const struct evtchn *evtchn)
{
    struct vcpu *v = d->vcpu[evtchn->notify_vcpu_id];

    printk("%d", !!test_bit(evtchn->port / BITS_PER_EVTCHN_WORD(d),
                            &vcpu_info(v, evtchn_pending_sel)));
}

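/* 2-level implementation of the per-domain event channel port operations. */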
static const struct evtchn_port_ops evtchn_port_ops_2l =
{
    .set_pending   = evtchn_2l_set_pending,
    .clear_pending = evtchn_2l_clear_pending,
    .unmask        = evtchn_2l_unmask,
    .is_pending    = evtchn_2l_is_pending,
    .is_masked     = evtchn_2l_is_masked,
    .print_state   = evtchn_2l_print_state,
};

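/* Point the domain at the 2-level port operations. */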
void evtchn_2l_init(struct domain *d)
{
    d->evtchn_port_ops = &evtchn_port_ops_2l;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */