/******************************************************************************
 * event.h
 *
 * A nice interface for passing asynchronous events to guest OSes.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#ifndef __XEN_EVENT_H__
#define __XEN_EVENT_H__

#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/softirq.h>
#include <xen/bitops.h>
#include <asm/event.h>

/*
 * send_guest_vcpu_virq: Notify guest via a per-VCPU VIRQ.
 * @v:    VCPU to which virtual IRQ should be sent
 * @virq: Virtual IRQ number (VIRQ_*)
 */
void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq);

/*
 * send_global_virq: Notify the domain handling a global VIRQ.
 * @virq: Virtual IRQ number (VIRQ_*)
 */
void send_global_virq(uint32_t virq);
/*
 * set_global_virq_handler: Set a global VIRQ handler.
 * @d:    New target domain for this VIRQ
 * @virq: Virtual IRQ number (VIRQ_*), must be global
 */
int set_global_virq_handler(struct domain *d, uint32_t virq);

/*
 * send_guest_pirq:
 * @d:    Domain to which physical IRQ should be sent
 * @pirq: Physical IRQ number
 */
void send_guest_pirq(struct domain *, const struct pirq *);

/* Send a notification from a given domain's event-channel port. */
int evtchn_send(struct domain *d, unsigned int lport);

/* Bind a local event-channel port to the specified VCPU. */
long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id);

/* Bind a VIRQ. */
int evtchn_bind_virq(evtchn_bind_virq_t *bind, evtchn_port_t port);

/* Get the status of an event channel port. */
int evtchn_status(evtchn_status_t *status);

/* Close an event channel. */
int evtchn_close(struct domain *d1, int port1, bool guest);

/* Free an event channel. */
void evtchn_free(struct domain *d, struct evtchn *chn);

/* Allocate a specific event channel port. */
int evtchn_allocate_port(struct domain *d, unsigned int port);

/* Unmask a local event-channel port. */
int evtchn_unmask(unsigned int port);

/* Move all PIRQs after a vCPU was moved to another pCPU. */
void evtchn_move_pirqs(struct vcpu *v);

/* Allocate/free a Xen-attached event channel port. */
typedef void (*xen_event_channel_notification_t)(
    struct vcpu *v, unsigned int port);
int alloc_unbound_xen_event_channel(
    struct domain *ld, unsigned int lvcpu, domid_t remote_domid,
    xen_event_channel_notification_t notification_fn);
void free_xen_event_channel(struct domain *d, int port);
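
/*
 * Illustrative sketch only (not part of this header): one plausible way a
 * hypervisor subsystem could use a Xen-attached event channel.  The
 * example_notification()/example_setup() helpers are hypothetical; the
 * functions they call are the declarations above.
 */
#if 0 /* example, compiled out */
static void example_notification(struct vcpu *v, unsigned int port)
{
    /* Runs in Xen context when the remote end signals the port. */
    printk(XENLOG_DEBUG "d%d: event on port %u\n",
           v->domain->domain_id, port);
}

static int example_setup(struct domain *d)
{
    /* Bind the Xen end to vCPU 0; let the guest itself be the remote end. */
    int port = alloc_unbound_xen_event_channel(d, 0, d->domain_id,
                                               example_notification);

    if ( port < 0 )
        return port;

    /* Kick the guest through the new port... */
    notify_via_xen_event_channel(d, port);

    /* ...and release it again when the subsystem shuts down. */
    free_xen_event_channel(d, port);

    return 0;
}
#endif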

/* Query whether an event channel is in use by the guest. */
int guest_enabled_event(struct vcpu *v, uint32_t virq);

/* Notify the remote end of a Xen-attached event channel. */
void notify_via_xen_event_channel(struct domain *ld, int lport);

/* Inject an event channel notification into the guest. */
void arch_evtchn_inject(struct vcpu *v);
/*
 * Internal event channel object storage.
 *
 * The objects (struct evtchn) are indexed using a two level scheme of
 * groups and buckets. Each group is a page of bucket pointers. Each
 * bucket is a page-sized array of struct evtchn's.
 *
 * The first bucket is directly accessed via d->evtchn.
 */
#define group_from_port(d, p) \
    ((d)->evtchn_group[(p) / EVTCHNS_PER_GROUP])
#define bucket_from_port(d, p) \
    ((group_from_port(d, p))[((p) % EVTCHNS_PER_GROUP) / EVTCHNS_PER_BUCKET])
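
/*
 * Worked example (numbers are illustrative; the real EVTCHNS_PER_BUCKET and
 * EVTCHNS_PER_GROUP derive from PAGE_SIZE and sizeof(struct evtchn)): with
 * EVTCHNS_PER_BUCKET == 128 and EVTCHNS_PER_GROUP == 64 * 128 == 8192, port
 * 10000 resolves to group 10000 / 8192 == 1, bucket (10000 % 8192) / 128
 * == 14, and slot 10000 % 128 == 16 within that bucket.
 */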

static inline bool_t port_is_valid(struct domain *d, unsigned int p)
{
    if ( p >= d->max_evtchns )
        return 0;
    return p < read_atomic(&d->valid_evtchns);
}

static inline struct evtchn *evtchn_from_port(struct domain *d, unsigned int p)
{
    if ( p < EVTCHNS_PER_BUCKET )
        return &d->evtchn[p];
    return bucket_from_port(d, p) + (p % EVTCHNS_PER_BUCKET);
}

/* Wait on a Xen-attached event channel. */
#define wait_on_xen_event_channel(port, condition)                      \
    do {                                                                \
        if ( condition )                                                \
            break;                                                      \
        set_bit(_VPF_blocked_in_xen, &current->pause_flags);            \
        smp_mb(); /* set blocked status /then/ re-evaluate condition */ \
        if ( condition )                                                \
        {                                                               \
            clear_bit(_VPF_blocked_in_xen, &current->pause_flags);      \
            break;                                                      \
        }                                                               \
        raise_softirq(SCHEDULE_SOFTIRQ);                                \
        do_softirq();                                                   \
    } while ( 0 )
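
/*
 * Illustrative caller pattern only (struct example_state and
 * response_ready() are hypothetical): the macro returns either because the
 * condition held or because the vCPU was scheduled out and later resumed,
 * so callers typically re-check the condition in a loop.
 */
#if 0 /* example, compiled out */
static void example_wait(evtchn_port_t port, struct example_state *s)
{
    while ( !response_ready(s) )
        wait_on_xen_event_channel(port, response_ready(s));
}
#endif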

#define prepare_wait_on_xen_event_channel(port)                         \
    do {                                                                \
        set_bit(_VPF_blocked_in_xen, &current->pause_flags);            \
        raise_softirq(SCHEDULE_SOFTIRQ);                                \
        smp_mb(); /* set blocked status /then/ caller does his work */  \
    } while ( 0 )
139
140 void evtchn_check_pollers(struct domain *d, unsigned int port);
141
142 void evtchn_2l_init(struct domain *d);
143
144 /* Close all event channels and reset to 2-level ABI. */
145 int evtchn_reset(struct domain *d);
146
147 /*
148 * Low-level event channel port ops.
149 */
150 struct evtchn_port_ops {
151 void (*init)(struct domain *d, struct evtchn *evtchn);
152 void (*set_pending)(struct vcpu *v, struct evtchn *evtchn);
153 void (*clear_pending)(struct domain *d, struct evtchn *evtchn);
154 void (*unmask)(struct domain *d, struct evtchn *evtchn);
155 bool (*is_pending)(const struct domain *d, evtchn_port_t port);
156 bool (*is_masked)(const struct domain *d, evtchn_port_t port);
157 /*
158 * Is the port unavailable because it's still being cleaned up
159 * after being closed?
160 */
161 bool (*is_busy)(const struct domain *d, evtchn_port_t port);
162 int (*set_priority)(struct domain *d, struct evtchn *evtchn,
163 unsigned int priority);
164 void (*print_state)(struct domain *d, const struct evtchn *evtchn);
165 };
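
/*
 * Illustrative sketch of an ops table (the real instances live in the
 * event channel ABI implementations, e.g. the 2-level one initialised by
 * evtchn_2l_init() above); the example_* hooks are hypothetical.  Per the
 * wrappers below, init, is_busy and set_priority may be left NULL; the
 * remaining hooks are invoked unconditionally and must be provided.
 */
#if 0 /* example, compiled out */
static const struct evtchn_port_ops example_port_ops = {
    .set_pending   = example_set_pending,
    .clear_pending = example_clear_pending,
    .unmask        = example_unmask,
    .is_pending    = example_is_pending,
    .is_masked     = example_is_masked,
    .print_state   = example_print_state,
};
#endif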

static inline void evtchn_port_init(struct domain *d, struct evtchn *evtchn)
{
    if ( d->evtchn_port_ops->init )
        d->evtchn_port_ops->init(d, evtchn);
}

static inline void evtchn_port_set_pending(struct domain *d,
                                           unsigned int vcpu_id,
                                           struct evtchn *evtchn)
{
    d->evtchn_port_ops->set_pending(d->vcpu[vcpu_id], evtchn);
}

static inline void evtchn_port_clear_pending(struct domain *d,
                                             struct evtchn *evtchn)
{
    d->evtchn_port_ops->clear_pending(d, evtchn);
}

static inline void evtchn_port_unmask(struct domain *d,
                                      struct evtchn *evtchn)
{
    d->evtchn_port_ops->unmask(d, evtchn);
}

static inline bool evtchn_port_is_pending(const struct domain *d,
                                          evtchn_port_t port)
{
    return d->evtchn_port_ops->is_pending(d, port);
}

static inline bool evtchn_port_is_masked(const struct domain *d,
                                         evtchn_port_t port)
{
    return d->evtchn_port_ops->is_masked(d, port);
}

static inline bool evtchn_port_is_busy(const struct domain *d,
                                       evtchn_port_t port)
{
    return d->evtchn_port_ops->is_busy &&
           d->evtchn_port_ops->is_busy(d, port);
}

static inline int evtchn_port_set_priority(struct domain *d,
                                           struct evtchn *evtchn,
                                           unsigned int priority)
{
    if ( !d->evtchn_port_ops->set_priority )
        return -ENOSYS;
    return d->evtchn_port_ops->set_priority(d, evtchn, priority);
}

static inline void evtchn_port_print_state(struct domain *d,
                                           const struct evtchn *evtchn)
{
    d->evtchn_port_ops->print_state(d, evtchn);
}

#endif /* __XEN_EVENT_H__ */