/*
 * ARM Virtual Generic Interrupt Controller support
 *
 * Ian Campbell <ian.campbell@citrix.com>
 * Copyright (c) 2011 Citrix Systems.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
17
18 #ifndef __ASM_ARM_VGIC_H__
19 #define __ASM_ARM_VGIC_H__
20
21 #include <xen/bitops.h>
22 #include <asm/mmio.h>
23 #include <asm/vreg.h>
24
25 struct pending_irq
26 {
27 /*
28 * The following two states track the lifecycle of the guest irq.
29 * However because we are not sure and we don't want to track
30 * whether an irq added to an LR register is PENDING or ACTIVE, the
31 * following states are just an approximation.
32 *
33 * GIC_IRQ_GUEST_QUEUED: the irq is asserted and queued for
34 * injection into the guest's LRs.
35 *
36 * GIC_IRQ_GUEST_VISIBLE: the irq has been added to an LR register,
37 * therefore the guest is aware of it. From the guest point of view
38 * the irq can be pending (if the guest has not acked the irq yet)
39 * or active (after acking the irq).
40 *
41 * In order for the state machine to be fully accurate, for level
42 * interrupts, we should keep the interrupt's pending state until
43 * the guest deactivates the irq. However because we are not sure
44 * when that happens, we instead track whether there is an interrupt
45 * queued using GIC_IRQ_GUEST_QUEUED. We clear it when we add it to
46 * an LR register. We set it when we receive another interrupt
47 * notification. Therefore it is possible to set
48 * GIC_IRQ_GUEST_QUEUED while the irq is GIC_IRQ_GUEST_VISIBLE. We
49 * could also change the state of the guest irq in the LR register
50 * from active to active and pending, but for simplicity we simply
51 * inject a second irq after the guest EOIs the first one.
52 *
53 *
54 * An additional state is used to keep track of whether the guest
55 * irq is enabled at the vgicd level:
56 *
57 * GIC_IRQ_GUEST_ENABLED: the guest IRQ is enabled at the VGICD
58 * level (GICD_ICENABLER/GICD_ISENABLER).
59 *
60 * GIC_IRQ_GUEST_MIGRATING: the irq is being migrated to a different
61 * vcpu while it is still inflight and on an GICH_LR register on the
62 * old vcpu.
63 *
64 * GIC_IRQ_GUEST_PRISTINE_LPI: the IRQ is a newly mapped LPI, which
65 * has never been in an LR before. This means that any trace of an
66 * LPI with the same number in an LR must be from an older LPI, which
67 * has been unmapped before.
68 *
69 */
70 #define GIC_IRQ_GUEST_QUEUED 0
71 #define GIC_IRQ_GUEST_ACTIVE 1
72 #define GIC_IRQ_GUEST_VISIBLE 2
73 #define GIC_IRQ_GUEST_ENABLED 3
74 #define GIC_IRQ_GUEST_MIGRATING 4
75 #define GIC_IRQ_GUEST_PRISTINE_LPI 5
76 unsigned long status;
77 struct irq_desc *desc; /* only set it the irq corresponds to a physical irq */
78 unsigned int irq;
79 #define GIC_INVALID_LR (uint8_t)~0
80 uint8_t lr;
81 uint8_t priority;
82 uint8_t lpi_priority; /* Caches the priority if this is an LPI. */
83 uint8_t lpi_vcpu_id; /* The VCPU for an LPI. */
84 /* inflight is used to append instances of pending_irq to
85 * vgic.inflight_irqs */
86 struct list_head inflight;
87 /* lr_queue is used to append instances of pending_irq to
88 * lr_pending. lr_pending is a per vcpu queue, therefore lr_queue
89 * accesses are protected with the vgic lock.
90 * TODO: when implementing irq migration, taking only the current
91 * vgic lock is not going to be enough. */
92 struct list_head lr_queue;
93 };
94
/* Each rank of interrupt registers covers 32 interrupts. */
#define NR_INTERRUPT_PER_RANK   32
#define INTERRUPT_RANK_MASK (NR_INTERRUPT_PER_RANK - 1)
97
98 /* Represents state corresponding to a block of 32 interrupts */
99 struct vgic_irq_rank {
100 spinlock_t lock; /* Covers access to all other members of this struct */
101
102 uint8_t index;
103
104 uint32_t ienable;
105 uint32_t icfg[2];
106
107 /*
108 * Provide efficient access to the priority of an vIRQ while keeping
109 * the emulation simple.
110 * Note, this is working fine as long as Xen is using little endian.
111 */
112 union {
113 uint8_t priority[32];
114 uint32_t ipriorityr[8];
115 };
116
117 /*
118 * It's more convenient to store a target VCPU per vIRQ
119 * than the register ITARGETSR/IROUTER itself.
120 * Use atomic operations to read/write the vcpu fields to avoid
121 * taking the rank lock.
122 */
123 uint8_t vcpu[32];
124 };
125
/* Destination of an SGI: affinity-1 cluster plus a bitmap of target vCPUs. */
struct sgi_target {
    uint8_t aff1;   /* Affinity level 1 of the targeted cluster. */
    uint16_t list;  /* Target-list bitmap within that cluster. */
};
130
sgi_target_init(struct sgi_target * sgi_target)131 static inline void sgi_target_init(struct sgi_target *sgi_target)
132 {
133 sgi_target->aff1 = 0;
134 sgi_target->list = 0;
135 }
136
/* Hooks implemented by each vGIC flavour (v2/v3); see register_vgic_ops(). */
struct vgic_ops {
    /* Initialize vGIC */
    int (*vcpu_init)(struct vcpu *v);
    /* Domain specific initialization of vGIC */
    int (*domain_init)(struct domain *d);
    /* Release resources that were allocated by domain_init */
    void (*domain_free)(struct domain *d);
    /* vGIC sysreg/cpregs emulate */
    bool (*emulate_reg)(struct cpu_user_regs *regs, union hsr hsr);
    /* lookup the struct pending_irq for a given LPI interrupt */
    struct pending_irq *(*lpi_to_pending)(struct domain *d, unsigned int vlpi);
    /* lookup the priority of a given LPI interrupt */
    int (*lpi_get_priority)(struct domain *d, uint32_t vlpi);
    /* Maximum number of vCPU supported */
    const unsigned int max_vcpus;
};
152
/* Number of ranks of interrupt registers for a domain */
#define DOMAIN_NR_RANKS(d) (((d)->arch.vgic.nr_spis+31)/32)

/* Grab/release the domain-wide vgic lock. */
#define vgic_lock(v)   spin_lock_irq(&(v)->domain->arch.vgic.lock)
#define vgic_unlock(v) spin_unlock_irq(&(v)->domain->arch.vgic.lock)

/* Grab/release the per-rank lock; see struct vgic_irq_rank. */
#define vgic_lock_rank(v, r, flags)   spin_lock_irqsave(&(r)->lock, flags)
#define vgic_unlock_rank(v, r, flags) spin_unlock_irqrestore(&(r)->lock, flags)
161
/*
 * Rank containing GICD_<FOO><n> for GICD_<FOO> with
 * <b>-bits-per-interrupt
 */
static inline int REG_RANK_NR(int b, uint32_t n)
{
    switch ( b )
    {
    /*
     * IRQ ranks are of size 32. So n cannot be shifted beyond 5 for 32
     * and above. For 64-bit n is already shifted DABT_DOUBLE_WORD
     * by the caller
     */
    case 64:
    case 32: return n >> 5;
    case 16: return n >> 4;
    case 8: return n >> 3;
    case 4: return n >> 2;
    case 2: return n >> 1;
    case 1: return n;
    default: BUG();
    }
}
185
enum gic_sgi_mode;

/*
 * Offset of GICD_<FOO><n> with its rank, for GICD_<FOO> size <s> with
 * <b>-bits-per-interrupt.
 * Note: every macro parameter is parenthesized to avoid precedence
 * surprises when callers pass expressions.
 */
#define REG_RANK_INDEX(b, n, s) ((((n) >> (s)) & ((b)-1)) % 32)
193
/*
 * At the moment vgic_num_irqs() just covers SPIs and the private IRQs,
 * as it's mostly used for allocating the pending_irq and irq_desc array,
 * in which LPIs don't participate.
 */
#define vgic_num_irqs(d) ((d)->arch.vgic.nr_spis + 32)
200
201 extern int domain_vgic_init(struct domain *d, unsigned int nr_spis);
202 extern void domain_vgic_free(struct domain *d);
203 extern int vcpu_vgic_init(struct vcpu *v);
204 extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq);
205 extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq);
206 extern void vgic_vcpu_inject_spi(struct domain *d, unsigned int virq);
207 extern void vgic_clear_pending_irqs(struct vcpu *v);
208 extern void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq);
209 extern struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq);
210 extern struct pending_irq *spi_to_pending(struct domain *d, unsigned int irq);
211 extern struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n, int s);
212 extern struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq);
213 extern bool vgic_emulate(struct cpu_user_regs *regs, union hsr hsr);
214 extern void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n);
215 extern void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n);
216 extern void register_vgic_ops(struct domain *d, const struct vgic_ops *ops);
217 int vgic_v2_init(struct domain *d, int *mmio_count);
218 int vgic_v3_init(struct domain *d, int *mmio_count);
219
220 extern int domain_vgic_register(struct domain *d, int *mmio_count);
221 extern int vcpu_vgic_free(struct vcpu *v);
222 extern bool vgic_to_sgi(struct vcpu *v, register_t sgir,
223 enum gic_sgi_mode irqmode, int virq,
224 const struct sgi_target *target);
225 extern bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq);
226
227 /* Reserve a specific guest vIRQ */
228 extern bool vgic_reserve_virq(struct domain *d, unsigned int virq);
229
230 /*
231 * Allocate a guest VIRQ
232 * - spi == 0 => allocate a PPI. It will be the same on every vCPU
233 * - spi == 1 => allocate an SPI
234 */
235 extern int vgic_allocate_virq(struct domain *d, bool spi);
236
/* Allocate a guest PPI (same vIRQ number on every vCPU). */
static inline int vgic_allocate_ppi(struct domain *d)
{
    return vgic_allocate_virq(d, false /* ppi */);
}
241
/* Allocate a guest SPI. */
static inline int vgic_allocate_spi(struct domain *d)
{
    return vgic_allocate_virq(d, true /* spi */);
}
246
247 extern void vgic_free_virq(struct domain *d, unsigned int virq);
248
249 void vgic_v2_setup_hw(paddr_t dbase, paddr_t cbase, paddr_t csize,
250 paddr_t vbase, uint32_t aliased_offset);
251
252 #ifdef CONFIG_HAS_GICV3
253 struct rdist_region;
254 void vgic_v3_setup_hw(paddr_t dbase,
255 unsigned int nr_rdist_regions,
256 const struct rdist_region *regions,
257 uint32_t rdist_stride,
258 unsigned int intid_bits);
259 #endif
260
261 #endif /* __ASM_ARM_VGIC_H__ */
262
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */
271