/*
 * VGICv2 MMIO handling functions
 * Imported from Linux ("new" KVM VGIC) and heavily adapted to Xen.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <xen/bitops.h>
#include <xen/sched.h>
#include <xen/sizes.h>
#include <asm/new_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

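/*
 * Handle reads of the "miscellaneous" distributor registers in the first
 * 16 bytes of the frame: GICD_CTLR, GICD_TYPER and GICD_IIDR.
 * GICD_TYPER encodes the number of supported IRQs as multiples of 32
 * (minus one) plus the number of implemented CPU interfaces.
 */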
static unsigned long vgic_mmio_read_v2_misc(struct vcpu *vcpu,
                                            paddr_t addr, unsigned int len)
{
    uint32_t value;

    switch ( addr & 0x0c )      /* filter for the 4 registers handled here */
    {
    case GICD_CTLR:
        value = vcpu->domain->arch.vgic.enabled ? GICD_CTL_ENABLE : 0;
        break;
    case GICD_TYPER:
        value = vcpu->domain->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
        value = (value >> 5) - 1;       /* stored as multiples of 32 */
        value |= (vcpu->domain->max_vcpus - 1) << GICD_TYPE_CPUS_SHIFT;
        break;
    case GICD_IIDR:
        value = (PRODUCT_ID_KVM << 24) |
                (VARIANT_ID_XEN << 16) |
                (IMPLEMENTER_ARM << 0);
        break;
    default:
        return 0;
    }

    return value;
}

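/*
 * Handle writes to the same register group. Only GICD_CTLR is actually
 * writable: flipping the distributor from disabled to enabled kicks all
 * VCPUs, so that pending interrupts get a chance to be injected.
 */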
static void vgic_mmio_write_v2_misc(struct vcpu *vcpu,
                                    paddr_t addr, unsigned int len,
                                    unsigned long val)
{
    struct vgic_dist *dist = &vcpu->domain->arch.vgic;
    bool enabled;

    switch ( addr & 0x0c )      /* filter for the 4 registers handled here */
    {
    case GICD_CTLR:
        domain_lock(vcpu->domain);

        /*
         * Store the new enabled state in our distributor structure.
         * Work out whether it was disabled before and now got enabled,
         * so that we signal all VCPUs to check for interrupts to be injected.
         */
        enabled = dist->enabled;
        dist->enabled = val & GICD_CTL_ENABLE;
        enabled = !enabled && dist->enabled;

        domain_unlock(vcpu->domain);

        if ( enabled )
            vgic_kick_vcpus(vcpu->domain);

        break;
    case GICD_TYPER:
    case GICD_IIDR:
        /* read-only, writes ignored */
        return;
    }
}

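/*
 * A write to GICD_SGIR raises software generated interrupts on other
 * VCPUs. Bits [25:24] (the "target list filter") select whether the SGI
 * goes to the CPUs named in bits [23:16], to all CPUs but the requester,
 * or to the requester itself; bits [3:0] hold the SGI number. For each
 * targeted VCPU we latch the SGI pending and record the source VCPU,
 * as the per-source SGI pending registers require.
 */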
static void vgic_mmio_write_sgir(struct vcpu *source_vcpu,
                                 paddr_t addr, unsigned int len,
                                 unsigned long val)
{
    struct domain *d = source_vcpu->domain;
    unsigned int nr_vcpus = d->max_vcpus;
    unsigned int intid = val & GICD_SGI_INTID_MASK;
    unsigned long targets = (val & GICD_SGI_TARGET_MASK) >>
                            GICD_SGI_TARGET_SHIFT;
    unsigned int vcpu_id;

    switch ( val & GICD_SGI_TARGET_LIST_MASK )
    {
    case GICD_SGI_TARGET_LIST:                    /* as specified by targets */
        targets &= GENMASK(nr_vcpus - 1, 0);      /* limit to existing VCPUs */
        break;
    case GICD_SGI_TARGET_OTHERS:
        targets = GENMASK(nr_vcpus - 1, 0);       /* all, ...   */
        targets &= ~(1U << source_vcpu->vcpu_id); /*   but self */
        break;
    case GICD_SGI_TARGET_SELF:                    /* this very vCPU only */
        targets = (1U << source_vcpu->vcpu_id);
        break;
    case 0x3:                                     /* reserved */
        return;
    }

    for_each_set_bit( vcpu_id, &targets, 8 )
    {
        struct vcpu *vcpu = d->vcpu[vcpu_id];
        struct vgic_irq *irq = vgic_get_irq(d, vcpu, intid);
        unsigned long flags;

        spin_lock_irqsave(&irq->irq_lock, flags);

        irq->pending_latch = true;
        irq->source |= 1U << source_vcpu->vcpu_id;

        vgic_queue_irq_unlock(d, irq, flags);
        vgic_put_irq(d, irq);
    }
}

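/*
 * GICD_ITARGETSR holds one target byte per interrupt, so a read collects
 * the "targets" byte of each of the (up to 4) interrupts covered by this
 * access.
 */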
static unsigned long vgic_mmio_read_target(struct vcpu *vcpu,
                                           paddr_t addr, unsigned int len)
{
    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8);
    uint32_t val = 0;
    unsigned int i;

    for ( i = 0; i < len; i++ )
    {
        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);

        val |= (uint32_t)irq->targets << (i * 8);

        vgic_put_irq(vcpu->domain, irq);
    }

    return val;
}

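/*
 * Writes update the target byte of each covered SPI. The lowest-numbered
 * VCPU in the written mask becomes the effective target; for hardware
 * mapped interrupts the physical affinity is updated to follow it. The
 * first 8 registers cover SGIs and PPIs and are read-only by architecture.
 */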
static void vgic_mmio_write_target(struct vcpu *vcpu,
                                   paddr_t addr, unsigned int len,
                                   unsigned long val)
{
    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8);
    uint8_t cpu_mask = GENMASK(vcpu->domain->max_vcpus - 1, 0);
    unsigned int i;
    unsigned long flags;

    /* GICD_ITARGETSR[0-7] are read-only */
    if ( intid < VGIC_NR_PRIVATE_IRQS )
        return;

    for ( i = 0; i < len; i++ )
    {
        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, NULL, intid + i);

        spin_lock_irqsave(&irq->irq_lock, flags);

        irq->targets = (val >> (i * 8)) & cpu_mask;
        if ( irq->targets )
        {
            irq->target_vcpu = vcpu->domain->vcpu[ffs(irq->targets) - 1];
            if ( irq->hw )
            {
                struct irq_desc *desc = irq_to_desc(irq->hwintid);

                irq_set_affinity(desc, cpumask_of(irq->target_vcpu->processor));
            }
        }
        else
            irq->target_vcpu = NULL;

        spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->domain, irq);
    }
}

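/*
 * GICD_CPENDSGIR and GICD_SPENDSGIR expose, for each SGI, a bitmap of
 * the CPUs that have that SGI pending. Reads simply return the stored
 * per-IRQ source mask, one byte per SGI.
 */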
static unsigned long vgic_mmio_read_sgipend(struct vcpu *vcpu,
                                            paddr_t addr, unsigned int len)
{
    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8);
    uint32_t val = 0;
    unsigned int i;

    ASSERT(intid < VGIC_NR_SGIS);

    for ( i = 0; i < len; i++ )
    {
        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);

        val |= (uint32_t)irq->source << (i * 8);

        vgic_put_irq(vcpu->domain, irq);
    }

    return val;
}

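/*
 * A write to GICD_CPENDSGIR clears the written source bits; once no
 * source is left, the SGI is no longer pending.
 */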
static void vgic_mmio_write_sgipendc(struct vcpu *vcpu,
                                     paddr_t addr, unsigned int len,
                                     unsigned long val)
{
    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8);
    unsigned int i;
    unsigned long flags;

    ASSERT(intid < VGIC_NR_SGIS);

    for ( i = 0; i < len; i++ )
    {
        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);

        spin_lock_irqsave(&irq->irq_lock, flags);

        irq->source &= ~((val >> (i * 8)) & 0xff);
        if ( !irq->source )
            irq->pending_latch = false;

        spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->domain, irq);
    }
}

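/*
 * A write to GICD_SPENDSGIR sets the written source bits and, if any
 * source is now set, latches the SGI pending and queues it for injection.
 */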
static void vgic_mmio_write_sgipends(struct vcpu *vcpu,
                                     paddr_t addr, unsigned int len,
                                     unsigned long val)
{
    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8);
    unsigned int i;
    unsigned long flags;

    ASSERT(intid < VGIC_NR_SGIS);

    for ( i = 0; i < len; i++ )
    {
        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);

        spin_lock_irqsave(&irq->irq_lock, flags);

        irq->source |= (val >> (i * 8)) & 0xff;

        if ( irq->source )
        {
            irq->pending_latch = true;
            vgic_queue_irq_unlock(vcpu->domain, irq, flags);
        }
        else
        {
            spin_unlock_irqrestore(&irq->irq_lock, flags);
        }
        vgic_put_irq(vcpu->domain, irq);
    }
}

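/*
 * The distributor register map: one entry per register (range), each
 * naming its read and write handlers, its size (a fixed byte length or
 * a number of bits per interrupt) and the access widths it accepts.
 */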
static const struct vgic_register_region vgic_v2_dist_registers[] = {
    REGISTER_DESC_WITH_LENGTH(GICD_CTLR,
        vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
        VGIC_ACCESS_32bit),
    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IGROUPR,
        vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
        VGIC_ACCESS_32bit),
    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER,
        vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
        VGIC_ACCESS_32bit),
    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER,
        vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
        VGIC_ACCESS_32bit),
    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR,
        vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
        VGIC_ACCESS_32bit),
    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICPENDR,
        vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
        VGIC_ACCESS_32bit),
    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISACTIVER,
        vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
        VGIC_ACCESS_32bit),
    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICACTIVER,
        vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
        VGIC_ACCESS_32bit),
    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IPRIORITYR,
        vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
        VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ITARGETSR,
        vgic_mmio_read_target, vgic_mmio_write_target, 8,
        VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICFGR,
        vgic_mmio_read_config, vgic_mmio_write_config, 2,
        VGIC_ACCESS_32bit),
    REGISTER_DESC_WITH_LENGTH(GICD_SGIR,
        vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
        VGIC_ACCESS_32bit),
    REGISTER_DESC_WITH_LENGTH(GICD_CPENDSGIR,
        vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
        VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
    REGISTER_DESC_WITH_LENGTH(GICD_SPENDSGIR,
        vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
        VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

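/*
 * Wire the distributor emulation up to the generic MMIO dispatcher and
 * return the size of the emulated frame: one 4K page for GICv2.
 */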
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
    dev->regions = vgic_v2_dist_registers;
    dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

    return SZ_4K;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */