/*
 * xen/arch/arm/vgic-v3-its.c
 *
 * ARM Interrupt Translation Service (ITS) emulation
 *
 * Andre Przywara <andre.przywara@arm.com>
 * Copyright (c) 2016,2017 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Locking order:
 *
 * its->vcmd_lock (protects the command queue)
 * its->its_lock (protects the translation tables)
 * d->its_devices_lock (protects the device RB tree)
 * v->vgic.lock (protects the struct pending_irq)
 * d->pend_lpi_tree_lock (protects the radix tree)
 */

#include <xen/bitops.h>
#include <xen/config.h>
#include <xen/domain_page.h>
#include <xen/lib.h>
#include <xen/init.h>
#include <xen/softirq.h>
#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/sizes.h>
#include <asm/current.h>
#include <asm/guest_access.h>
#include <asm/mmio.h>
#include <asm/gic_v3_defs.h>
#include <asm/gic_v3_its.h>
#include <asm/vgic.h>
#include <asm/vgic-emul.h>

/*
 * Data structure to describe a virtual ITS.
 * If both the vcmd_lock and the its_lock are required, the vcmd_lock must
 * be taken first.
 */
struct virt_its {
    struct domain *d;
    struct list_head vits_list;
    paddr_t doorbell_address;
    unsigned int devid_bits;
    unsigned int evid_bits;
    spinlock_t vcmd_lock;       /* Protects the virtual command buffer, which */
    uint64_t cwriter;           /* consists of CWRITER and CREADR and those   */
    uint64_t creadr;            /* shadow variables cwriter and creadr.       */
    /* Protects the rest of this structure, including the ITS tables. */
    spinlock_t its_lock;
    uint64_t cbaser;
    uint64_t baser_dev, baser_coll; /* BASER0 and BASER1 for the guest */
    unsigned int max_collections;
    unsigned int max_devices;
    /* Changing "enabled" requires holding *both* the vcmd_lock and its_lock. */
    bool enabled;
};

/*
 * An Interrupt Translation Table Entry: this is indexed by a
 * DeviceID/EventID pair and is located in guest memory.
 */
struct vits_itte
{
    uint32_t vlpi;
    uint16_t collection;
    uint16_t pad;
};

/*
 * Our collection table encoding:
 * Each entry just contains the VCPU ID of the respective vCPU.
 */
typedef uint16_t coll_table_entry_t;
#define UNMAPPED_COLLECTION     ((coll_table_entry_t)~0)

/*
 * Our device table encodings:
 * Contains the guest physical address of the Interrupt Translation Table in
 * bits [51:8], and the size of it is encoded as the number of bits minus one
 * in the lowest 5 bits of the word.
 */
typedef uint64_t dev_table_entry_t;
#define DEV_TABLE_ITT_ADDR(x)   ((x) & GENMASK(51, 8))
#define DEV_TABLE_ITT_SIZE(x)   (BIT(((x) & GENMASK(4, 0)) + 1))
#define DEV_TABLE_ENTRY(addr, bits)                     \
        (((addr) & GENMASK(51, 8)) | (((bits) - 1) & GENMASK(4, 0)))

#define GITS_BASER_RO_MASK      (GITS_BASER_TYPE_MASK | \
                                 (0x1fL << GITS_BASER_ENTRY_SIZE_SHIFT))

/*
 * The physical address is encoded slightly differently depending on
 * the used page size: the highest four bits are stored in the lowest
 * four bits of the field for 64K pages.
 */
static paddr_t get_baser_phys_addr(uint64_t reg)
{
    if ( reg & BIT(9) )
        return (reg & GENMASK(47, 16)) |
                ((reg & GENMASK(15, 12)) << 36);
    else
        return reg & GENMASK(47, 12);
}

/* Must be called with the ITS lock held. */
static int its_set_collection(struct virt_its *its, uint16_t collid,
                              coll_table_entry_t vcpu_id)
{
    paddr_t addr = get_baser_phys_addr(its->baser_coll);

    /* The collection table entry must be able to store a VCPU ID. */
    BUILD_BUG_ON(BIT(sizeof(coll_table_entry_t) * 8) < MAX_VIRT_CPUS);

    ASSERT(spin_is_locked(&its->its_lock));

    if ( collid >= its->max_collections )
        return -ENOENT;

    return access_guest_memory_by_ipa(its->d,
                                      addr + collid * sizeof(coll_table_entry_t),
                                      &vcpu_id, sizeof(vcpu_id), true);
}

/* Must be called with the ITS lock held. */
static struct vcpu *get_vcpu_from_collection(struct virt_its *its,
                                             uint16_t collid)
{
    paddr_t addr = get_baser_phys_addr(its->baser_coll);
    coll_table_entry_t vcpu_id;
    int ret;

    ASSERT(spin_is_locked(&its->its_lock));

    if ( collid >= its->max_collections )
        return NULL;

    ret = access_guest_memory_by_ipa(its->d,
                                     addr + collid * sizeof(coll_table_entry_t),
                                     &vcpu_id, sizeof(coll_table_entry_t), false);
    if ( ret )
        return NULL;

    if ( vcpu_id == UNMAPPED_COLLECTION || vcpu_id >= its->d->max_vcpus )
        return NULL;

    return its->d->vcpu[vcpu_id];
}

/* Set the address of an ITT for a given device ID. */
static int its_set_itt_address(struct virt_its *its, uint32_t devid,
                               paddr_t itt_address, uint32_t nr_bits)
{
    paddr_t addr = get_baser_phys_addr(its->baser_dev);
    dev_table_entry_t itt_entry = DEV_TABLE_ENTRY(itt_address, nr_bits);

    if ( devid >= its->max_devices )
        return -ENOENT;

    return access_guest_memory_by_ipa(its->d,
                                      addr + devid * sizeof(dev_table_entry_t),
                                      &itt_entry, sizeof(itt_entry), true);
}

/*
 * Lookup the address of the Interrupt Translation Table associated with
 * that device ID.
 * TODO: add support for walking indirect tables.
 */
static int its_get_itt(struct virt_its *its, uint32_t devid,
                       dev_table_entry_t *itt)
{
    paddr_t addr = get_baser_phys_addr(its->baser_dev);

    if ( devid >= its->max_devices )
        return -EINVAL;

    return access_guest_memory_by_ipa(its->d,
                                      addr + devid * sizeof(dev_table_entry_t),
                                      itt, sizeof(*itt), false);
}

/*
 * Lookup the address of the Interrupt Translation Table associated with
 * a device ID and return the address of the ITTE belonging to the event ID
 * (which is an index into that table).
 */
static paddr_t its_get_itte_address(struct virt_its *its,
                                    uint32_t devid, uint32_t evid)
{
    dev_table_entry_t itt;
    int ret;

    ret = its_get_itt(its, devid, &itt);
    if ( ret )
        return INVALID_PADDR;

    if ( evid >= DEV_TABLE_ITT_SIZE(itt) ||
         DEV_TABLE_ITT_ADDR(itt) == INVALID_PADDR )
        return INVALID_PADDR;

    return DEV_TABLE_ITT_ADDR(itt) + evid * sizeof(struct vits_itte);
}

/*
 * Queries the collection and device tables to get the vCPU and virtual
 * LPI number for a given guest event. This first accesses the guest memory
 * to resolve the address of the ITTE, then reads the ITTE entry at this
 * address and puts the result in vcpu_ptr and vlpi_ptr.
 * Must be called with the ITS lock held.
 */
static bool read_itte(struct virt_its *its, uint32_t devid, uint32_t evid,
                      struct vcpu **vcpu_ptr, uint32_t *vlpi_ptr)
{
    paddr_t addr;
    struct vits_itte itte;
    struct vcpu *vcpu;

    ASSERT(spin_is_locked(&its->its_lock));

    addr = its_get_itte_address(its, devid, evid);
    if ( addr == INVALID_PADDR )
        return false;

    if ( access_guest_memory_by_ipa(its->d, addr, &itte, sizeof(itte), false) )
        return false;

    vcpu = get_vcpu_from_collection(its, itte.collection);
    if ( !vcpu )
        return false;

    *vcpu_ptr = vcpu;
    *vlpi_ptr = itte.vlpi;
    return true;
}

/*
 * Queries the collection and device tables to translate the device ID and
 * event ID and find the appropriate ITTE. The given collection ID and the
 * virtual LPI number are then stored into that entry.
 * If vcpu_ptr is provided, returns the VCPU belonging to that collection.
 * Must be called with the ITS lock held.
 */
static bool write_itte(struct virt_its *its, uint32_t devid,
                       uint32_t evid, uint32_t collid, uint32_t vlpi)
{
    paddr_t addr;
    struct vits_itte itte;

    ASSERT(spin_is_locked(&its->its_lock));

    addr = its_get_itte_address(its, devid, evid);
    if ( addr == INVALID_PADDR )
        return false;

    itte.collection = collid;
    itte.vlpi = vlpi;

    if ( access_guest_memory_by_ipa(its->d, addr, &itte, sizeof(itte), true) )
        return false;

    return true;
}

/**************************************
 * Functions that handle ITS commands *
 **************************************/

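/*
 * Extract a bit field from an ITS command: take "size" bits starting at
 * "shift" from the given 64-bit word of the command.
 */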
static uint64_t its_cmd_mask_field(uint64_t *its_cmd, unsigned int word,
                                   unsigned int shift, unsigned int size)
{
    return (its_cmd[word] >> shift) & GENMASK(size - 1, 0);
}

#define its_cmd_get_command(cmd)        its_cmd_mask_field(cmd, 0, 0, 8)
#define its_cmd_get_deviceid(cmd)       its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_size(cmd)           its_cmd_mask_field(cmd, 1, 0, 5)
#define its_cmd_get_id(cmd)             its_cmd_mask_field(cmd, 1, 0, 32)
#define its_cmd_get_physical_id(cmd)    its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)     its_cmd_mask_field(cmd, 2, 0, 16)
#define its_cmd_get_target_addr(cmd)    its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)       its_cmd_mask_field(cmd, 2, 63, 1)
#define its_cmd_get_ittaddr(cmd)        (its_cmd_mask_field(cmd, 2, 8, 44) << 8)

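/*
 * INT: inject the LPI mapped to the given DeviceID/EventID pair into the
 * guest, just as if the device had issued the corresponding MSI write.
 */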
static int its_handle_int(struct virt_its *its, uint64_t *cmdptr)
{
    uint32_t devid = its_cmd_get_deviceid(cmdptr);
    uint32_t eventid = its_cmd_get_id(cmdptr);
    struct vcpu *vcpu;
    uint32_t vlpi;
    bool ret;

    spin_lock(&its->its_lock);
    ret = read_itte(its, devid, eventid, &vcpu, &vlpi);
    spin_unlock(&its->its_lock);
    if ( !ret )
        return -1;

    if ( vlpi == INVALID_LPI )
        return -1;

    vgic_vcpu_inject_lpi(its->d, vlpi);

    return 0;
}

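/*
 * MAPC: map a collection to a redistributor. We store the target VCPU ID
 * in the collection table entry, or UNMAPPED_COLLECTION on an unmap.
 */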
static int its_handle_mapc(struct virt_its *its, uint64_t *cmdptr)
{
    uint32_t collid = its_cmd_get_collection(cmdptr);
    uint64_t rdbase = its_cmd_mask_field(cmdptr, 2, 16, 44);

    if ( collid >= its->max_collections )
        return -1;

    if ( rdbase >= its->d->max_vcpus )
        return -1;

    spin_lock(&its->its_lock);

    if ( its_cmd_get_validbit(cmdptr) )
        its_set_collection(its, collid, rdbase);
    else
        its_set_collection(its, collid, UNMAPPED_COLLECTION);

    spin_unlock(&its->its_lock);

    return 0;
}

/* CLEAR removes the pending state from an LPI. */
static int its_handle_clear(struct virt_its *its, uint64_t *cmdptr)
{
    uint32_t devid = its_cmd_get_deviceid(cmdptr);
    uint32_t eventid = its_cmd_get_id(cmdptr);
    struct pending_irq *p;
    struct vcpu *vcpu;
    uint32_t vlpi;
    unsigned long flags;
    int ret = -1;

    spin_lock(&its->its_lock);

    /* Translate the DevID/EvID pair into a vCPU/vLPI pair. */
    if ( !read_itte(its, devid, eventid, &vcpu, &vlpi) )
        goto out_unlock;

    p = gicv3_its_get_event_pending_irq(its->d, its->doorbell_address,
                                        devid, eventid);
    /* Protect against an invalid LPI number. */
    if ( unlikely(!p) )
        goto out_unlock;

    /*
     * TODO: This relies on the VCPU being correct in the ITS tables.
     * This can be fixed by either using a per-IRQ lock or by using
     * the VCPU ID from the pending_irq instead.
     */
    spin_lock_irqsave(&vcpu->arch.vgic.lock, flags);

    /*
     * If the LPI is already visible on the guest, it is too late to
     * clear the pending state. However this is a benign race that can
     * happen on real hardware, too: If the LPI has already been forwarded
     * to a CPU interface, a CLEAR request reaching the redistributor has
     * no effect on that LPI anymore. Since LPIs are edge triggered and
     * have no active state, we don't need to care about this here.
     */
    if ( !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
        gic_remove_irq_from_queues(vcpu, p);

    spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);
    ret = 0;

 out_unlock:
    spin_unlock(&its->its_lock);

    return ret;
}

/*
 * For a given virtual LPI read the enabled bit and priority from the virtual
 * property table and update the virtual IRQ's state in the given pending_irq.
 * Must be called with the respective VGIC VCPU lock held.
 */
static int update_lpi_property(struct domain *d, struct pending_irq *p)
{
    paddr_t addr;
    uint8_t property;
    int ret;

    /*
     * If no redistributor has its LPIs enabled yet, we can't access the
     * property table. In this case we just can't update the properties,
     * but this should not be an error from an ITS point of view.
     * The control flow dependency here and a barrier instruction on the
     * write side make sure we can access these without taking a lock.
     */
    if ( !d->arch.vgic.rdists_enabled )
        return 0;

    addr = d->arch.vgic.rdist_propbase & GENMASK(51, 12);

    ret = access_guest_memory_by_ipa(d, addr + p->irq - LPI_OFFSET,
                                     &property, sizeof(property), false);
    if ( ret )
        return ret;

    write_atomic(&p->lpi_priority, property & LPI_PROP_PRIO_MASK);

    if ( property & LPI_PROP_ENABLED )
        set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
    else
        clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);

    return 0;
}

/*
 * Checks whether an LPI that got enabled or disabled needs to change
 * something in the VGIC (added or removed from the LR or queues).
 * We don't disable the underlying physical LPI, because this requires
 * queueing a host LPI command, which we can't afford to do on behalf
 * of a guest.
 * Must be called with the VCPU VGIC lock held.
 */
static void update_lpi_vgic_status(struct vcpu *v, struct pending_irq *p)
{
    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
    {
        if ( !list_empty(&p->inflight) &&
             !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
            gic_raise_guest_irq(v, p->irq, p->lpi_priority);
    }
    else
        gic_remove_from_lr_pending(v, p);
}

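/*
 * INV: re-read the property table entry for one event's LPI and update
 * our cached enable bit and priority accordingly.
 */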
static int its_handle_inv(struct virt_its *its, uint64_t *cmdptr)
{
    struct domain *d = its->d;
    uint32_t devid = its_cmd_get_deviceid(cmdptr);
    uint32_t eventid = its_cmd_get_id(cmdptr);
    struct pending_irq *p;
    unsigned long flags;
    struct vcpu *vcpu;
    uint32_t vlpi;
    int ret = -1;

    /*
     * If no redistributor has its LPIs enabled yet, we can't access the
     * property table, so there is no point in executing this command.
     * The control flow dependency here and a barrier instruction on the
     * write side make sure we can access these without taking a lock.
     */
    if ( !d->arch.vgic.rdists_enabled )
        return 0;

    spin_lock(&its->its_lock);

    /* Translate the event into a vCPU/vLPI pair. */
    if ( !read_itte(its, devid, eventid, &vcpu, &vlpi) )
        goto out_unlock_its;

    if ( vlpi == INVALID_LPI )
        goto out_unlock_its;

    p = gicv3_its_get_event_pending_irq(d, its->doorbell_address,
                                        devid, eventid);
    if ( unlikely(!p) )
        goto out_unlock_its;

    spin_lock_irqsave(&vcpu->arch.vgic.lock, flags);

    /* Read the property table and update our cached status. */
    if ( update_lpi_property(d, p) )
        goto out_unlock;

    /* Check whether the LPI needs to go on a VCPU. */
    update_lpi_vgic_status(vcpu, p);

    ret = 0;

 out_unlock:
    spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);

 out_unlock_its:
    spin_unlock(&its->its_lock);

    return ret;
}

/*
 * INVALL updates the per-LPI configuration status for every LPI mapped to
 * a particular redistributor.
 * We iterate over all mapped LPIs in our radix tree and update those.
 */
static int its_handle_invall(struct virt_its *its, uint64_t *cmdptr)
{
    uint32_t collid = its_cmd_get_collection(cmdptr);
    struct vcpu *vcpu;
    struct pending_irq *pirqs[16];
    uint64_t vlpi = 0;          /* 64-bit to catch overflows */
    unsigned int nr_lpis, i;
    unsigned long flags;
    int ret = 0;

    /*
     * As this implementation walks over all mapped LPIs, it might take
     * too long for a real guest, so we might want to revisit this
     * implementation for DomUs.
     * However, this command is very rare and we don't expect many
     * LPIs to actually be mapped, so it is fine for Dom0 to use.
     */
    ASSERT(is_hardware_domain(its->d));

    /*
     * If no redistributor has its LPIs enabled yet, we can't access the
     * property table, so there is no point in executing this command.
     * The control flow dependency here and a barrier instruction on the
     * write side make sure we can access these without taking a lock.
     */
    if ( !its->d->arch.vgic.rdists_enabled )
        return 0;

    spin_lock(&its->its_lock);
    vcpu = get_vcpu_from_collection(its, collid);
    spin_unlock(&its->its_lock);

    spin_lock_irqsave(&vcpu->arch.vgic.lock, flags);
    read_lock(&its->d->arch.vgic.pend_lpi_tree_lock);

    do
    {
        int err;

        nr_lpis = radix_tree_gang_lookup(&its->d->arch.vgic.pend_lpi_tree,
                                         (void **)pirqs, vlpi,
                                         ARRAY_SIZE(pirqs));

        for ( i = 0; i < nr_lpis; i++ )
        {
            /* We only care about LPIs on our VCPU. */
            if ( pirqs[i]->lpi_vcpu_id != vcpu->vcpu_id )
                continue;

            vlpi = pirqs[i]->irq;
            /* If that fails for a single LPI, carry on to handle the rest. */
            err = update_lpi_property(its->d, pirqs[i]);
            if ( !err )
                update_lpi_vgic_status(vcpu, pirqs[i]);
            else
                ret = err;
        }
        /*
         * Loop over the next gang of pending_irqs until we reach the end of
         * a (fully populated) tree or the lookup function returns fewer LPIs
         * than it has been asked for.
         */
    } while ( (++vlpi < its->d->arch.vgic.nr_lpis) &&
              (nr_lpis == ARRAY_SIZE(pirqs)) );

    read_unlock(&its->d->arch.vgic.pend_lpi_tree_lock);
    spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);

    return ret;
}

/* Must be called with the ITS lock held. */
static int its_discard_event(struct virt_its *its,
                             uint32_t vdevid, uint32_t vevid)
{
    struct pending_irq *p;
    unsigned long flags;
    struct vcpu *vcpu;
    uint32_t vlpi;

    ASSERT(spin_is_locked(&its->its_lock));

    if ( !read_itte(its, vdevid, vevid, &vcpu, &vlpi) )
        return -ENOENT;

    if ( vlpi == INVALID_LPI )
        return -ENOENT;

    /*
     * TODO: This relies on the VCPU being correct in the ITS tables.
     * This can be fixed by either using a per-IRQ lock or by using
     * the VCPU ID from the pending_irq instead.
     */
    spin_lock_irqsave(&vcpu->arch.vgic.lock, flags);

    /* Remove the pending_irq from the tree. */
    write_lock(&its->d->arch.vgic.pend_lpi_tree_lock);
    p = radix_tree_delete(&its->d->arch.vgic.pend_lpi_tree, vlpi);
    write_unlock(&its->d->arch.vgic.pend_lpi_tree_lock);

    if ( !p )
    {
        spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);

        return -ENOENT;
    }

    /* Cleanup the pending_irq and disconnect it from the LPI. */
    gic_remove_irq_from_queues(vcpu, p);
    vgic_init_pending_irq(p, INVALID_LPI);

    spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);

    /* Remove the corresponding host LPI entry */
    return gicv3_remove_guest_event(its->d, its->doorbell_address,
                                    vdevid, vevid);
}

static void its_unmap_device(struct virt_its *its, uint32_t devid)
{
    dev_table_entry_t itt;
    uint64_t evid;

    spin_lock(&its->its_lock);

    if ( its_get_itt(its, devid, &itt) )
        goto out;

    /*
     * For DomUs we need to check that the number of events per device
     * is really limited, otherwise looping over all events can take too
     * long for a guest. This ASSERT can then be removed if that is
     * covered.
     */
    ASSERT(is_hardware_domain(its->d));

    for ( evid = 0; evid < DEV_TABLE_ITT_SIZE(itt); evid++ )
        /* Don't care about errors here, clean up as much as possible. */
        its_discard_event(its, devid, evid);

 out:
    spin_unlock(&its->its_lock);
}

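/*
 * MAPD: map (or unmap, if the valid bit is clear) an ITT for a device ID.
 * For Dom0 this also maps the corresponding host device, since device IDs
 * and doorbell addresses are identical there.
 */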
static int its_handle_mapd(struct virt_its *its, uint64_t *cmdptr)
{
    /* size and devid get validated by the functions called below. */
    uint32_t devid = its_cmd_get_deviceid(cmdptr);
    unsigned int size = its_cmd_get_size(cmdptr) + 1;
    bool valid = its_cmd_get_validbit(cmdptr);
    paddr_t itt_addr = its_cmd_get_ittaddr(cmdptr);
    int ret;

    /* Sanitize the number of events. */
    if ( valid && (size > its->evid_bits) )
        return -1;

    if ( !valid )
        /* Discard all events and remove pending LPIs. */
        its_unmap_device(its, devid);

    /*
     * There is no easy and clean way for Xen to know the ITS device ID of a
     * particular (PCI) device, so we have to rely on the guest telling
     * us about it. For *now* we are just using the device ID *Dom0* uses,
     * because the driver there has the actual knowledge.
     * Eventually this will be replaced with a dedicated hypercall to
     * announce pass-through of devices.
     */
    if ( is_hardware_domain(its->d) )
    {

        /*
         * Dom0's ITSes are mapped 1:1, so both addresses are the same.
         * Also the device IDs are equal.
         */
        ret = gicv3_its_map_guest_device(its->d, its->doorbell_address, devid,
                                         its->doorbell_address, devid,
                                         BIT(size), valid);
        if ( ret && valid )
            return ret;
    }

    spin_lock(&its->its_lock);

    if ( valid )
        ret = its_set_itt_address(its, devid, itt_addr, size);
    else
        ret = its_set_itt_address(its, devid, INVALID_PADDR, 1);

    spin_unlock(&its->its_lock);

    return ret;
}

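/*
 * MAPTI/MAPI: map a DeviceID/EventID pair to a virtual LPI and a collection.
 * For MAPI the event ID is used as the LPI number. This also connects the
 * virtual LPI to the corresponding host LPI and makes the pending_irq live.
 */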
static int its_handle_mapti(struct virt_its *its, uint64_t *cmdptr)
{
    uint32_t devid = its_cmd_get_deviceid(cmdptr);
    uint32_t eventid = its_cmd_get_id(cmdptr);
    uint32_t intid = its_cmd_get_physical_id(cmdptr), _intid;
    uint16_t collid = its_cmd_get_collection(cmdptr);
    struct pending_irq *pirq;
    struct vcpu *vcpu = NULL;
    int ret = -1;

    if ( its_cmd_get_command(cmdptr) == GITS_CMD_MAPI )
        intid = eventid;

    spin_lock(&its->its_lock);
    /*
     * Check whether there is a valid existing mapping. If yes, behavior is
     * unpredictable, we choose to ignore this command here.
     * This makes sure we start with a pristine pending_irq below.
     */
    if ( read_itte(its, devid, eventid, &vcpu, &_intid) &&
         _intid != INVALID_LPI )
    {
        spin_unlock(&its->its_lock);
        return -1;
    }

    /* Sanitize collection ID and interrupt ID */
    vcpu = get_vcpu_from_collection(its, collid);
    if ( !vcpu || intid >= its->d->arch.vgic.nr_lpis )
    {
        spin_unlock(&its->its_lock);
        return -1;
    }

    /* Enter the mapping in our virtual ITS tables. */
    if ( !write_itte(its, devid, eventid, collid, intid) )
    {
        spin_unlock(&its->its_lock);
        return -1;
    }

    spin_unlock(&its->its_lock);

    /*
     * Connect this virtual LPI to the corresponding host LPI, which is
     * determined by the same device ID and event ID on the host side.
     * This returns us the corresponding, still unused pending_irq.
     */
    pirq = gicv3_assign_guest_event(its->d, its->doorbell_address,
                                    devid, eventid, intid);
    if ( !pirq )
        goto out_remove_mapping;

    vgic_init_pending_irq(pirq, intid);

    /*
     * Now read the guest's property table to initialize our cached state.
     * We don't need the VGIC VCPU lock here, because the pending_irq isn't
     * in the radix tree yet.
     */
    ret = update_lpi_property(its->d, pirq);
    if ( ret )
        goto out_remove_host_entry;

    pirq->lpi_vcpu_id = vcpu->vcpu_id;
    /*
     * Mark this LPI as new, so any older (now unmapped) LPI in any LR
     * can be easily recognised as such.
     */
    set_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &pirq->status);

    /*
     * Now insert the pending_irq into the domain's LPI tree, so that
     * it becomes live.
     */
    write_lock(&its->d->arch.vgic.pend_lpi_tree_lock);
    ret = radix_tree_insert(&its->d->arch.vgic.pend_lpi_tree, intid, pirq);
    write_unlock(&its->d->arch.vgic.pend_lpi_tree_lock);

    if ( !ret )
        return 0;

    /*
     * radix_tree_insert() returns an error either due to an internal
     * condition (like memory allocation failure) or because the LPI already
     * existed in the tree. We don't support the latter case, so we always
     * cleanup and return an error here in any case.
     */
 out_remove_host_entry:
    gicv3_remove_guest_event(its->d, its->doorbell_address, devid, eventid);

 out_remove_mapping:
    spin_lock(&its->its_lock);
    write_itte(its, devid, eventid, UNMAPPED_COLLECTION, INVALID_LPI);
    spin_unlock(&its->its_lock);

    return ret;
}

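/*
 * MOVI: move an event to another collection, i.e. retarget its LPI to the
 * VCPU belonging to the new collection.
 */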
static int its_handle_movi(struct virt_its *its, uint64_t *cmdptr)
{
    uint32_t devid = its_cmd_get_deviceid(cmdptr);
    uint32_t eventid = its_cmd_get_id(cmdptr);
    uint16_t collid = its_cmd_get_collection(cmdptr);
    unsigned long flags;
    struct pending_irq *p;
    struct vcpu *ovcpu, *nvcpu;
    uint32_t vlpi;
    int ret = -1;

    spin_lock(&its->its_lock);
    /* Check for a mapped LPI and get the LPI number. */
    if ( !read_itte(its, devid, eventid, &ovcpu, &vlpi) )
        goto out_unlock;

    if ( vlpi == INVALID_LPI )
        goto out_unlock;

    /* Check the new collection ID and get the new VCPU pointer */
    nvcpu = get_vcpu_from_collection(its, collid);
    if ( !nvcpu )
        goto out_unlock;

    p = gicv3_its_get_event_pending_irq(its->d, its->doorbell_address,
                                        devid, eventid);
    if ( unlikely(!p) )
        goto out_unlock;

    /*
     * TODO: This relies on the VCPU being correct in the ITS tables.
     * This can be fixed by either using a per-IRQ lock or by using
     * the VCPU ID from the pending_irq instead.
     */
    spin_lock_irqsave(&ovcpu->arch.vgic.lock, flags);

    /* Update our cached vcpu_id in the pending_irq. */
    p->lpi_vcpu_id = nvcpu->vcpu_id;

    spin_unlock_irqrestore(&ovcpu->arch.vgic.lock, flags);

    /*
     * TODO: Investigate if and how to migrate an already pending LPI. This
     * is not really critical, as these benign races happen in hardware too
     * (an affinity change may come too late for a just fired IRQ), but may
     * simplify the code if we can keep the IRQ's associated VCPU in sync,
     * so that we don't have to deal with special cases anymore.
     * Migrating those LPIs is not easy to do at the moment anyway, but should
     * become easier with the introduction of a per-IRQ lock.
     */

    /* Now store the new collection in the translation table. */
    if ( !write_itte(its, devid, eventid, collid, vlpi) )
        goto out_unlock;

    ret = 0;

 out_unlock:
    spin_unlock(&its->its_lock);

    return ret;
}

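/*
 * DISCARD: remove the translation for a DeviceID/EventID pair, dropping
 * the host LPI association and marking the ITT entry as unmapped.
 */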
static int its_handle_discard(struct virt_its *its, uint64_t *cmdptr)
{
    uint32_t devid = its_cmd_get_deviceid(cmdptr);
    uint32_t eventid = its_cmd_get_id(cmdptr);
    int ret;

    spin_lock(&its->its_lock);

    /* Remove from the radix tree and remove the host entry. */
    ret = its_discard_event(its, devid, eventid);
    if ( ret )
        goto out_unlock;

    /* Remove from the guest's ITTE. */
    if ( !write_itte(its, devid, eventid, UNMAPPED_COLLECTION, INVALID_LPI) )
        ret = -1;

 out_unlock:
    spin_unlock(&its->its_lock);

    return ret;
}

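/*
 * The size of the guest's command queue is encoded in the lowest byte of
 * CBASER as the number of 4K pages; CREADR/CWRITER hold byte offsets into
 * that buffer (bits [19:5]).
 */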
#define ITS_CMD_BUFFER_SIZE(baser)      ((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_OFFSET(reg)             ((reg) & GENMASK(19, 5))

static void dump_its_command(uint64_t *command)
{
    gdprintk(XENLOG_WARNING, " cmd 0x%02lx: %016lx %016lx %016lx %016lx\n",
             its_cmd_get_command(command),
             command[0], command[1], command[2], command[3]);
}

/*
 * Must be called with the vcmd_lock held.
 * TODO: Investigate whether we can be smarter here and don't need to hold
 * the lock all of the time.
 */
static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its)
{
    paddr_t addr = its->cbaser & GENMASK(51, 12);
    uint64_t command[4];

    ASSERT(spin_is_locked(&its->vcmd_lock));

    if ( its->cwriter >= ITS_CMD_BUFFER_SIZE(its->cbaser) )
        return -1;

    while ( its->creadr != its->cwriter )
    {
        int ret;

        ret = access_guest_memory_by_ipa(d, addr + its->creadr,
                                         command, sizeof(command), false);
        if ( ret )
            return ret;

        switch ( its_cmd_get_command(command) )
        {
        case GITS_CMD_CLEAR:
            ret = its_handle_clear(its, command);
            break;
        case GITS_CMD_DISCARD:
            ret = its_handle_discard(its, command);
            break;
        case GITS_CMD_INT:
            ret = its_handle_int(its, command);
            break;
        case GITS_CMD_INV:
            ret = its_handle_inv(its, command);
            break;
        case GITS_CMD_INVALL:
            ret = its_handle_invall(its, command);
            break;
        case GITS_CMD_MAPC:
            ret = its_handle_mapc(its, command);
            break;
        case GITS_CMD_MAPD:
            ret = its_handle_mapd(its, command);
            break;
        case GITS_CMD_MAPI:
        case GITS_CMD_MAPTI:
            ret = its_handle_mapti(its, command);
            break;
        case GITS_CMD_MOVALL:
            gdprintk(XENLOG_G_INFO, "vGITS: ignoring MOVALL command\n");
            break;
        case GITS_CMD_MOVI:
            ret = its_handle_movi(its, command);
            break;
        case GITS_CMD_SYNC:
            /* We handle ITS commands synchronously, so we ignore SYNC. */
            break;
        default:
            gdprintk(XENLOG_WARNING, "vGITS: unhandled ITS command\n");
            dump_its_command(command);
            break;
        }

        write_u64_atomic(&its->creadr, (its->creadr + ITS_CMD_SIZE) %
                         ITS_CMD_BUFFER_SIZE(its->cbaser));

        if ( ret )
        {
            gdprintk(XENLOG_WARNING,
                     "vGITS: ITS command error %d while handling command\n",
                     ret);
            dump_its_command(command);
        }
    }

    return 0;
}

/*****************************
 * ITS registers read access *
 *****************************/

/* Identifying as an ARM IP, using "X" as the product ID. */
#define GITS_IIDR_VALUE         0x5800034c

static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info,
                                 register_t *r, void *priv)
{
    struct virt_its *its = priv;
    uint64_t reg;

    switch ( info->gpa & 0xffff )
    {
    case VREG32(GITS_CTLR):
    {
        /*
         * We try to avoid waiting for the command queue lock and report
         * non-quiescent if that lock is already taken.
         */
        bool have_cmd_lock;

        if ( info->dabt.size != DABT_WORD ) goto bad_width;

        have_cmd_lock = spin_trylock(&its->vcmd_lock);
        reg = its->enabled ? GITS_CTLR_ENABLE : 0;

        if ( have_cmd_lock && its->cwriter == its->creadr )
            reg |= GITS_CTLR_QUIESCENT;

        if ( have_cmd_lock )
            spin_unlock(&its->vcmd_lock);

        *r = vreg_reg32_extract(reg, info);
        break;
    }

    case VREG32(GITS_IIDR):
        if ( info->dabt.size != DABT_WORD ) goto bad_width;
        *r = vreg_reg32_extract(GITS_IIDR_VALUE, info);
        break;

    case VREG64(GITS_TYPER):
        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;

        reg = GITS_TYPER_PHYSICAL;
        reg |= (sizeof(struct vits_itte) - 1) << GITS_TYPER_ITT_SIZE_SHIFT;
        reg |= (its->evid_bits - 1) << GITS_TYPER_IDBITS_SHIFT;
        reg |= (its->devid_bits - 1) << GITS_TYPER_DEVIDS_SHIFT;
        *r = vreg_reg64_extract(reg, info);
        break;

    case VRANGE32(0x0018, 0x001C):
        goto read_reserved;
    case VRANGE32(0x0020, 0x003C):
        goto read_impl_defined;
    case VRANGE32(0x0040, 0x007C):
        goto read_reserved;

    case VREG64(GITS_CBASER):
        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
        spin_lock(&its->its_lock);
        *r = vreg_reg64_extract(its->cbaser, info);
        spin_unlock(&its->its_lock);
        break;

    case VREG64(GITS_CWRITER):
        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;

        /* CWRITER is only written by the guest, so no extra locking here. */
        reg = its->cwriter;
        *r = vreg_reg64_extract(reg, info);
        break;

    case VREG64(GITS_CREADR):
        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;

        /*
         * Lockless access, to avoid waiting for the whole command queue to be
         * finished completely. Xen updates its->creadr atomically after each
         * command has been handled, this allows other VCPUs to monitor the
         * progress.
         */
        reg = read_u64_atomic(&its->creadr);
        *r = vreg_reg64_extract(reg, info);
        break;

    case VRANGE64(0x0098, 0x00F8):
        goto read_reserved;

    case VREG64(GITS_BASER0):           /* device table */
        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
        spin_lock(&its->its_lock);
        *r = vreg_reg64_extract(its->baser_dev, info);
        spin_unlock(&its->its_lock);
        break;

    case VREG64(GITS_BASER1):           /* collection table */
        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
        spin_lock(&its->its_lock);
        *r = vreg_reg64_extract(its->baser_coll, info);
        spin_unlock(&its->its_lock);
        break;

    case VRANGE64(GITS_BASER2, GITS_BASER7):
        goto read_as_zero_64;
    case VRANGE32(0x0140, 0xBFFC):
        goto read_reserved;
    case VRANGE32(0xC000, 0xFFCC):
        goto read_impl_defined;
    case VRANGE32(0xFFD0, 0xFFE4):
        goto read_impl_defined;

    case VREG32(GITS_PIDR2):
        if ( info->dabt.size != DABT_WORD ) goto bad_width;
        *r = vreg_reg32_extract(GIC_PIDR2_ARCH_GICv3, info);
        break;

    case VRANGE32(0xFFEC, 0xFFFC):
        goto read_impl_defined;

    default:
        printk(XENLOG_G_ERR
               "%pv: vGITS: unhandled read r%d offset %#04lx\n",
               v, info->dabt.reg, (unsigned long)info->gpa & 0xffff);
        return 0;
    }

    return 1;

 read_as_zero_64:
    if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
    *r = 0;

    return 1;

 read_impl_defined:
    printk(XENLOG_G_DEBUG
           "%pv: vGITS: RAZ on implementation defined register offset %#04lx\n",
           v, info->gpa & 0xffff);
    *r = 0;
    return 1;

 read_reserved:
    printk(XENLOG_G_DEBUG
           "%pv: vGITS: RAZ on reserved register offset %#04lx\n",
           v, info->gpa & 0xffff);
    *r = 0;
    return 1;

 bad_width:
    printk(XENLOG_G_ERR "vGITS: bad read width %d r%d offset %#04lx\n",
           info->dabt.size, info->dabt.reg, (unsigned long)info->gpa & 0xffff);
    domain_crash_synchronous();

    return 0;
}

/******************************
 * ITS registers write access *
 ******************************/

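/*
 * Compute the size in bytes of an ITS table from a (sanitized) BASER value:
 * the encoded page size multiplied by the number of allocated pages.
 */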
static unsigned int its_baser_table_size(uint64_t baser)
{
    unsigned int ret, page_size[4] = {SZ_4K, SZ_16K, SZ_64K, SZ_64K};

    ret = page_size[(baser >> GITS_BASER_PAGE_SIZE_SHIFT) & 3];

    return ret * ((baser & GITS_BASER_SIZE_MASK) + 1);
}

static unsigned int its_baser_nr_entries(uint64_t baser)
{
    unsigned int entry_size = GITS_BASER_ENTRY_SIZE(baser);

    return its_baser_table_size(baser) / entry_size;
}

/* Must be called with the ITS lock held. */
static bool vgic_v3_verify_its_status(struct virt_its *its, bool status)
{
    ASSERT(spin_is_locked(&its->its_lock));

    if ( !status )
        return false;

    if ( !(its->cbaser & GITS_VALID_BIT) ||
         !(its->baser_dev & GITS_VALID_BIT) ||
         !(its->baser_coll & GITS_VALID_BIT) )
    {
        printk(XENLOG_G_WARNING "d%d tried to enable ITS without having the tables configured.\n",
               its->d->domain_id);
        return false;
    }

    /*
     * TODO: Protect against a guest crafting ITS tables.
     * The spec says that "at the time of the new allocation for use by the ITS"
     * all tables must contain zeroes. We could enforce this here by clearing
     * all the tables, but this would be moot since at the moment the guest
     * can change the tables at any point in time anyway. Right now there are
     * expectations about the tables being consistent (a VCPU lock protecting
     * an LPI), which should go away with proper per-IRQ locking.
     * So for now we ignore this issue and rely on Dom0 not doing bad things.
     */
    ASSERT(is_hardware_domain(its->d));

    return true;
}

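/*
 * Sanitize the shareability and cacheability attributes a guest writes
 * into CBASER/BASERn, mapping unsupported combinations to supported ones.
 */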
static void sanitize_its_base_reg(uint64_t *reg)
{
    uint64_t r = *reg;

    /* Avoid outer shareable. */
    switch ( (r >> GITS_BASER_SHAREABILITY_SHIFT) & 0x03 )
    {
    case GIC_BASER_OuterShareable:
        r &= ~GITS_BASER_SHAREABILITY_MASK;
        r |= GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT;
        break;
    default:
        break;
    }

    /* Avoid any inner non-cacheable mapping. */
    switch ( (r >> GITS_BASER_INNER_CACHEABILITY_SHIFT) & 0x07 )
    {
    case GIC_BASER_CACHE_nCnB:
    case GIC_BASER_CACHE_nC:
        r &= ~GITS_BASER_INNER_CACHEABILITY_MASK;
        r |= GIC_BASER_CACHE_RaWb << GITS_BASER_INNER_CACHEABILITY_SHIFT;
        break;
    default:
        break;
    }

    /* Only allow non-cacheable or same-as-inner. */
    switch ( (r >> GITS_BASER_OUTER_CACHEABILITY_SHIFT) & 0x07 )
    {
    case GIC_BASER_CACHE_SameAsInner:
    case GIC_BASER_CACHE_nC:
        break;
    default:
        r &= ~GITS_BASER_OUTER_CACHEABILITY_MASK;
        r |= GIC_BASER_CACHE_nC << GITS_BASER_OUTER_CACHEABILITY_SHIFT;
        break;
    }

    *reg = r;
}

static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info,
                                  register_t r, void *priv)
{
    struct domain *d = v->domain;
    struct virt_its *its = priv;
    uint64_t reg;
    uint32_t reg32;

    switch ( info->gpa & 0xffff )
    {
    case VREG32(GITS_CTLR):
    {
        uint32_t ctlr;

        if ( info->dabt.size != DABT_WORD ) goto bad_width;

        /*
         * We need to take the vcmd_lock to prevent a guest from disabling
         * the ITS while commands are still processed.
         */
        spin_lock(&its->vcmd_lock);
        spin_lock(&its->its_lock);
        ctlr = its->enabled ? GITS_CTLR_ENABLE : 0;
        reg32 = ctlr;
        vreg_reg32_update(&reg32, r, info);

        if ( ctlr ^ reg32 )
            its->enabled = vgic_v3_verify_its_status(its,
                                                     reg32 & GITS_CTLR_ENABLE);
        spin_unlock(&its->its_lock);
        spin_unlock(&its->vcmd_lock);
        return 1;
    }

    case VREG32(GITS_IIDR):
        goto write_ignore_32;

    case VREG32(GITS_TYPER):
        goto write_ignore_32;

    case VRANGE32(0x0018, 0x001C):
        goto write_reserved;
    case VRANGE32(0x0020, 0x003C):
        goto write_impl_defined;
    case VRANGE32(0x0040, 0x007C):
        goto write_reserved;

    case VREG64(GITS_CBASER):
        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;

        spin_lock(&its->its_lock);
        /* Changing base registers with the ITS enabled is UNPREDICTABLE. */
        if ( its->enabled )
        {
            spin_unlock(&its->its_lock);
            gdprintk(XENLOG_WARNING,
                     "vGITS: tried to change CBASER with the ITS enabled.\n");
            return 1;
        }

        reg = its->cbaser;
        vreg_reg64_update(&reg, r, info);
        sanitize_its_base_reg(&reg);

        its->cbaser = reg;
        its->creadr = 0;
        spin_unlock(&its->its_lock);

        return 1;

    case VREG64(GITS_CWRITER):
        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;

        spin_lock(&its->vcmd_lock);
        reg = ITS_CMD_OFFSET(its->cwriter);
        vreg_reg64_update(&reg, r, info);
        its->cwriter = ITS_CMD_OFFSET(reg);

        if ( its->enabled )
            if ( vgic_its_handle_cmds(d, its) )
                gdprintk(XENLOG_WARNING, "error handling ITS commands\n");

        spin_unlock(&its->vcmd_lock);

        return 1;

    case VREG64(GITS_CREADR):
        goto write_ignore_64;

    case VRANGE32(0x0098, 0x00FC):
        goto write_reserved;

    case VREG64(GITS_BASER0):           /* device table */
        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;

        spin_lock(&its->its_lock);

        /*
         * Changing base registers with the ITS enabled is UNPREDICTABLE,
         * we choose to ignore it, but warn.
         */
        if ( its->enabled )
        {
            spin_unlock(&its->its_lock);
            gdprintk(XENLOG_WARNING, "vGITS: tried to change BASER with the ITS enabled.\n");

            return 1;
        }

        reg = its->baser_dev;
        vreg_reg64_update(&reg, r, info);

        /* We don't support indirect tables for now. */
        reg &= ~(GITS_BASER_RO_MASK | GITS_BASER_INDIRECT);
        reg |= (sizeof(dev_table_entry_t) - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
        reg |= GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT;
        sanitize_its_base_reg(&reg);

        if ( reg & GITS_VALID_BIT )
        {
            its->max_devices = its_baser_nr_entries(reg);
            if ( its->max_devices > BIT(its->devid_bits) )
                its->max_devices = BIT(its->devid_bits);
        }
        else
            its->max_devices = 0;

        its->baser_dev = reg;
        spin_unlock(&its->its_lock);
        return 1;

    case VREG64(GITS_BASER1):           /* collection table */
        if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;

        spin_lock(&its->its_lock);
        /*
         * Changing base registers with the ITS enabled is UNPREDICTABLE,
         * we choose to ignore it, but warn.
         */
        if ( its->enabled )
        {
            spin_unlock(&its->its_lock);
            gdprintk(XENLOG_INFO, "vGITS: tried to change BASER with the ITS enabled.\n");
            return 1;
        }

        reg = its->baser_coll;
        vreg_reg64_update(&reg, r, info);
        /* No indirect tables for the collection table. */
        reg &= ~(GITS_BASER_RO_MASK | GITS_BASER_INDIRECT);
        reg |= (sizeof(coll_table_entry_t) - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
        reg |= GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT;
        sanitize_its_base_reg(&reg);

        if ( reg & GITS_VALID_BIT )
            its->max_collections = its_baser_nr_entries(reg);
        else
            its->max_collections = 0;
        its->baser_coll = reg;
        spin_unlock(&its->its_lock);
        return 1;

    case VRANGE64(GITS_BASER2, GITS_BASER7):
        goto write_ignore_64;

    case VRANGE32(0x0140, 0xBFFC):
        goto write_reserved;
    case VRANGE32(0xC000, 0xFFCC):
        goto write_impl_defined;
    case VRANGE32(0xFFD0, 0xFFE4):      /* IMPDEF identification registers */
        goto write_impl_defined;

    case VREG32(GITS_PIDR2):
        goto write_ignore_32;

    case VRANGE32(0xFFEC, 0xFFFC):      /* IMPDEF identification registers */
        goto write_impl_defined;

    default:
        printk(XENLOG_G_ERR
               "%pv: vGITS: unhandled write r%d offset %#04lx\n",
               v, info->dabt.reg, (unsigned long)info->gpa & 0xffff);
        return 0;
    }

    return 1;

 write_ignore_64:
    if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width;
    return 1;

 write_ignore_32:
    if ( info->dabt.size != DABT_WORD ) goto bad_width;
    return 1;

 write_impl_defined:
    printk(XENLOG_G_DEBUG
           "%pv: vGITS: WI on implementation defined register offset %#04lx\n",
           v, info->gpa & 0xffff);
    return 1;

 write_reserved:
    printk(XENLOG_G_DEBUG
           "%pv: vGITS: WI on reserved register offset %#04lx\n",
           v, info->gpa & 0xffff);
    return 1;

 bad_width:
    printk(XENLOG_G_ERR "vGITS: bad write width %d r%d offset %#08lx\n",
           info->dabt.size, info->dabt.reg, (unsigned long)info->gpa & 0xffff);

    domain_crash_synchronous();

    return 0;
}

static const struct mmio_handler_ops vgic_its_mmio_handler = {
    .read  = vgic_v3_its_mmio_read,
    .write = vgic_v3_its_mmio_write,
};

static int vgic_v3_its_init_virtual(struct domain *d, paddr_t guest_addr,
                                    unsigned int devid_bits,
                                    unsigned int evid_bits)
{
    struct virt_its *its;
    uint64_t base_attr;

    its = xzalloc(struct virt_its);
    if ( !its )
        return -ENOMEM;

    base_attr  = GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT;
    base_attr |= GIC_BASER_CACHE_SameAsInner << GITS_BASER_OUTER_CACHEABILITY_SHIFT;
    base_attr |= GIC_BASER_CACHE_RaWaWb << GITS_BASER_INNER_CACHEABILITY_SHIFT;

    its->cbaser = base_attr;
    base_attr |= 0ULL << GITS_BASER_PAGE_SIZE_SHIFT;    /* 4K pages */
    its->baser_dev = GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT;
    its->baser_dev |= (sizeof(dev_table_entry_t) - 1) <<
                      GITS_BASER_ENTRY_SIZE_SHIFT;
    its->baser_dev |= base_attr;
    its->baser_coll = GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT;
    its->baser_coll |= (sizeof(coll_table_entry_t) - 1) <<
                       GITS_BASER_ENTRY_SIZE_SHIFT;
    its->baser_coll |= base_attr;
    its->d = d;
    its->doorbell_address = guest_addr + ITS_DOORBELL_OFFSET;
    its->devid_bits = devid_bits;
    its->evid_bits = evid_bits;
    spin_lock_init(&its->vcmd_lock);
    spin_lock_init(&its->its_lock);

    register_mmio_handler(d, &vgic_its_mmio_handler, guest_addr, SZ_64K, its);

    /* Register the virtual ITS to be able to clean it up later. */
    list_add_tail(&its->vits_list, &d->arch.vgic.vits_list);

    return 0;
}

unsigned int vgic_v3_its_count(const struct domain *d)
{
    struct host_its *hw_its;
    unsigned int ret = 0;

    /* Only Dom0 can use emulated ITSes so far. */
    if ( !is_hardware_domain(d) )
        return 0;

    list_for_each_entry(hw_its, &host_its_list, entry)
        ret++;

    return ret;
}

/*
 * For a hardware domain, this will iterate over the host ITSes
 * and map one virtual ITS per host ITS at the same address.
 */
int vgic_v3_its_init_domain(struct domain *d)
{
    int ret;

    INIT_LIST_HEAD(&d->arch.vgic.vits_list);
    spin_lock_init(&d->arch.vgic.its_devices_lock);
    d->arch.vgic.its_devices = RB_ROOT;

    if ( is_hardware_domain(d) )
    {
        struct host_its *hw_its;

        list_for_each_entry(hw_its, &host_its_list, entry)
        {
            /*
             * For each host ITS create a virtual ITS using the same
             * base and thus doorbell address.
             * Use the same number of device ID and event ID bits as the host.
             */
            ret = vgic_v3_its_init_virtual(d, hw_its->addr,
                                           hw_its->devid_bits,
                                           hw_its->evid_bits);
            if ( ret )
                return ret;
            else
                d->arch.vgic.has_its = true;
        }
    }

    return 0;
}

void vgic_v3_its_free_domain(struct domain *d)
{
    struct virt_its *pos, *temp;

    list_for_each_entry_safe( pos, temp, &d->arch.vgic.vits_list, vits_list )
    {
        list_del(&pos->vits_list);
        xfree(pos);
    }

    ASSERT(RB_EMPTY_ROOT(&d->arch.vgic.its_devices));
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */