/*
 * xen/arch/arm/vgic.c
 *
 * ARM Virtual Generic Interrupt Controller support
 *
 * Ian Campbell <ian.campbell@citrix.com>
 * Copyright (c) 2011 Citrix Systems.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <xen/bitops.h>
#include <xen/lib.h>
#include <xen/init.h>
#include <xen/domain_page.h>
#include <xen/softirq.h>
#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/perfc.h>

#include <asm/current.h>

#include <asm/mmio.h>
#include <asm/gic.h>
#include <asm/vgic.h>

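/*
 * Returns the rank holding the registers for the given rank number:
 * rank 0 covers the vCPU's private (SGI/PPI) interrupts, higher ranks
 * cover the domain's shared (SPI) interrupts. Returns NULL if the rank
 * number is out of range for the domain.
 */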
static inline struct vgic_irq_rank *vgic_get_rank(struct vcpu *v, int rank)
{
    if ( rank == 0 )
        return v->arch.vgic.private_irqs;
    else if ( rank <= DOMAIN_NR_RANKS(v->domain) )
        return &v->domain->arch.vgic.shared_irqs[rank - 1];
    else
        return NULL;
}

/*
 * Returns rank corresponding to a GICD_<FOO><n> register for
 * GICD_<FOO> with <b>-bits-per-interrupt.
 */
struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n, int s)
{
    int rank = REG_RANK_NR(b, (n >> s));

    return vgic_get_rank(v, rank);
}

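/* Returns the rank containing the given virtual IRQ (32 IRQs per rank). */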
struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq)
{
    int rank = irq / 32;

    return vgic_get_rank(v, rank);
}

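/* Reset the pending_irq structure and bind it to the given virtual IRQ. */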
void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
{
    /* The lpi_vcpu_id field must be big enough to hold a VCPU ID. */
    BUILD_BUG_ON(BIT(sizeof(p->lpi_vcpu_id) * 8) < MAX_VIRT_CPUS);

    memset(p, 0, sizeof(*p));
    INIT_LIST_HEAD(&p->inflight);
    INIT_LIST_HEAD(&p->lr_queue);
    p->irq = virq;
    p->lpi_vcpu_id = INVALID_VCPU_ID;
}

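/*
 * Initialise a rank: set its index and route all of its interrupts to
 * the given vCPU.
 */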
static void vgic_rank_init(struct vgic_irq_rank *rank, uint8_t index,
                           unsigned int vcpu)
{
    unsigned int i;

    /*
     * Make sure that the type chosen to store the target is able to
     * store a VCPU ID between 0 and the maximum number of virtual CPUs
     * supported.
     */
    BUILD_BUG_ON((1 << (sizeof(rank->vcpu[0]) * 8)) < MAX_VIRT_CPUS);

    spin_lock_init(&rank->lock);

    rank->index = index;

    for ( i = 0; i < NR_INTERRUPT_PER_RANK; i++ )
        write_atomic(&rank->vcpu[i], vcpu);
}

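/*
 * Perform the version-specific vGIC setup for the domain and report
 * (via mmio_count) how many MMIO regions it will need.
 */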
int domain_vgic_register(struct domain *d, int *mmio_count)
{
    switch ( d->arch.vgic.version )
    {
#ifdef CONFIG_HAS_GICV3
    case GIC_V3:
        if ( vgic_v3_init(d, mmio_count) )
            return -ENODEV;
        break;
#endif
    case GIC_V2:
        if ( vgic_v2_init(d, mmio_count) )
            return -ENODEV;
        break;
    default:
        printk(XENLOG_G_ERR "d%d: Unknown vGIC version %u\n",
               d->domain_id, d->arch.vgic.version);
        return -ENODEV;
    }

    return 0;
}

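/*
 * Allocate and initialise the domain-wide vGIC state: the shared IRQ
 * ranks, the pending_irq array for SPIs and the vIRQ allocation bitmap.
 */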
int domain_vgic_init(struct domain *d, unsigned int nr_spis)
{
    int i;
    int ret;

    d->arch.vgic.ctlr = 0;

    /* Limit the number of virtual SPIs supported to (1020 - 32) = 988 */
    if ( nr_spis > (1020 - NR_LOCAL_IRQS) )
        return -EINVAL;

    d->arch.vgic.nr_spis = nr_spis;

    spin_lock_init(&d->arch.vgic.lock);

    d->arch.vgic.shared_irqs =
        xzalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
    if ( d->arch.vgic.shared_irqs == NULL )
        return -ENOMEM;

    d->arch.vgic.pending_irqs =
        xzalloc_array(struct pending_irq, d->arch.vgic.nr_spis);
    if ( d->arch.vgic.pending_irqs == NULL )
        return -ENOMEM;

    for ( i = 0; i < d->arch.vgic.nr_spis; i++ )
        vgic_init_pending_irq(&d->arch.vgic.pending_irqs[i], i + 32);

    /* SPIs are routed to VCPU0 by default */
    for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
        vgic_rank_init(&d->arch.vgic.shared_irqs[i], i + 1, 0);

    ret = d->arch.vgic.handler->domain_init(d);
    if ( ret )
        return ret;

    d->arch.vgic.allocated_irqs =
        xzalloc_array(unsigned long, BITS_TO_LONGS(vgic_num_irqs(d)));
    if ( !d->arch.vgic.allocated_irqs )
        return -ENOMEM;

    /* vIRQ0-15 (SGIs) are reserved */
    for ( i = 0; i < NR_GIC_SGI; i++ )
        set_bit(i, d->arch.vgic.allocated_irqs);

    return 0;
}

void register_vgic_ops(struct domain *d, const struct vgic_ops *ops)
{
    d->arch.vgic.handler = ops;
}

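/*
 * Release all resources the vGIC holds for the domain, returning any
 * physical IRQs that were routed to it.
 */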
void domain_vgic_free(struct domain *d)
{
    int i;
    int ret;

    for ( i = 0; i < (d->arch.vgic.nr_spis); i++ )
    {
        struct pending_irq *p = spi_to_pending(d, i + 32);

        if ( p->desc )
        {
            ret = release_guest_irq(d, p->irq);
            if ( ret )
                dprintk(XENLOG_G_WARNING,
                        "d%u: Failed to release virq %u ret = %d\n",
                        d->domain_id, p->irq, ret);
        }
    }

    if ( d->arch.vgic.handler )
        d->arch.vgic.handler->domain_free(d);
    xfree(d->arch.vgic.shared_irqs);
    xfree(d->arch.vgic.pending_irqs);
    xfree(d->arch.vgic.allocated_irqs);
}

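/* Allocate and initialise the per-vCPU vGIC state (private IRQs 0-31). */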
int vcpu_vgic_init(struct vcpu *v)
{
    int i;

    v->arch.vgic.private_irqs = xzalloc(struct vgic_irq_rank);
    if ( v->arch.vgic.private_irqs == NULL )
        return -ENOMEM;

    /* SGIs/PPIs are always routed to this VCPU */
    vgic_rank_init(v->arch.vgic.private_irqs, 0, v->vcpu_id);

    v->domain->arch.vgic.handler->vcpu_init(v);

    memset(&v->arch.vgic.pending_irqs, 0, sizeof(v->arch.vgic.pending_irqs));
    for ( i = 0; i < 32; i++ )
        vgic_init_pending_irq(&v->arch.vgic.pending_irqs[i], i);

    INIT_LIST_HEAD(&v->arch.vgic.inflight_irqs);
    INIT_LIST_HEAD(&v->arch.vgic.lr_pending);
    spin_lock_init(&v->arch.vgic.lock);

    return 0;
}

int vcpu_vgic_free(struct vcpu *v)
{
    xfree(v->arch.vgic.private_irqs);
    return 0;
}

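/* Returns the vCPU that the given virtual IRQ is currently routed to. */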
struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
{
    struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
    int target = read_atomic(&rank->vcpu[virq & INTERRUPT_RANK_MASK]);

    return v->domain->vcpu[target];
}

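/* Returns the priority currently configured for the given virtual IRQ. */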
static int vgic_get_virq_priority(struct vcpu *v, unsigned int virq)
{
    struct vgic_irq_rank *rank;

    /* LPIs don't have a rank; their priority is stored separately. */
    if ( is_lpi(virq) )
        return v->domain->arch.vgic.handler->lpi_get_priority(v->domain, virq);

    rank = vgic_rank_irq(v, virq);
    return ACCESS_ONCE(rank->priority[virq & INTERRUPT_RANK_MASK]);
}

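/*
 * Move the given virtual IRQ from the old vCPU to the new one. Returns
 * false if the request must be deferred because a previous migration of
 * the same IRQ is still in progress.
 */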
bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
{
    unsigned long flags;
    struct pending_irq *p;

    /* This will never be called for an LPI, as we don't migrate them. */
    ASSERT(!is_lpi(irq));

    spin_lock_irqsave(&old->arch.vgic.lock, flags);

    p = irq_to_pending(old, irq);

    /* nothing to do for virtual interrupts */
    if ( p->desc == NULL )
    {
        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
        return true;
    }

    /* migration already in progress, no need to do anything */
    if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
    {
        gprintk(XENLOG_WARNING,
                "irq %u migration failed: requested while in progress\n", irq);
        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
        return false;
    }

    perfc_incr(vgic_irq_migrates);

    if ( list_empty(&p->inflight) )
    {
        irq_set_affinity(p->desc, cpumask_of(new->processor));
        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
        return true;
    }
    /* If the IRQ is still lr_pending, re-inject it to the new vcpu */
    if ( !list_empty(&p->lr_queue) )
    {
        gic_remove_irq_from_queues(old, p);
        irq_set_affinity(p->desc, cpumask_of(new->processor));
        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
        vgic_vcpu_inject_irq(new, irq);
        return true;
    }
    /*
     * If the IRQ is in a GICH_LR register, set GIC_IRQ_GUEST_MIGRATING
     * and wait for the EOI.
     */
    if ( !list_empty(&p->inflight) )
        set_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);

    spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
    return true;
}

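/*
 * Update the physical affinity of all SPIs targeting this vCPU so that
 * they follow it to the pCPU it now runs on.
 */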
void arch_move_irqs(struct vcpu *v)
{
    const cpumask_t *cpu_mask = cpumask_of(v->processor);
    struct domain *d = v->domain;
    struct pending_irq *p;
    struct vcpu *v_target;
    int i;

    /*
     * We don't migrate LPIs at the moment.
     * If we ever do, we must make sure that the struct pending_irq does
     * not go away, as there is no lock preventing this here.
     * To ensure this, we check that the loop below never touches LPIs.
     * At the moment vgic_num_irqs() just covers SPIs, as it's mostly used
     * for allocating the pending_irq and irq_desc arrays, in which LPIs
     * don't participate.
     */
    ASSERT(!is_lpi(vgic_num_irqs(d) - 1));

    for ( i = 32; i < vgic_num_irqs(d); i++ )
    {
        v_target = vgic_get_target_vcpu(v, i);
        p = irq_to_pending(v_target, i);

        if ( v_target == v && !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
            irq_set_affinity(p->desc, cpu_mask);
    }
}

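/*
 * Disable every virtual IRQ whose bit is set in r, where n selects the
 * group of 32 interrupts. Any physical IRQ routed to the guest behind a
 * disabled virtual IRQ is disabled as well.
 */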
void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
{
    const unsigned long mask = r;
    struct pending_irq *p;
    struct irq_desc *desc;
    unsigned int irq;
    unsigned long flags;
    int i = 0;
    struct vcpu *v_target;

    /* LPIs will never be disabled via this function. */
    ASSERT(!is_lpi(32 * n + 31));

    while ( (i = find_next_bit(&mask, 32, i)) < 32 )
    {
        irq = i + (32 * n);
        v_target = vgic_get_target_vcpu(v, irq);

        spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
        p = irq_to_pending(v_target, irq);
        clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
        gic_remove_from_lr_pending(v_target, p);
        desc = p->desc;
        spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);

        if ( desc != NULL )
        {
            spin_lock_irqsave(&desc->lock, flags);
            desc->handler->disable(desc);
            spin_unlock_irqrestore(&desc->lock, flags);
        }
        i++;
    }
}

#define VGIC_ICFG_MASK(intr) (1 << ((2 * ((intr) % 16)) + 1))

/* The function should be called with the rank lock taken */
static inline unsigned int vgic_get_virq_type(struct vcpu *v, int n, int index)
{
    struct vgic_irq_rank *r = vgic_get_rank(v, n);
    uint32_t tr = r->icfg[index >> 4];

    ASSERT(spin_is_locked(&r->lock));

    if ( tr & VGIC_ICFG_MASK(index) )
        return IRQ_TYPE_EDGE_RISING;
    else
        return IRQ_TYPE_LEVEL_HIGH;
}

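/*
 * Enable every virtual IRQ whose bit is set in r, where n selects the
 * group of 32 interrupts. Already pending interrupts are forwarded to
 * the guest and any routed physical IRQ is enabled too.
 */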
void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
{
    const unsigned long mask = r;
    struct pending_irq *p;
    unsigned int irq;
    unsigned long flags;
    int i = 0;
    struct vcpu *v_target;
    struct domain *d = v->domain;

    /* LPIs will never be enabled via this function. */
    ASSERT(!is_lpi(32 * n + 31));

    while ( (i = find_next_bit(&mask, 32, i)) < 32 )
    {
        irq = i + (32 * n);
        v_target = vgic_get_target_vcpu(v, irq);
        spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
        p = irq_to_pending(v_target, irq);
        set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
        if ( !list_empty(&p->inflight) &&
             !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
            gic_raise_guest_irq(v_target, irq, p->priority);
        spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
        if ( p->desc != NULL )
        {
            irq_set_affinity(p->desc, cpumask_of(v_target->processor));
            spin_lock_irqsave(&p->desc->lock, flags);
            /*
             * The irq cannot be a PPI, we only support delivery of SPIs
             * to guests.
             */
            ASSERT(irq >= 32);
            if ( irq_type_set_by_domain(d) )
                gic_set_irq_type(p->desc, vgic_get_virq_type(v, n, i));
            p->desc->handler->enable(p->desc);
            spin_unlock_irqrestore(&p->desc->lock, flags);
        }
        i++;
    }
}

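/*
 * Inject an SGI on behalf of the current vCPU, honouring the targeting
 * mode requested by the GICD_SGIR write: a target list, all other vCPUs,
 * or the writer itself. Returns false on a malformed request.
 */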
bool vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode,
                 int virq, const struct sgi_target *target)
{
    struct domain *d = v->domain;
    int vcpuid;
    int i;
    unsigned int base;
    unsigned long int bitmap;

    ASSERT( virq < 16 );

    switch ( irqmode )
    {
    case SGI_TARGET_LIST:
        perfc_incr(vgic_sgi_list);
        base = target->aff1 << 4;
        bitmap = target->list;
        for_each_set_bit( i, &bitmap, sizeof(target->list) * 8 )
        {
            vcpuid = base + i;
            if ( vcpuid >= d->max_vcpus || d->vcpu[vcpuid] == NULL ||
                 !is_vcpu_online(d->vcpu[vcpuid]) )
            {
                gprintk(XENLOG_WARNING,
                        "VGIC: write r=%"PRIregister" target->list=%hx, wrong CPUTargetList\n",
                        sgir, target->list);
                continue;
            }
            vgic_vcpu_inject_irq(d->vcpu[vcpuid], virq);
        }
        break;
    case SGI_TARGET_OTHERS:
        perfc_incr(vgic_sgi_others);
        for ( i = 0; i < d->max_vcpus; i++ )
        {
            if ( i != current->vcpu_id && d->vcpu[i] != NULL &&
                 is_vcpu_online(d->vcpu[i]) )
                vgic_vcpu_inject_irq(d->vcpu[i], virq);
        }
        break;
    case SGI_TARGET_SELF:
        perfc_incr(vgic_sgi_self);
        vgic_vcpu_inject_irq(d->vcpu[current->vcpu_id], virq);
        break;
    default:
        gprintk(XENLOG_WARNING,
                "vGICD: unhandled GICD_SGIR write %"PRIregister" with wrong mode\n",
                sgir);
        return false;
    }

    return true;
}

/*
 * Returns the pointer to the struct pending_irq belonging to the given
 * interrupt.
 * This can return NULL if called for an LPI which has been unmapped
 * meanwhile.
 */
struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq)
{
    struct pending_irq *n;

    /*
     * Pending irqs allocation strategy: the first vgic.nr_spis irqs
     * are used for SPIs; the rest are used for per cpu irqs.
     */
    if ( irq < 32 )
        n = &v->arch.vgic.pending_irqs[irq];
    else if ( is_lpi(irq) )
        n = v->domain->arch.vgic.handler->lpi_to_pending(v->domain, irq);
    else
        n = &v->domain->arch.vgic.pending_irqs[irq - 32];
    return n;
}

struct pending_irq *spi_to_pending(struct domain *d, unsigned int irq)
{
    ASSERT(irq >= NR_LOCAL_IRQS);

    return &d->arch.vgic.pending_irqs[irq - 32];
}

void vgic_clear_pending_irqs(struct vcpu *v)
{
    struct pending_irq *p, *t;
    unsigned long flags;

    spin_lock_irqsave(&v->arch.vgic.lock, flags);
    list_for_each_entry_safe ( p, t, &v->arch.vgic.inflight_irqs, inflight )
        list_del_init(&p->inflight);
    gic_clear_pending_irqs(v);
    spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
}

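/*
 * Inject the given virtual IRQ into the vCPU: mark it as queued, insert
 * it into the inflight list in priority order and, if it is enabled,
 * forward it to the GIC so it becomes visible to the guest. Finally kick
 * the vCPU, sending an IPI if it is running on another pCPU.
 */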
void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq)
{
    uint8_t priority;
    struct pending_irq *iter, *n;
    unsigned long flags;
    bool running;

    spin_lock_irqsave(&v->arch.vgic.lock, flags);

    n = irq_to_pending(v, virq);
    /* If an LPI has been removed, there is nothing to inject here. */
    if ( unlikely(!n) )
    {
        spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
        return;
    }

    /* vcpu offline */
    if ( test_bit(_VPF_down, &v->pause_flags) )
    {
        spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
        return;
    }

    set_bit(GIC_IRQ_GUEST_QUEUED, &n->status);

    if ( !list_empty(&n->inflight) )
    {
        gic_raise_inflight_irq(v, virq);
        goto out;
    }

    priority = vgic_get_virq_priority(v, virq);
    n->priority = priority;

    /* the irq is enabled */
    if ( test_bit(GIC_IRQ_GUEST_ENABLED, &n->status) )
        gic_raise_guest_irq(v, virq, priority);

    list_for_each_entry ( iter, &v->arch.vgic.inflight_irqs, inflight )
    {
        if ( iter->priority > priority )
        {
            list_add_tail(&n->inflight, &iter->inflight);
            goto out;
        }
    }
    list_add_tail(&n->inflight, &v->arch.vgic.inflight_irqs);
out:
    spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
    /* we have a new higher priority irq, inject it into the guest */
    running = v->is_running;
    vcpu_unblock(v);
    if ( running && v != current )
    {
        perfc_incr(vgic_cross_cpu_intr_inject);
        smp_send_event_check_mask(cpumask_of(v->processor));
    }
}

void vgic_vcpu_inject_spi(struct domain *d, unsigned int virq)
{
    struct vcpu *v;

    /* the IRQ needs to be an SPI */
    ASSERT(virq >= 32 && virq <= vgic_num_irqs(d));

    v = vgic_get_target_vcpu(d->vcpu[0], virq);
    vgic_vcpu_inject_irq(v, virq);
}

void arch_evtchn_inject(struct vcpu *v)
{
    vgic_vcpu_inject_irq(v, v->domain->arch.evtchn_irq);
}

bool vgic_emulate(struct cpu_user_regs *regs, union hsr hsr)
{
    struct vcpu *v = current;

    ASSERT(v->domain->arch.vgic.handler->emulate_reg != NULL);

    return v->domain->arch.vgic.handler->emulate_reg(regs, hsr);
}

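/*
 * Reserve a specific virtual IRQ. Returns false if the vIRQ is out of
 * range or already allocated.
 */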
bool vgic_reserve_virq(struct domain *d, unsigned int virq)
{
    if ( virq >= vgic_num_irqs(d) )
        return false;

    return !test_and_set_bit(virq, d->arch.vgic.allocated_irqs);
}

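/*
 * Allocate a free virtual IRQ: a PPI when spi is false, otherwise an SPI.
 * Returns the allocated vIRQ, or -1 if none is available.
 */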
int vgic_allocate_virq(struct domain *d, bool spi)
{
    int first, end;
    unsigned int virq;

    if ( !spi )
    {
        /* We only allocate PPIs. SGIs are all reserved */
        first = 16;
        end = 32;
    }
    else
    {
        first = 32;
        end = vgic_num_irqs(d);
    }

    /*
     * There is no spinlock to protect allocated_irqs, therefore
     * test_and_set_bit may fail. If so retry it.
     */
    do
    {
        virq = find_next_zero_bit(d->arch.vgic.allocated_irqs, end, first);
        if ( virq >= end )
            return -1;
    }
    while ( test_and_set_bit(virq, d->arch.vgic.allocated_irqs) );

    return virq;
}

void vgic_free_virq(struct domain *d, unsigned int virq)
{
    clear_bit(virq, d->arch.vgic.allocated_irqs);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */