/******************************************************************************
 * irq.c
 *
 * Interrupt distribution and delivery logic.
 *
 * Copyright (c) 2006, K A Fraser, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/types.h>
#include <xen/event.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/keyhandler.h>
#include <asm/hvm/domain.h>
#include <asm/hvm/support.h>
#include <asm/msi.h>

/* Must be called with hvm_domain->irq_lock held. */
static void assert_gsi(struct domain *d, unsigned ioapic_gsi)
{
    struct pirq *pirq =
        pirq_info(d, domain_emuirq_to_pirq(d, ioapic_gsi));

    if ( hvm_domain_use_pirq(d, pirq) )
    {
        send_guest_pirq(d, pirq);
        return;
    }
    vioapic_irq_positive_edge(d, ioapic_gsi);
}

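/*
 * Assert an IRQ that is visible on both the IO-APIC (as a GSI) and the
 * legacy PIC. Must be called with hvm_domain->irq_lock held.
 */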
static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
{
    assert_gsi(d, ioapic_gsi);
    vpic_irq_positive_edge(d, pic_irq);
}

/* Must be called with hvm_domain->irq_lock held. */
static void deassert_irq(struct domain *d, unsigned isa_irq)
{
    struct pirq *pirq =
        pirq_info(d, domain_emuirq_to_pirq(d, isa_irq));

    if ( !hvm_domain_use_pirq(d, pirq) )
        vpic_irq_negative_edge(d, isa_irq);
}

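/*
 * Assert a PCI INTx line. Each of the 32 * 4 device/INTx pairs is tracked
 * as one bit of pci_intx.i, so a line that is already high is not asserted
 * twice. The pair is routed both to a GSI on the IO-APIC and, through one
 * of the four PCI-ISA link wires, to an ISA IRQ on the PIC; the reference
 * counts in gsi_assert_count[] and pci_link_assert_count[] keep a shared
 * output asserted until the last source deasserts it.
 */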
static void __hvm_pci_intx_assert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi, link, isa_irq;

    ASSERT((device <= 31) && (intx <= 3));

    if ( __test_and_set_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
        return;

    gsi = hvm_pci_intx_gsi(device, intx);
    if ( gsi >= hvm_irq->nr_gsis )
    {
        ASSERT_UNREACHABLE();
        return;
    }
    if ( hvm_irq->gsi_assert_count[gsi]++ == 0 )
        assert_gsi(d, gsi);

    link    = hvm_pci_intx_link(device, intx);
    isa_irq = hvm_irq->pci_link.route[link];
    if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
         (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
        assert_irq(d, isa_irq, isa_irq);
}

void hvm_pci_intx_assert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    spin_lock(&d->arch.hvm_domain.irq_lock);
    __hvm_pci_intx_assert(d, device, intx);
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

static void __hvm_pci_intx_deassert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi, link, isa_irq;

    ASSERT((device <= 31) && (intx <= 3));

    if ( !__test_and_clear_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
        return;

    gsi = hvm_pci_intx_gsi(device, intx);
    if ( gsi >= hvm_irq->nr_gsis )
    {
        ASSERT_UNREACHABLE();
        return;
    }
    --hvm_irq->gsi_assert_count[gsi];

    link    = hvm_pci_intx_link(device, intx);
    isa_irq = hvm_irq->pci_link.route[link];
    if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
         (--hvm_irq->gsi_assert_count[isa_irq] == 0) )
        deassert_irq(d, isa_irq);
}

void hvm_pci_intx_deassert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    spin_lock(&d->arch.hvm_domain.irq_lock);
    __hvm_pci_intx_deassert(d, device, intx);
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

void hvm_gsi_assert(struct domain *d, unsigned int gsi)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);

    if ( gsi >= hvm_irq->nr_gsis )
    {
        ASSERT_UNREACHABLE();
        return;
    }

    /*
     * __hvm_pci_intx_{de}assert uses a bitfield in pci_intx.i to track the
     * status of each interrupt line, and Xen does the routing and GSI
     * assertion based on that. The value of the pci_intx.i bitmap prevents the
     * same line from triggering multiple times. As we don't use that bitmap
     * for the hardware domain, Xen needs to rely on gsi_assert_count in order
     * to know if the GSI is pending or not.
     */
    spin_lock(&d->arch.hvm_domain.irq_lock);
    if ( !hvm_irq->gsi_assert_count[gsi] )
    {
        hvm_irq->gsi_assert_count[gsi] = 1;
        assert_gsi(d, gsi);
    }
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

void hvm_gsi_deassert(struct domain *d, unsigned int gsi)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);

    if ( gsi >= hvm_irq->nr_gsis )
    {
        ASSERT_UNREACHABLE();
        return;
    }

    spin_lock(&d->arch.hvm_domain.irq_lock);
    hvm_irq->gsi_assert_count[gsi] = 0;
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

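/*
 * Assert an ISA IRQ on the PIC and on its IO-APIC GSI equivalent. If a
 * get_vector callback is supplied it is invoked, still under the IRQ lock,
 * to translate the GSI into the vector that will be delivered; that vector
 * (or -1 when no callback is given) is returned to the caller.
 */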
int hvm_isa_irq_assert(struct domain *d, unsigned int isa_irq,
                       int (*get_vector)(const struct domain *d,
                                         unsigned int gsi))
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
    int vector = -1;

    ASSERT(isa_irq <= 15);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
         (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        assert_irq(d, gsi, isa_irq);

    if ( get_vector )
        vector = get_vector(d, gsi);

    spin_unlock(&d->arch.hvm_domain.irq_lock);

    return vector;
}

void hvm_isa_irq_deassert(
    struct domain *d, unsigned int isa_irq)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    ASSERT(isa_irq <= 15);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
         (--hvm_irq->gsi_assert_count[gsi] == 0) )
        deassert_irq(d, isa_irq);

    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

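/*
 * Recompute the level of the event-channel callback interrupt from vCPU0's
 * evtchn_upcall_pending flag, and propagate any change through whichever
 * callback via (GSI or PCI INTx) the guest has configured.
 */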
static void hvm_set_callback_irq_level(struct vcpu *v)
{
    struct domain *d = v->domain;
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi, pdev, pintx, asserted;

    ASSERT(v->vcpu_id == 0);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    /* NB. Do not check the evtchn_upcall_mask. It is not used in HVM mode. */
    asserted = !!vcpu_info(v, evtchn_upcall_pending);
    if ( hvm_irq->callback_via_asserted == asserted )
        goto out;
    hvm_irq->callback_via_asserted = asserted;

    /* Callback status has changed. Update the callback via. */
    switch ( hvm_irq->callback_via_type )
    {
    case HVMIRQ_callback_gsi:
        gsi = hvm_irq->callback_via.gsi;
        if ( asserted && (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        {
            vioapic_irq_positive_edge(d, gsi);
            if ( gsi <= 15 )
                vpic_irq_positive_edge(d, gsi);
        }
        else if ( !asserted && (--hvm_irq->gsi_assert_count[gsi] == 0) )
        {
            if ( gsi <= 15 )
                vpic_irq_negative_edge(d, gsi);
        }
        break;
    case HVMIRQ_callback_pci_intx:
        pdev  = hvm_irq->callback_via.pci.dev;
        pintx = hvm_irq->callback_via.pci.intx;
        if ( asserted )
            __hvm_pci_intx_assert(d, pdev, pintx);
        else
            __hvm_pci_intx_deassert(d, pdev, pintx);
        break;
    default:
        break;
    }

 out:
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

void hvm_maybe_deassert_evtchn_irq(void)
{
    struct domain *d = current->domain;
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);

    if ( hvm_irq->callback_via_asserted &&
         !vcpu_info(d->vcpu[0], evtchn_upcall_pending) )
        hvm_set_callback_irq_level(d->vcpu[0]);
}

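/*
 * Deliver the event-channel callback to a vCPU using the first applicable
 * mechanism: a per-vCPU upcall vector injected through the local APIC, a
 * plain kick for PV-evtchn vCPUs, or the domain-wide callback via, which
 * is bound to vCPU0 only. Defers to a tasklet when called from interrupt
 * context or with IRQs disabled.
 */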
void hvm_assert_evtchn_irq(struct vcpu *v)
{
    if ( unlikely(in_irq() || !local_irq_is_enabled()) )
    {
        tasklet_schedule(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
        return;
    }

    if ( v->arch.hvm_vcpu.evtchn_upcall_vector != 0 )
    {
        uint8_t vector = v->arch.hvm_vcpu.evtchn_upcall_vector;

        vlapic_set_irq(vcpu_vlapic(v), vector, 0);
    }
    else if ( is_hvm_pv_evtchn_vcpu(v) )
        vcpu_kick(v);
    else if ( v->vcpu_id == 0 )
        hvm_set_callback_irq_level(v);
}

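/*
 * Re-route one of the four PCI-ISA link wires to a different ISA IRQ,
 * updating the passthrough (dpci) ISA IRQ map and, if the link is
 * currently asserted, migrating the asserted level from the old ISA IRQ
 * to the new one.
 */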
int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    u8 old_isa_irq;
    int i;

    if ( (link > 3) || (isa_irq > 15) )
        return -EINVAL;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    old_isa_irq = hvm_irq->pci_link.route[link];
    if ( old_isa_irq == isa_irq )
        goto out;
    hvm_irq->pci_link.route[link] = isa_irq;

    /* PCI pass-through fixup. */
    if ( hvm_irq->dpci )
    {
        if ( old_isa_irq )
            clear_bit(old_isa_irq, &hvm_irq->dpci->isairq_map);

        for ( i = 0; i < NR_LINK; i++ )
            if ( hvm_irq->dpci->link_cnt[i] && hvm_irq->pci_link.route[i] )
                set_bit(hvm_irq->pci_link.route[i],
                        &hvm_irq->dpci->isairq_map);
    }

    if ( hvm_irq->pci_link_assert_count[link] == 0 )
        goto out;

    if ( old_isa_irq && (--hvm_irq->gsi_assert_count[old_isa_irq] == 0) )
        vpic_irq_negative_edge(d, old_isa_irq);

    if ( isa_irq && (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
    {
        vioapic_irq_positive_edge(d, isa_irq);
        vpic_irq_positive_edge(d, isa_irq);
    }

 out:
    spin_unlock(&d->arch.hvm_domain.irq_lock);

    dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n",
            d->domain_id, link, old_isa_irq, isa_irq);

    return 0;
}

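/*
 * Inject an MSI into the guest, decoding the destination and mode from the
 * address and the vector, delivery mode and trigger mode from the data. A
 * zero vector is not deliverable as an MSI; in that case the upper address
 * bits combined with the destination field are reinterpreted as a pirq
 * number and the interrupt is forwarded as an emulated-MSI event channel.
 */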
int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data)
{
    uint32_t tmp = (uint32_t) addr;
    uint8_t  dest = (tmp & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t  dest_mode = !!(tmp & MSI_ADDR_DESTMODE_MASK);
    uint8_t  delivery_mode = (data & MSI_DATA_DELIVERY_MODE_MASK)
        >> MSI_DATA_DELIVERY_MODE_SHIFT;
    uint8_t trig_mode = (data & MSI_DATA_TRIGGER_MASK)
        >> MSI_DATA_TRIGGER_SHIFT;
    uint8_t vector = data & MSI_DATA_VECTOR_MASK;

    if ( !vector )
    {
        int pirq = ((addr >> 32) & 0xffffff00) | dest;

        if ( pirq > 0 )
        {
            struct pirq *info = pirq_info(d, pirq);

            /* if it is the first time, allocate the pirq */
            if ( !info || info->arch.hvm.emuirq == IRQ_UNBOUND )
            {
                int rc;

                spin_lock(&d->event_lock);
                rc = map_domain_emuirq_pirq(d, pirq, IRQ_MSI_EMU);
                spin_unlock(&d->event_lock);
                if ( rc )
                    return rc;
                info = pirq_info(d, pirq);
                if ( !info )
                    return -EBUSY;
            }
            else if ( info->arch.hvm.emuirq != IRQ_MSI_EMU )
                return -EINVAL;
            send_guest_pirq(d, info);
            return 0;
        }
        return -ERANGE;
    }

    return vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
}

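/*
 * Reconfigure the callback via from an HVM_PARAM_CALLBACK_IRQ value: the
 * type field selects GSI, PCI INTx or direct-vector delivery, and the low
 * bits encode the GSI number, the device/INTx pair, or the vector. Any
 * level asserted on the old via is torn down before the new via is raised.
 */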
void hvm_set_callback_via(struct domain *d, uint64_t via)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi=0, pdev=0, pintx=0;
    uint8_t via_type;
    struct vcpu *v;

    via_type = (uint8_t)MASK_EXTR(via, HVM_PARAM_CALLBACK_IRQ_TYPE_MASK) + 1;
    if ( ((via_type == HVMIRQ_callback_gsi) && (via == 0)) ||
         (via_type > HVMIRQ_callback_vector) )
        via_type = HVMIRQ_callback_none;

    if ( via_type != HVMIRQ_callback_vector &&
         (!has_vlapic(d) || !has_vioapic(d) || !has_vpic(d)) )
        return;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    /* Tear down old callback via. */
    if ( hvm_irq->callback_via_asserted )
    {
        switch ( hvm_irq->callback_via_type )
        {
        case HVMIRQ_callback_gsi:
            gsi = hvm_irq->callback_via.gsi;
            if ( (--hvm_irq->gsi_assert_count[gsi] == 0) && (gsi <= 15) )
                vpic_irq_negative_edge(d, gsi);
            break;
        case HVMIRQ_callback_pci_intx:
            pdev  = hvm_irq->callback_via.pci.dev;
            pintx = hvm_irq->callback_via.pci.intx;
            __hvm_pci_intx_deassert(d, pdev, pintx);
            break;
        default:
            break;
        }
    }

    /* Set up new callback via. */
    switch ( hvm_irq->callback_via_type = via_type )
    {
    case HVMIRQ_callback_gsi:
        gsi = hvm_irq->callback_via.gsi = (uint8_t)via;
        if ( (gsi == 0) || (gsi >= hvm_irq->nr_gsis) )
            hvm_irq->callback_via_type = HVMIRQ_callback_none;
        else if ( hvm_irq->callback_via_asserted &&
                  (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        {
            vioapic_irq_positive_edge(d, gsi);
            if ( gsi <= 15 )
                vpic_irq_positive_edge(d, gsi);
        }
        break;
    case HVMIRQ_callback_pci_intx:
        pdev  = hvm_irq->callback_via.pci.dev  = (uint8_t)(via >> 11) & 31;
        pintx = hvm_irq->callback_via.pci.intx = (uint8_t)via & 3;
        if ( hvm_irq->callback_via_asserted )
            __hvm_pci_intx_assert(d, pdev, pintx);
        break;
    case HVMIRQ_callback_vector:
        hvm_irq->callback_via.vector = (uint8_t)via;
        break;
    default:
        break;
    }

    spin_unlock(&d->arch.hvm_domain.irq_lock);

    for_each_vcpu ( d, v )
        if ( is_vcpu_online(v) )
            hvm_assert_evtchn_irq(v);

#ifndef NDEBUG
    printk(XENLOG_G_INFO "Dom%u callback via changed to ", d->domain_id);
    switch ( via_type )
    {
    case HVMIRQ_callback_gsi:
        printk("GSI %u\n", gsi);
        break;
    case HVMIRQ_callback_pci_intx:
        printk("PCI INTx Dev 0x%02x Int%c\n", pdev, 'A' + pintx);
        break;
    case HVMIRQ_callback_vector:
        printk("Direct Vector 0x%02x\n", (uint8_t)via);
        break;
    default:
        printk("None\n");
        break;
    }
#endif
}

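/*
 * Return the highest-priority interrupt pending on a vCPU. The priority
 * order implemented below is: NMI, then MCE, then an event-channel
 * callback vector, then the PIC, and finally the local APIC.
 */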
struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
{
    struct hvm_domain *plat = &v->domain->arch.hvm_domain;
    int vector;

    if ( unlikely(v->nmi_pending) )
        return hvm_intack_nmi;

    if ( unlikely(v->mce_pending) )
        return hvm_intack_mce;

    if ( (plat->irq->callback_via_type == HVMIRQ_callback_vector)
         && vcpu_info(v, evtchn_upcall_pending) )
        return hvm_intack_vector(plat->irq->callback_via.vector);

    if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
        return hvm_intack_pic(0);

    vector = vlapic_has_pending_irq(v);
    if ( vector != -1 )
        return hvm_intack_lapic(vector);

    return hvm_intack_none;
}

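/*
 * Acknowledge an interrupt previously returned by
 * hvm_vcpu_has_pending_irq(), consuming it at its source. Returns
 * hvm_intack_none if the interrupt is no longer pending; for the PIC the
 * actual vector is only determined (and filled in) at ack time.
 */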
struct hvm_intack hvm_vcpu_ack_pending_irq(
    struct vcpu *v, struct hvm_intack intack)
{
    int vector;

    switch ( intack.source )
    {
    case hvm_intsrc_nmi:
        if ( !test_and_clear_bool(v->nmi_pending) )
            intack = hvm_intack_none;
        break;
    case hvm_intsrc_mce:
        if ( !test_and_clear_bool(v->mce_pending) )
            intack = hvm_intack_none;
        break;
    case hvm_intsrc_pic:
        if ( (vector = vpic_ack_pending_irq(v)) == -1 )
            intack = hvm_intack_none;
        else
            intack.vector = (uint8_t)vector;
        break;
    case hvm_intsrc_lapic:
        if ( !vlapic_ack_pending_irq(v, intack.vector, 0) )
            intack = hvm_intack_none;
        break;
    case hvm_intsrc_vector:
        break;
    default:
        intack = hvm_intack_none;
        break;
    }

    return intack;
}

int hvm_local_events_need_delivery(struct vcpu *v)
{
    struct hvm_intack intack = hvm_vcpu_has_pending_irq(v);

    if ( likely(intack.source == hvm_intsrc_none) )
        return 0;

    return !hvm_interrupt_blocked(v, intack);
}

void arch_evtchn_inject(struct vcpu *v)
{
    if ( is_hvm_vcpu(v) )
        hvm_assert_evtchn_irq(v);
}

static void irq_dump(struct domain *d)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    int i;
    printk("Domain %d:\n", d->domain_id);
    printk("PCI 0x%16.16"PRIx64"%16.16"PRIx64
           " ISA 0x%8.8"PRIx32" ROUTE %u %u %u %u\n",
           hvm_irq->pci_intx.pad[0],  hvm_irq->pci_intx.pad[1],
           (uint32_t) hvm_irq->isa_irq.pad[0],
           hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1],
           hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]);
    for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 )
        printk("GSI [%x - %x] %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8
               " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
               i, i+7,
               hvm_irq->gsi_assert_count[i+0],
               hvm_irq->gsi_assert_count[i+1],
               hvm_irq->gsi_assert_count[i+2],
               hvm_irq->gsi_assert_count[i+3],
               hvm_irq->gsi_assert_count[i+4],
               hvm_irq->gsi_assert_count[i+5],
               hvm_irq->gsi_assert_count[i+6],
               hvm_irq->gsi_assert_count[i+7]);
    if ( i != hvm_irq->nr_gsis )
    {
        printk("GSI [%x - %x]", i, hvm_irq->nr_gsis - 1);
        for ( ; i < hvm_irq->nr_gsis; i++)
            printk(" %2"PRIu8, hvm_irq->gsi_assert_count[i]);
        printk("\n");
    }
    printk("Link %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
           hvm_irq->pci_link_assert_count[0],
           hvm_irq->pci_link_assert_count[1],
           hvm_irq->pci_link_assert_count[2],
           hvm_irq->pci_link_assert_count[3]);
    printk("Callback via %i:%#"PRIx32",%s asserted\n",
           hvm_irq->callback_via_type, hvm_irq->callback_via.gsi,
           hvm_irq->callback_via_asserted ? "" : " not");
}

static void dump_irq_info(unsigned char key)
{
    struct domain *d;

    printk("'%c' pressed -> dumping HVM irq info\n", key);

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
        if ( is_hvm_domain(d) )
            irq_dump(d);

    rcu_read_unlock(&domlist_read_lock);
}

static int __init dump_irq_info_key_init(void)
{
    register_keyhandler('I', dump_irq_info, "dump HVM irq info", 1);
    return 0;
}
__initcall(dump_irq_info_key_init);

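/*
 * Save/restore handlers. Only the raw line state (pci_intx, isa_irq) and
 * the link routing table are migrated; the assert counts are derived state
 * and are recomputed from the line state by the load handlers below.
 */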
static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int asserted, pdev, pintx;
    int rc;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    pdev  = hvm_irq->callback_via.pci.dev;
    pintx = hvm_irq->callback_via.pci.intx;
    asserted = (hvm_irq->callback_via_asserted &&
                (hvm_irq->callback_via_type == HVMIRQ_callback_pci_intx));

    /*
     * Deassert virtual interrupt via PCI INTx line. The virtual interrupt
     * status is not saved/restored, so the INTx line must be deasserted in
     * the restore context.
     */
    if ( asserted )
        __hvm_pci_intx_deassert(d, pdev, pintx);

    /* Save PCI IRQ lines */
    rc = hvm_save_entry(PCI_IRQ, 0, h, &hvm_irq->pci_intx);

    if ( asserted )
        __hvm_pci_intx_assert(d, pdev, pintx);

    spin_unlock(&d->arch.hvm_domain.irq_lock);

    return rc;
}

static int irq_save_isa(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);

    /* Save ISA IRQ lines */
    return ( hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq) );
}

static int irq_save_link(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);

    /* Save PCI-ISA link state */
    return ( hvm_save_entry(PCI_LINK, 0, h, &hvm_irq->pci_link) );
}

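/*
 * The load handlers are order-dependent: the PCI line state seeds the GSI
 * and link assert counts, the ISA line state is added on top, and finally
 * each asserted link contributes one assertion to its routed GSI.
 */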
static int irq_load_pci(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    int link, dev, intx, gsi;

    /* Load the PCI IRQ lines */
    if ( hvm_load_entry(PCI_IRQ, h, &hvm_irq->pci_intx) != 0 )
        return -EINVAL;

    /* Clear the PCI link assert counts */
    for ( link = 0; link < 4; link++ )
        hvm_irq->pci_link_assert_count[link] = 0;

    /* Clear the GSI link assert counts */
    for ( gsi = 0; gsi < hvm_irq->nr_gsis; gsi++ )
        hvm_irq->gsi_assert_count[gsi] = 0;

    /* Recalculate the counts from the IRQ line state */
    for ( dev = 0; dev < 32; dev++ )
        for ( intx = 0; intx < 4; intx++ )
            if ( test_bit(dev*4 + intx, &hvm_irq->pci_intx.i) )
            {
                /* Direct GSI assert */
                gsi = hvm_pci_intx_gsi(dev, intx);
                hvm_irq->gsi_assert_count[gsi]++;
                /* PCI-ISA bridge assert */
                link = hvm_pci_intx_link(dev, intx);
                hvm_irq->pci_link_assert_count[link]++;
            }

    return 0;
}

static int irq_load_isa(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    int irq;

    /* Load the ISA IRQ lines */
    if ( hvm_load_entry(ISA_IRQ, h, &hvm_irq->isa_irq) != 0 )
        return -EINVAL;

    /* Adjust the GSI assert counts for the ISA IRQ line state.
     * This relies on the PCI IRQ state being loaded first. */
    for ( irq = 0; platform_legacy_irq(irq); irq++ )
        if ( test_bit(irq, &hvm_irq->isa_irq.i) )
            hvm_irq->gsi_assert_count[hvm_isa_irq_to_gsi(irq)]++;

    return 0;
}


static int irq_load_link(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    int link, gsi;

    /* Load the PCI-ISA IRQ link routing table */
    if ( hvm_load_entry(PCI_LINK, h, &hvm_irq->pci_link) != 0 )
        return -EINVAL;

    /* Sanity check */
    for ( link = 0; link < 4; link++ )
        if ( hvm_irq->pci_link.route[link] > 15 )
        {
            gdprintk(XENLOG_ERR,
                     "HVM restore: PCI-ISA link %u out of range (%u)\n",
                     link, hvm_irq->pci_link.route[link]);
            return -EINVAL;
        }

    /* Adjust the GSI assert counts for the link outputs.
     * This relies on the PCI and ISA IRQ state being loaded first. */
    for ( link = 0; link < 4; link++ )
    {
        if ( hvm_irq->pci_link_assert_count[link] != 0 )
        {
            gsi = hvm_irq->pci_link.route[link];
            if ( gsi != 0 )
                hvm_irq->gsi_assert_count[gsi]++;
        }
    }

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci,
                          1, HVMSR_PER_DOM);
HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa,
                          1, HVMSR_PER_DOM);
HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link,
                          1, HVMSR_PER_DOM);