1 #ifndef __ASM_EVENT_H__
2 #define __ASM_EVENT_H__
3 
4 #include <asm/domain.h>
5 
6 void vcpu_kick(struct vcpu *v);
7 void vcpu_mark_events_pending(struct vcpu *v);
8 void vcpu_update_evtchn_irq(struct vcpu *v);
9 void vcpu_block_unless_event_pending(struct vcpu *v);
10 
vcpu_event_delivery_is_enabled(struct vcpu * v)11 static inline int vcpu_event_delivery_is_enabled(struct vcpu *v)
12 {
13     struct cpu_user_regs *regs = &v->arch.cpu_info->guest_cpu_user_regs;
14     return !(regs->cpsr & PSR_IRQ_MASK);
15 }
16 
local_events_need_delivery_nomask(void)17 static inline int local_events_need_delivery_nomask(void)
18 {
19     /* XXX: if the first interrupt has already been delivered, we should
20      * check whether any other interrupts with priority higher than the
21      * one in GICV_IAR are in the lr_pending queue or in the LR
22      * registers and return 1 only in that case.
23      * In practice the guest interrupt handler should run with
24      * interrupts disabled so this shouldn't be a problem in the general
25      * case.
26      */
27     if ( vgic_vcpu_pending_irq(current) )
28         return 1;
29 
30     if ( !vcpu_info(current, evtchn_upcall_pending) )
31         return 0;
32 
33     return vgic_evtchn_irq_pending(current);
34 }
35 
local_events_need_delivery(void)36 static inline int local_events_need_delivery(void)
37 {
38     if ( !vcpu_event_delivery_is_enabled(current) )
39         return 0;
40     return local_events_need_delivery_nomask();
41 }
42 
local_event_delivery_enable(void)43 static inline void local_event_delivery_enable(void)
44 {
45     struct cpu_user_regs *regs = guest_cpu_user_regs();
46     regs->cpsr &= ~PSR_IRQ_MASK;
47 }
48 
/* No arch specific virq definition now. Default to global. */
static inline enum virq_type arch_get_virq_type(unsigned int virq)
{
    /* @virq is deliberately unused: every virq is global on this arch. */
    return VIRQ_GLOBAL;
}
54 
55 #endif
56 /*
57  * Local variables:
58  * mode: C
59  * c-file-style: "BSD"
60  * c-basic-offset: 4
61  * indent-tabs-mode: nil
62  * End:
63  */
64