#ifndef _ASM_HW_IRQ_H
#define _ASM_HW_IRQ_H

/* (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar */

#include <asm/atomic.h>
#include <asm/numa.h>
#include <xen/cpumask.h>
#include <xen/percpu.h>
#include <xen/smp.h>
#include <asm/hvm/irq.h>

extern unsigned int nr_irqs_gsi;
extern unsigned int nr_irqs;
#define nr_static_irqs nr_irqs_gsi

#define IO_APIC_IRQ(irq)    (platform_legacy_irq(irq) ?    \
			     (1 << (irq)) & io_apic_irqs : \
			     (irq) < nr_irqs_gsi)

#define MSI_IRQ(irq)       ((irq) >= nr_irqs_gsi && (irq) < nr_irqs)
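
/*
 * Illustrative only: how the macros above partition the IRQ number space,
 * assuming hypothetical runtime values nr_irqs_gsi == 48 and nr_irqs == 224:
 *
 *     IO_APIC_IRQ(3)     evaluates the legacy branch: (1 << 3) & io_apic_irqs
 *     IO_APIC_IRQ(40)    evaluates 40 < nr_irqs_gsi, i.e. true
 *     MSI_IRQ(100)       is true, since 48 <= 100 < 224 (the dynamically
 *                        allocated, i.e. MSI, range)
 */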

#define LEGACY_VECTOR(irq)          ((irq) + FIRST_LEGACY_VECTOR)

typedef struct {
    DECLARE_BITMAP(_bits, X86_IDT_VECTORS);
} vmask_t;

struct irq_desc;

/*
 * Xen's logic for moving interrupts between CPUs allows manipulating
 * interrupts that target remote CPUs.  An interrupt is moved away from its
 * current CPU(s) as follows:
 *
 * 1. irq_set_affinity() is called with the new destination mask; that mask is
 *    copied into pending_mask and IRQ_MOVE_PENDING is set in status to record
 *    that an affinity change has been requested.
 * 2. An interrupt acked while IRQ_MOVE_PENDING is set triggers the logic to
 *    migrate it to a destination in pending_mask, as long as the mask contains
 *    any online CPUs.
 * 3. cpu_mask and vector are copied to old_cpu_mask and old_vector.
 * 4. The new cpu_mask and vector are set, and the vector is set up at the new
 *    destination.
 * 5. move_in_progress is set.
 * 6. The interrupt source is updated to target the new CPU and vector.
 * 7. Interrupts arriving at old_cpu_mask are processed normally.
 * 8. When the first interrupt is delivered at the new destination (cpu_mask),
 *    cleanup of the old destination(s) is engaged as part of acking the
 *    interrupt: move_in_progress is cleared and old_cpu_mask is reduced to
 *    the online CPUs.  If the result is empty the old vector is released.
 *    Otherwise move_cleanup_count is set to the weight of online CPUs in
 *    old_cpu_mask and IRQ_MOVE_CLEANUP_VECTOR is sent to them.
 * 9. When receiving IRQ_MOVE_CLEANUP_VECTOR, CPUs in old_cpu_mask clean their
 *    vector entry and decrement move_cleanup_count.  The CPU that brings
 *    move_cleanup_count to 0 releases the vector.
 *
 * Note that while an interrupt movement is in progress (either
 * move_in_progress or move_cleanup_count set) it's not possible to move the
 * interrupt to yet another CPU.
 *
 * Interrupt movements done by fixup_irqs() skip setting IRQ_MOVE_PENDING and
 * pending_mask, as the movement must be performed right away, and so start
 * directly from step 3.
 *
 * By keeping the vector configured on the old CPU(s) until the interrupt is
 * acked on the new destination, Xen allows draining any interrupts still
 * pending at the old destinations.  See the illustrative sketch after the
 * structure below.
 */
struct arch_irq_desc {
        int16_t vector;                  /* vector itself is only 8 bits, */
        int16_t old_vector;              /* but we use -1 for unassigned  */
        /*
         * Except for high priority interrupts @cpu_mask may have bits set for
         * offline CPUs.  Consumers need to be careful to mask this down to
         * online ones as necessary.  There is supposed to always be a non-
         * empty intersection with cpu_online_map.
         */
        cpumask_var_t cpu_mask;
        cpumask_var_t old_cpu_mask;
        cpumask_var_t pending_mask;
        vmask_t *used_vectors;
        unsigned move_cleanup_count;
        bool move_in_progress : 1;
        int8_t used;
        /*
         * Weak reference to domain having permission over this IRQ (which can
         * be different from the domain actually having the IRQ assigned)
         */
        domid_t creator_domid;
};
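
/*
 * Illustrative sketch only (hypothetical caller, not part of the interface):
 * requesting an affinity change.  This assumes the IRQ descriptor lock has to
 * be held around irq_set_affinity() and that "target_cpu" is an online CPU
 * picked by the caller; steps 2-9 above are then driven from interrupt
 * acknowledgement.
 *
 *     unsigned long flags;
 *
 *     spin_lock_irqsave(&desc->lock, flags);
 *     irq_set_affinity(desc, cpumask_of(target_cpu));
 *     spin_unlock_irqrestore(&desc->lock, flags);
 */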

/* For use with irq_desc.arch.used */
#define IRQ_UNUSED      (0)
#define IRQ_USED        (1)
#define IRQ_RESERVED    (-1)

#define IRQ_VECTOR_UNASSIGNED (-1)

typedef int vector_irq_t[X86_IDT_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
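
/*
 * Illustrative only (a sketch of the lookup, not the full entry path): the
 * per-CPU table above translates the vector taken from the interrupt frame
 * into an IRQ number, roughly as the entry code does for a hypothetical
 * "vector":
 *
 *     int irq = this_cpu(vector_irq)[vector];
 *
 * A negative entry means no valid IRQ is currently bound to that vector on
 * this CPU.
 */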

extern bool opt_noirqbalance;

#define OPT_IRQ_VECTOR_MAP_DEFAULT 0 /* Do the default thing  */
#define OPT_IRQ_VECTOR_MAP_NONE    1 /* None */
#define OPT_IRQ_VECTOR_MAP_GLOBAL  2 /* One global vector map (no vector sharing) */
#define OPT_IRQ_VECTOR_MAP_PERDEV  3 /* Per-device vector map (no vector sharing within a device) */

extern int opt_irq_vector_map;

#define platform_legacy_irq(irq)	((irq) < NR_ISA_IRQS)

void cf_check event_check_interrupt(void);
void cf_check invalidate_interrupt(void);
void cf_check call_function_interrupt(void);
void cf_check irq_move_cleanup_interrupt(void);

uint8_t alloc_hipriority_vector(void);

void set_direct_apic_vector(uint8_t vector, void (*handler)(void));
void alloc_direct_apic_vector(uint8_t *vector, void (*handler)(void));

void do_IRQ(struct cpu_user_regs *regs);

void cf_check disable_8259A_irq(struct irq_desc *desc);
void cf_check enable_8259A_irq(struct irq_desc *desc);
int i8259A_irq_pending(unsigned int irq);
void mask_8259A(void);
void unmask_8259A(void);
void init_8259A(int auto_eoi);
void make_8259A_irq(unsigned int irq);
bool bogus_8259A_irq(unsigned int irq);
int i8259A_suspend(void);
int i8259A_resume(void);

void enable_IO_APIC(void);
void setup_IO_APIC(void);
void disable_IO_APIC(void);
void setup_ioapic_dest(void);
vmask_t *io_apic_get_used_vector_map(unsigned int irq);

extern unsigned int io_apic_irqs;

DECLARE_PER_CPU(unsigned int, irq_count);

struct pirq;
struct arch_pirq {
    int irq;
    union {
        struct hvm_pirq {
            int emuirq;
            struct hvm_pirq_dpci dpci;
        } hvm;
    };
};

#define pirq_dpci(pirq) ((pirq) ? &(pirq)->arch.hvm.dpci : NULL)
#define dpci_pirq(pd) container_of(pd, struct pirq, arch.hvm.dpci)

int pirq_shared(struct domain *d, int pirq);

int map_domain_pirq(struct domain *d, int pirq, int irq, int type,
                    void *data);
int unmap_domain_pirq(struct domain *d, int pirq);
int get_free_pirq(struct domain *d, int type);
int get_free_pirqs(struct domain *d, unsigned int nr);
void free_domain_pirqs(struct domain *d);
int map_domain_emuirq_pirq(struct domain *d, int pirq, int emuirq);
int unmap_domain_pirq_emuirq(struct domain *d, int pirq);
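
/*
 * Illustrative call sequence only (a sketch; the PHYSDEVOP hypercall handlers
 * are the real callers): binding a host GSI to a domain and undoing the
 * binding again.  This assumes the caller already owns a valid host "irq",
 * holds the locks the implementation expects (notably the domain's event
 * lock), and wants a plain GSI type mapping with no extra data:
 *
 *     int pirq = get_free_pirq(d, MAP_PIRQ_TYPE_GSI);
 *
 *     if ( pirq >= 0 )
 *         rc = map_domain_pirq(d, pirq, irq, MAP_PIRQ_TYPE_GSI, NULL);
 *     ...
 *     rc = unmap_domain_pirq(d, pirq);
 */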

/* Evacuate interrupts assigned to CPUs not present in the CPU online map. */
void fixup_irqs(void);
void fixup_eoi(void);

int  init_irq_data(void);

void clear_irq_vector(int irq);

int irq_to_vector(int irq);
/*
 * If grant_access is set the current domain is given permissions over
 * the created IRQ.
 */
int create_irq(nodeid_t node, bool grant_access);
void destroy_irq(unsigned int irq);
int assign_irq_vector(int irq, const cpumask_t *mask);
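
/*
 * Illustrative use of create_irq()/destroy_irq() (a sketch with hypothetical
 * surrounding code): allocate a host IRQ, preferably close to a given NUMA
 * node, without granting the current domain permission over it, and release
 * it once no longer needed.
 *
 *     int irq = create_irq(NUMA_NO_NODE, false);
 *
 *     if ( irq > 0 )
 *     {
 *         ... program the interrupt source using irq_to_vector(irq) ...
 *         destroy_irq(irq);
 *     }
 */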

void cf_check irq_complete_move(struct irq_desc *desc);

extern struct irq_desc *irq_desc;

/* Not speculation safe, only used for AP bringup. */
void lock_vector_lock(void);
void unlock_vector_lock(void);

void setup_vector_irq(unsigned int cpu);

void move_native_irq(struct irq_desc *desc);
void move_masked_irq(struct irq_desc *desc);

int bind_irq_vector(int irq, int vector, const cpumask_t *mask);

void cf_check end_nonmaskable_irq(struct irq_desc *desc, uint8_t vector);
void irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask);

int init_domain_irq_mapping(struct domain *d);
void cleanup_domain_irq_mapping(struct domain *d);

#define domain_pirq_to_irq(d, pirq) pirq_field(d, pirq, arch.irq, 0)
#define domain_irq_to_pirq(d, irq) ({                           \
    void *__ret = radix_tree_lookup(&(d)->arch.irq_pirq, irq);  \
    __ret ? radix_tree_ptr_to_int(__ret) : 0;                   \
})
#define PIRQ_ALLOCATED (-1)
#define domain_pirq_to_emuirq(d, pirq) pirq_field(d, pirq,              \
    arch.hvm.emuirq, IRQ_UNBOUND)
#define domain_emuirq_to_pirq(d, emuirq) ({                             \
    void *__ret = radix_tree_lookup(&(d)->arch.hvm.emuirq_pirq, emuirq);\
    __ret ? radix_tree_ptr_to_int(__ret) : IRQ_UNBOUND;                 \
})
#define IRQ_UNBOUND (-1)
#define IRQ_PT      (-2)
#define IRQ_MSI_EMU (-3)
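
/*
 * Illustrative only, for a hypothetical domain "d": the macros above
 * translate between a domain's pIRQ space and host IRQs / emulated IRQs.
 *
 *     int irq = domain_pirq_to_irq(d, pirq);      - 0 when pirq is unmapped
 *     int back = domain_irq_to_pirq(d, irq);      - 0 when irq is unmapped
 *     int emu = domain_pirq_to_emuirq(d, pirq);   - IRQ_UNBOUND when no
 *                                                   emulated IRQ is bound
 */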

bool cpu_has_pending_apic_eoi(void);

#define arch_move_irqs(v) evtchn_move_pirqs(v)

struct msi_info;
int allocate_and_map_gsi_pirq(struct domain *d, int index, int *pirq_p);
int allocate_and_map_msi_pirq(struct domain *d, int index, int *pirq_p,
                              int type, struct msi_info *msi);

#endif /* _ASM_HW_IRQ_H */