#ifndef _ASM_HW_IRQ_H
#define _ASM_HW_IRQ_H

/* (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar */

#include <asm/atomic.h>
#include <asm/numa.h>
#include <xen/cpumask.h>
#include <xen/smp.h>
#include <asm/hvm/irq.h>
#include <irq_vectors.h>
#include <asm/percpu.h>

extern unsigned int nr_irqs_gsi;
extern unsigned int nr_irqs;
#define nr_static_irqs nr_irqs_gsi

#define IO_APIC_IRQ(irq)    (platform_legacy_irq(irq) ?    \
                             (1 << (irq)) & io_apic_irqs : \
                             (irq) < nr_irqs_gsi)

#define MSI_IRQ(irq)        ((irq) >= nr_irqs_gsi && (irq) < nr_irqs)

#define LEGACY_VECTOR(irq)  ((irq) + FIRST_LEGACY_VECTOR)
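
/*
 * Illustrative summary of the IRQ number space implied by the macros
 * above (a sketch, not an authoritative definition): IRQs below 16 are
 * the legacy PIC range, GSIs run up to nr_irqs_gsi, and the remainder
 * up to nr_irqs is handed out dynamically (e.g. for MSI):
 *
 *   0 .. 15                     legacy (platform_legacy_irq() true)
 *   16 .. nr_irqs_gsi - 1       other GSIs (IO_APIC_IRQ() true)
 *   nr_irqs_gsi .. nr_irqs - 1  dynamic/MSI (MSI_IRQ() true)
 */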

typedef struct {
    DECLARE_BITMAP(_bits, NR_VECTORS);
} vmask_t;

struct irq_desc;

struct arch_irq_desc {
    s16 vector;          /* vector itself is only 8 bits, */
    s16 old_vector;      /* but we use -1 for unassigned  */
    cpumask_var_t cpu_mask;
    cpumask_var_t old_cpu_mask;
    cpumask_var_t pending_mask;
    unsigned move_cleanup_count;
    vmask_t *used_vectors;
    u8 move_in_progress : 1;
    s8 used;
};
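
/*
 * Note on the fields above: vector/cpu_mask describe the current
 * assignment, while old_vector/old_cpu_mask together with
 * move_in_progress and move_cleanup_count track an in-flight migration
 * of the IRQ to a new vector/CPU set; the old vector is reclaimed once
 * the move has been cleaned up.
 */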

/* For use with irq_desc.arch.used */
#define IRQ_UNUSED      (0)
#define IRQ_USED        (1)
#define IRQ_RESERVED    (-1)

#define IRQ_VECTOR_UNASSIGNED (-1)

typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
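
/*
 * Illustrative sketch (not part of the interface): an entry of the
 * per-CPU table maps a vector back to its IRQ, or holds a negative
 * value when unassigned, so a dispatcher could do something like:
 *
 *   int irq = this_cpu(vector_irq)[vector];
 *   if ( irq >= 0 )
 *       ... dispatch to the corresponding irq_desc ...
 */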

extern bool opt_noirqbalance;

#define OPT_IRQ_VECTOR_MAP_DEFAULT 0 /* Do the default thing */
#define OPT_IRQ_VECTOR_MAP_NONE    1 /* None */
#define OPT_IRQ_VECTOR_MAP_GLOBAL  2 /* One global vector map (no vector sharing) */
#define OPT_IRQ_VECTOR_MAP_PERDEV  3 /* Per-device vector map (no vector sharing within a device) */

extern int opt_irq_vector_map;

/*
 * Per-cpu current frame pointer - the location of the last exception frame on
 * the stack.
 */
DECLARE_PER_CPU(struct cpu_user_regs *, __irq_regs);

static inline struct cpu_user_regs *get_irq_regs(void)
{
    return __get_cpu_var(__irq_regs);
}

static inline struct cpu_user_regs *set_irq_regs(struct cpu_user_regs *new_regs)
{
    struct cpu_user_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs);

    old_regs = *pp_regs;
    *pp_regs = new_regs;
    return old_regs;
}
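
/*
 * Typical save/restore usage around interrupt handling (a minimal
 * sketch; the handler body is elided):
 *
 *   struct cpu_user_regs *old_regs = set_irq_regs(regs);
 *   ... while handling, get_irq_regs() returns 'regs' ...
 *   set_irq_regs(old_regs);
 */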

#define platform_legacy_irq(irq)    ((irq) < 16)

void event_check_interrupt(struct cpu_user_regs *regs);
void invalidate_interrupt(struct cpu_user_regs *regs);
void call_function_interrupt(struct cpu_user_regs *regs);
void apic_timer_interrupt(struct cpu_user_regs *regs);
void error_interrupt(struct cpu_user_regs *regs);
void pmu_apic_interrupt(struct cpu_user_regs *regs);
void spurious_interrupt(struct cpu_user_regs *regs);
void irq_move_cleanup_interrupt(struct cpu_user_regs *regs);

uint8_t alloc_hipriority_vector(void);

void set_direct_apic_vector(
    uint8_t vector, void (*handler)(struct cpu_user_regs *));
void alloc_direct_apic_vector(
    uint8_t *vector, void (*handler)(struct cpu_user_regs *));
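
/*
 * Illustrative sketch (my_vector and my_handler are hypothetical
 * names): a caller needing a dedicated vector lets the allocator pick
 * one and installs the handler in a single call:
 *
 *   static uint8_t my_vector;
 *   static void my_handler(struct cpu_user_regs *regs) { ... }
 *
 *   alloc_direct_apic_vector(&my_vector, my_handler);
 */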

void do_IRQ(struct cpu_user_regs *regs);

void disable_8259A_irq(struct irq_desc *);
void enable_8259A_irq(struct irq_desc *);
int i8259A_irq_pending(unsigned int irq);
void mask_8259A(void);
void unmask_8259A(void);
void init_8259A(int aeoi);
void make_8259A_irq(unsigned int irq);
bool bogus_8259A_irq(unsigned int irq);
int i8259A_suspend(void);
int i8259A_resume(void);

void setup_IO_APIC(void);
void disable_IO_APIC(void);
void setup_ioapic_dest(void);
vmask_t *io_apic_get_used_vector_map(unsigned int irq);

extern unsigned int io_apic_irqs;

DECLARE_PER_CPU(unsigned int, irq_count);

struct pirq;
struct arch_pirq {
    int irq;
    union {
        struct hvm_pirq {
            int emuirq;
            struct hvm_pirq_dpci dpci;
        } hvm;
    };
};

#define pirq_dpci(pirq) ((pirq) ? &(pirq)->arch.hvm.dpci : NULL)
#define dpci_pirq(pd) container_of(pd, struct pirq, arch.hvm.dpci)
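
/*
 * pirq_dpci() and dpci_pirq() are inverses: the former yields the
 * hvm_pirq_dpci embedded in a struct pirq (or NULL for a NULL pirq),
 * the latter recovers the enclosing struct pirq from such a pointer
 * via container_of().
 */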

int pirq_shared(struct domain *d, int irq);

int map_domain_pirq(struct domain *d, int pirq, int irq, int type,
                    void *data);
int unmap_domain_pirq(struct domain *d, int pirq);
int get_free_pirq(struct domain *d, int type);
int get_free_pirqs(struct domain *, unsigned int nr);
void free_domain_pirqs(struct domain *d);
int map_domain_emuirq_pirq(struct domain *d, int pirq, int irq);
int unmap_domain_pirq_emuirq(struct domain *d, int pirq);
bool hvm_domain_use_pirq(const struct domain *, const struct pirq *);

/* Reset irq affinities to match the given CPU mask. */
void fixup_irqs(const cpumask_t *mask, bool verbose);
void fixup_eoi(void);

int init_irq_data(void);

void clear_irq_vector(int irq);

int irq_to_vector(int irq);
int create_irq(nodeid_t node);
void destroy_irq(unsigned int irq);
int assign_irq_vector(int irq, const cpumask_t *);
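
/*
 * Illustrative allocation lifecycle (a sketch with error handling
 * elided; NUMA_NO_NODE is assumed here to express no node preference):
 *
 *   int irq = create_irq(NUMA_NO_NODE);
 *   if ( irq < 0 )
 *       return irq;
 *   ... bind/use the IRQ ...
 *   destroy_irq(irq);
 */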

extern void irq_complete_move(struct irq_desc *);

extern struct irq_desc *irq_desc;

void lock_vector_lock(void);
void unlock_vector_lock(void);

void setup_vector_irq(unsigned int cpu);

void move_native_irq(struct irq_desc *);
void move_masked_irq(struct irq_desc *);

int bind_irq_vector(int irq, int vector, const cpumask_t *);

void irq_set_affinity(struct irq_desc *, const cpumask_t *mask);

int init_domain_irq_mapping(struct domain *);
void cleanup_domain_irq_mapping(struct domain *);

#define domain_pirq_to_irq(d, pirq) pirq_field(d, pirq, arch.irq, 0)
#define domain_irq_to_pirq(d, irq) ({                           \
    void *__ret = radix_tree_lookup(&(d)->arch.irq_pirq, irq);  \
    __ret ? radix_tree_ptr_to_int(__ret) : 0;                   \
})
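
/*
 * Illustrative round trip (0 appears to be the "no mapping" fallback in
 * both directions, going by the macro bodies above):
 *
 *   int irq = domain_pirq_to_irq(d, pirq);    (0 if pirq is unmapped)
 *   int pirq2 = domain_irq_to_pirq(d, irq);   (0 if irq has no pirq)
 */
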
#define PIRQ_ALLOCATED (-1)
#define domain_pirq_to_emuirq(d, pirq) pirq_field(d, pirq,              \
    arch.hvm.emuirq, IRQ_UNBOUND)
#define domain_emuirq_to_pirq(d, emuirq) ({                             \
    void *__ret = radix_tree_lookup(&(d)->arch.hvm_domain.emuirq_pirq,  \
                                    emuirq);                            \
    __ret ? radix_tree_ptr_to_int(__ret) : IRQ_UNBOUND;                 \
})
#define IRQ_UNBOUND (-1)
#define IRQ_PT (-2)
#define IRQ_MSI_EMU (-3)
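
/*
 * Interpretation of the special values above (as used with
 * domain_pirq_to_emuirq()): IRQ_UNBOUND means no emulated IRQ is bound
 * to the pirq, IRQ_PT marks a pirq bound to a passed-through device,
 * and IRQ_MSI_EMU marks one backed by an emulated MSI.
 */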

bool cpu_has_pending_apic_eoi(void);

static inline void arch_move_irqs(struct vcpu *v) { }

struct msi_info;
int allocate_and_map_gsi_pirq(struct domain *d, int index, int *pirq_p);
int allocate_and_map_msi_pirq(struct domain *d, int index, int *pirq_p,
                              int type, struct msi_info *msi);

#endif /* _ASM_HW_IRQ_H */