1 /*
2 * Copyright (C) 2018-2022 Intel Corporation.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <types.h>
8 #include <asm/lib/bits.h>
9 #include <asm/lib/spinlock.h>
10 #include <asm/per_cpu.h>
11 #include <asm/io.h>
12 #include <asm/irq.h>
13 #include <asm/idt.h>
14 #include <asm/ioapic.h>
15 #include <asm/lapic.h>
16 #include <dump.h>
17 #include <logmsg.h>
18 #include <asm/vmx.h>
19
/* Protects irq_data[] and vector_to_irq[] below. */
static spinlock_t x86_irq_spinlock = { .head = 0U, .tail = 0U, };

/* Per-irq arch data; holds the vector each irq number is bound to. */
static struct x86_irq_data irq_data[NR_IRQS];

/* Reverse map: vector -> irq number (IRQ_INVALID when unallocated). */
static uint32_t vector_to_irq[NR_MAX_VECTOR + 1];

typedef void (*spurious_handler_t)(uint32_t vector);

/* Optional hook called from handle_spurious_interrupt(); may remain NULL. */
spurious_handler_t spurious_handler;
29
/*
 * Fixed irq:vector bindings. The first NR_STATIC_MAPPINGS_1 entries are
 * known at build time; the remaining CONFIG_MAX_VM_NUM posted-interrupt
 * entries are filled in by init_irq_descs_arch() at runtime.
 */
static struct {
	uint32_t irq;
	uint32_t vector;
} irq_static_mappings[NR_STATIC_MAPPINGS] = {
	{TIMER_IRQ, TIMER_VECTOR},
	{THERMAL_IRQ, THERMAL_VECTOR},
	{NOTIFY_VCPU_IRQ, NOTIFY_VCPU_VECTOR},
	{PMI_IRQ, PMI_VECTOR},

	/* To be initialized at runtime in init_irq_descs() */
	[NR_STATIC_MAPPINGS_1 ... (NR_STATIC_MAPPINGS - 1U)] = {},
};
42
43 /*
44 * allocate a vector and bind it to irq
45 * for legacy_irq (irq num < 16) and static mapped ones, do nothing
46 * if mapping is correct.
47 * retval: valid vector number on success, VECTOR_INVALID on failure.
48 */
alloc_irq_vector(uint32_t irq)49 uint32_t alloc_irq_vector(uint32_t irq)
50 {
51 struct x86_irq_data *irqd;
52 uint64_t rflags;
53 uint32_t vr = VECTOR_INVALID;
54 uint32_t ret = VECTOR_INVALID;
55
56 if (irq < NR_IRQS) {
57 irqd = &irq_data[irq];
58 spinlock_irqsave_obtain(&x86_irq_spinlock, &rflags);
59
60 if (irqd->vector != VECTOR_INVALID) {
61 if (vector_to_irq[irqd->vector] == irq) {
62 /* statically binded */
63 vr = irqd->vector;
64 } else {
65 pr_err("[%s] irq[%u]:vector[%u] mismatch",
66 __func__, irq, irqd->vector);
67 }
68 } else {
69 /* alloc a vector between:
70 * VECTOR_DYNAMIC_START ~ VECTOR_DYNAMIC_END
71 */
72 for (vr = VECTOR_DYNAMIC_START;
73 vr <= VECTOR_DYNAMIC_END; vr++) {
74 if (vector_to_irq[vr] == IRQ_INVALID) {
75 irqd->vector = vr;
76 vector_to_irq[vr] = irq;
77 break;
78 }
79 }
80 vr = (vr > VECTOR_DYNAMIC_END) ? VECTOR_INVALID : vr;
81 }
82 spinlock_irqrestore_release(&x86_irq_spinlock, rflags);
83 ret = vr;
84 } else {
85 pr_err("invalid irq[%u] to alloc vector", irq);
86 }
87
88 return ret;
89 }
90
request_irq_arch(uint32_t irq)91 bool request_irq_arch(uint32_t irq)
92 {
93 return (alloc_irq_vector(irq) != VECTOR_INVALID);
94 }
95
/*
 * Free the vector allocated via alloc_irq_vector().
 * Only dynamically allocated vectors (below VECTOR_FIXED_START) are
 * released; legacy and statically mapped ones are left untouched.
 */
static void free_irq_vector(uint32_t irq)
{
	struct x86_irq_data *irqd;
	uint32_t vr;
	uint64_t rflags;

	if (irq < NR_IRQS) {
		irqd = &irq_data[irq];
		spinlock_irqsave_obtain(&x86_irq_spinlock, &rflags);

		/* dynamic vectors lie below VECTOR_FIXED_START; legacy and
		 * static allocations fail this test and are kept as-is
		 * (presumably VECTOR_INVALID fails it too -- verify against
		 * the vector layout in the headers) */
		if (irqd->vector < VECTOR_FIXED_START) {
			vr = irqd->vector;
			irqd->vector = VECTOR_INVALID;

			/* clear the reverse map only if it still points at this irq */
			if ((vr <= NR_MAX_VECTOR) && (vector_to_irq[vr] == irq)) {
				vector_to_irq[vr] = IRQ_INVALID;
			}
		}
		spinlock_irqrestore_release(&x86_irq_spinlock, rflags);
	}
}
119
/* Arch hook for irq release: frees the irq's dynamically allocated vector. */
void free_irq_arch(uint32_t irq)
{
	free_irq_vector(irq);
}
124
/*
 * Look up the vector currently bound to an irq.
 * Returns VECTOR_INVALID for out-of-range irqs (or when unbound).
 */
uint32_t irq_to_vector(uint32_t irq)
{
	uint32_t vec = VECTOR_INVALID;

	if (irq < NR_IRQS) {
		uint64_t rflags;

		spinlock_irqsave_obtain(&x86_irq_spinlock, &rflags);
		vec = irq_data[irq].vector;
		spinlock_irqrestore_release(&x86_irq_spinlock, rflags);
	}

	return vec;
}
138
/*
 * Handle a vector with no allocated irq: ack the local APIC, count it,
 * warn, and invoke the optional spurious_handler hook if installed.
 */
static void handle_spurious_interrupt(uint32_t vector)
{
	/* snapshot the hook so the NULL check and the call observe the
	 * same value even if spurious_handler is updated concurrently */
	spurious_handler_t handler = spurious_handler;

	send_lapic_eoi();

	/* per-cpu spurious interrupt counter */
	get_cpu_var(spurious)++;

	pr_warn("Spurious vector: 0x%x.", vector);

	if (handler != NULL) {
		handler(vector);
	}
}
151
irq_need_mask(const struct irq_desc * desc)152 static inline bool irq_need_mask(const struct irq_desc *desc)
153 {
154 /* level triggered gsi should be masked */
155 return (((desc->flags & IRQF_LEVEL) != 0U)
156 && is_ioapic_irq(desc->irq));
157 }
158
irq_need_unmask(const struct irq_desc * desc)159 static inline bool irq_need_unmask(const struct irq_desc *desc)
160 {
161 /* level triggered gsi for non-ptdev should be unmasked */
162 return (((desc->flags & IRQF_LEVEL) != 0U)
163 && ((desc->flags & IRQF_PT) == 0U)
164 && is_ioapic_irq(desc->irq));
165 }
166
/* Arch work before the generic irq action runs: mask a level triggered
 * ioapic gsi (so it cannot re-fire while being handled), then ack. */
void pre_irq_arch(const struct irq_desc *desc)
{
	if (irq_need_mask(desc)) {
		ioapic_gsi_mask_irq(desc->irq);
	}

	/* Send EOI to LAPIC/IOAPIC IRR */
	send_lapic_eoi();
}
176
post_irq_arch(const struct irq_desc * desc)177 void post_irq_arch(const struct irq_desc *desc)
178 {
179 if (irq_need_unmask(desc)) {
180 ioapic_gsi_unmask_irq(desc->irq);
181 }
182 }
183
/*
 * Interrupt entry: route the hardware interrupt identified by ctx->vector
 * to the generic irq layer (do_irq()), or treat it as spurious.
 */
void dispatch_interrupt(const struct intr_excp_ctx *ctx)
{
	uint32_t vr = ctx->vector;
	uint32_t irq = vector_to_irq[vr];
	struct x86_irq_data *irqd;

	/* The value from vector_to_irq[] must be:
	 * IRQ_INVALID, which means the vector is not allocated;
	 * or
	 * < NR_IRQS, which is the irq number it bound with;
	 * Any other value means there is something wrong.
	 */
	if (irq < NR_IRQS) {
		irqd = &irq_data[irq];

		/* cross-check that the forward map still agrees before
		 * dispatching; a mismatched entry is silently dropped */
		if (vr == irqd->vector) {
#ifdef PROFILING_ON
			/* Saves ctx info into irq_desc */
			irqd->ctx_rip = ctx->rip;
			irqd->ctx_rflags = ctx->rflags;
			irqd->ctx_cs = ctx->cs;
#endif
			/* Call the generic IRQ handling routine */
			do_irq(irq);
		}
	} else {
		handle_spurious_interrupt(vr);
	}
}
213
214 /*
215 * descs[] must have NR_IRQS entries
216 */
init_irq_descs_arch(struct irq_desc descs[])217 void init_irq_descs_arch(struct irq_desc descs[])
218 {
219 uint32_t i;
220
221 /*
222 * Fill in #CONFIG_MAX_VM_NUM posted interrupt specific irq and vector pairs
223 * at runtime
224 */
225 for (i = 0U; i < CONFIG_MAX_VM_NUM; i++) {
226 uint32_t idx = i + NR_STATIC_MAPPINGS_1;
227
228 ASSERT(irq_static_mappings[idx].irq == 0U, "");
229 ASSERT(irq_static_mappings[idx].vector == 0U, "");
230
231 irq_static_mappings[idx].irq = POSTED_INTR_IRQ + i;
232 irq_static_mappings[idx].vector = POSTED_INTR_VECTOR + i;
233 }
234
235 for (i = 0U; i < NR_IRQS; i++) {
236 irq_data[i].vector = VECTOR_INVALID;
237 descs[i].arch_data = &irq_data[i];
238 }
239
240 for (i = 0U; i <= NR_MAX_VECTOR; i++) {
241 vector_to_irq[i] = IRQ_INVALID;
242 }
243
244 /* init fixed mapping for specific irq and vector */
245 for (i = 0U; i < NR_STATIC_MAPPINGS; i++) {
246 uint32_t irq = irq_static_mappings[i].irq;
247 uint32_t vr = irq_static_mappings[i].vector;
248
249 irq_data[irq].vector = vr;
250 vector_to_irq[vr] = irq;
251
252 reserve_irq_num(irq);
253 }
254 }
255
/* must be called after IRQ setup (init_irq_descs_arch()) */
void setup_irqs_arch(void)
{
	/* program the ioapic redirection entries */
	ioapic_setup_irqs();
}
261
/* Mask all lines on both legacy 8259 PICs by writing 0xff to their data
 * ports (0xA1 = slave, 0x21 = master); interrupts go through the ioapic. */
static void disable_pic_irqs(void)
{
	pio_write8(0xffU, 0xA1U);
	pio_write8(0xffU, 0x21U);
}
267
/*
 * Rearrange the offset fields of every host IDT entry in place.
 * The statically built IDT appears to park the handler address's low
 * 32 bits in offset_63_32 and its high 32 bits in rsvd; this moves each
 * half into its architectural field (offset_15_0 / offset_31_16 /
 * offset_63_32) and clears rsvd. NOTE(review): depends on how the IDT
 * table is generated at build time -- confirm against its definition.
 */
static inline void fixup_idt(const struct host_idt_descriptor *idtd)
{
	uint32_t i;
	struct idt_64_descriptor *idt_desc = idtd->idt->host_idt_descriptors;
	uint32_t entry_hi_32, entry_lo_32;

	for (i = 0U; i < HOST_IDT_ENTRIES; i++) {
		/* low half of the handler address (pre-fixup location) */
		entry_lo_32 = idt_desc[i].offset_63_32;
		/* high half of the handler address (pre-fixup location) */
		entry_hi_32 = idt_desc[i].rsvd;
		idt_desc[i].rsvd = 0U;
		idt_desc[i].offset_63_32 = entry_hi_32;
		idt_desc[i].high32.bits.offset_31_16 = (uint16_t)(entry_lo_32 >> 16U);
		idt_desc[i].low32.bits.offset_15_0 = (uint16_t)entry_lo_32;
	}
}
283
/* Load the IDT register of the current cpu from the given descriptor. */
static inline void set_idt(struct host_idt_descriptor *idtd)
{
	asm volatile (" lidtq %[idtd]\n" : /* no output parameters */
		      : /* input parameters */
		      [idtd] "m"(*idtd));
}
290
/*
 * Per-cpu interrupt bring-up: install the IDT and init the local APIC.
 * The BSP additionally fixes up the shared IDT (once, before loading it)
 * and masks the legacy PICs.
 */
void init_interrupt_arch(uint16_t pcpu_id)
{
	struct host_idt_descriptor *idtd = &HOST_IDTR;

	/* the IDT is shared; it only needs fixing up once, on the BSP */
	if (pcpu_id == BSP_CPU_ID) {
		fixup_idt(idtd);
	}
	set_idt(idtd);
	init_lapic(pcpu_id);

	if (pcpu_id == BSP_CPU_ID) {
		/* we use ioapic only, disable legacy PIC */
		disable_pic_irqs();
	}
}
306