/*-
 * Copyright (c) 2013 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
 * Copyright (c) 2017-2022 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#define pr_prefix	"vioapic: "

#include <asm/guest/vm.h>
#include <errno.h>
#include <asm/irq.h>
#include <asm/guest/ept.h>
#include <asm/guest/assign.h>
#include <logmsg.h>
#include <asm/ioapic.h>

#define	RTBL_RO_BITS	((uint32_t)0x00004000U | (uint32_t)0x00001000U) /* Remote IRR and Delivery Status bits */

#define DBG_LEVEL_VIOAPIC	6U
#define ACRN_IOAPIC_VERSION	0x11U

#define MASK_ALL_INTERRUPTS   0x0001000000010000UL

static inline struct acrn_vioapics *vm_ioapics(const struct acrn_vm *vm)
{
	return (struct acrn_vioapics *)&(vm->arch_vm.vioapics);
}

/**
 * @pre pin < vioapic->chipinfo.nr_pins
 */
static void
vioapic_generate_intr(struct acrn_single_vioapic *vioapic, uint32_t pin)
{
	uint32_t vector, dest, delmode;
	union ioapic_rte rte;
	bool level, phys;

	rte = vioapic->rtbl[pin];

	if (rte.bits.intr_mask == IOAPIC_RTE_MASK_SET) {
		dev_dbg(DBG_LEVEL_VIOAPIC, "ioapic pin%hhu: masked", pin);
	} else {
		phys = (rte.bits.dest_mode == IOAPIC_RTE_DESTMODE_PHY);
		delmode = rte.bits.delivery_mode;
		level = (rte.bits.trigger_mode == IOAPIC_RTE_TRGRMODE_LEVEL);

		/* For a level-triggered interrupt, avoid sending a new interrupt
		 * while the previous one has not yet been EOIed.
		 */
		if (!level || (vioapic->rtbl[pin].bits.remote_irr == 0UL)) {
			if (level) {
				vioapic->rtbl[pin].bits.remote_irr = IOAPIC_RTE_REM_IRR;
			}
			vector = rte.bits.vector;
			dest = rte.bits.dest_field;
			vlapic_receive_intr(vioapic->vm, level, dest, phys, delmode, vector, false);
		}
	}
}

/**
 * @pre pin < vioapic->chipinfo.nr_pins
 */
static void
vioapic_set_pinstate(struct acrn_single_vioapic *vioapic, uint32_t pin, uint32_t level)
{
	uint32_t old_lvl;
	union ioapic_rte rte;

	if (pin < vioapic->chipinfo.nr_pins) {
		rte = vioapic->rtbl[pin];
		old_lvl = (uint32_t)bitmap_test((uint16_t)(pin & 0x3FU), &vioapic->pin_state[pin >> 6U]);
		if (level == 0U) {
			/* clear pin_state and deliver interrupt according to polarity */
			bitmap_clear_nolock((uint16_t)(pin & 0x3FU), &vioapic->pin_state[pin >> 6U]);
			if ((rte.bits.intr_polarity == IOAPIC_RTE_INTPOL_ALO)
				&& (old_lvl != level)) {
				vioapic_generate_intr(vioapic, pin);
			}
		} else {
			/* set pin_state and deliver interrupt according to polarity */
			bitmap_set_nolock((uint16_t)(pin & 0x3FU), &vioapic->pin_state[pin >> 6U]);
			if ((rte.bits.intr_polarity == IOAPIC_RTE_INTPOL_AHI)
				&& (old_lvl != level)) {
				vioapic_generate_intr(vioapic, pin);
			}
		}
	}
}

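/**
 * @brief Find the vIOAPIC instance and pin corresponding to a virtual GSI.
 *
 * For the Service VM the platform IOAPIC layout is used to locate the
 * vIOAPIC and pin; for other VMs the GSI maps directly to a pin of the
 * single vIOAPIC.
 *
 * @param[in]  vm   Pointer to target VM
 * @param[in]  vgsi Target GSI number
 * @param[out] vpin Pin number within the returned vIOAPIC, may be NULL
 *
 * @pre vm != NULL
 * @return Pointer to the vIOAPIC servicing the given GSI
 */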
struct acrn_single_vioapic *
vgsi_to_vioapic_and_vpin(const struct acrn_vm *vm, uint32_t vgsi, uint32_t *vpin)
{
	struct acrn_single_vioapic *vioapic;
	uint8_t vioapic_index = 0U;

	if (is_service_vm(vm)) {
		/*
		 * Utilize platform ioapic_info for Service VM
		 */
		vioapic_index = get_gsi_to_ioapic_index(vgsi);
		if (vpin != NULL) {
			*vpin = gsi_to_ioapic_pin(vgsi);
		}
	} else {
		if (vpin != NULL) {
			*vpin = vgsi;
		}
	}
	vioapic = (struct acrn_single_vioapic *)&(vm->arch_vm.vioapics.vioapic_array[vioapic_index]);
	return vioapic;
}

/**
 * @brief Set vIOAPIC IRQ line status.
 *
 * Similar to vioapic_set_irqline_lock(), but does not guarantee that the
 * operation is performed with the vioapic lock held.
 *
 * @param[in] vm        Pointer to target VM
 * @param[in] vgsi      Target GSI number
 * @param[in] operation Action options: GSI_SET_HIGH/GSI_SET_LOW/
 *			GSI_RAISING_PULSE/GSI_FALLING_PULSE
 *
 * @pre vgsi < get_vm_gsicount(vm)
 * @pre vm != NULL
 * @return None
 */
void
vioapic_set_irqline_nolock(const struct acrn_vm *vm, uint32_t vgsi, uint32_t operation)
{
	struct acrn_single_vioapic *vioapic;
	uint32_t pin;

	vioapic = vgsi_to_vioapic_and_vpin(vm, vgsi, &pin);

	switch (operation) {
	case GSI_SET_HIGH:
		vioapic_set_pinstate(vioapic, pin, 1U);
		break;
	case GSI_SET_LOW:
		vioapic_set_pinstate(vioapic, pin, 0U);
		break;
	case GSI_RAISING_PULSE:
		vioapic_set_pinstate(vioapic, pin, 1U);
		vioapic_set_pinstate(vioapic, pin, 0U);
		break;
	case GSI_FALLING_PULSE:
		vioapic_set_pinstate(vioapic, pin, 0U);
		vioapic_set_pinstate(vioapic, pin, 1U);
		break;
	default:
		/*
		 * The caller guarantees the precondition, so no other
		 * operation values are expected here.
		 */
		break;
	}
}

/**
 * @brief Set vIOAPIC IRQ line status.
 *
 * @param[in] vm        Pointer to target VM
 * @param[in] vgsi      Target GSI number
 * @param[in] operation Action options: GSI_SET_HIGH/GSI_SET_LOW/
 *			GSI_RAISING_PULSE/GSI_FALLING_PULSE
 *
 * @pre vgsi < get_vm_gsicount(vm)
 * @pre vm != NULL
 */
void
vioapic_set_irqline_lock(const struct acrn_vm *vm, uint32_t vgsi, uint32_t operation)
{
	uint64_t rflags;
	struct acrn_single_vioapic *vioapic;

	vioapic = vgsi_to_vioapic_and_vpin(vm, vgsi, NULL);
	spinlock_irqsave_obtain(&(vioapic->lock), &rflags);
	vioapic_set_irqline_nolock(vm, vgsi, operation);
	spinlock_irqrestore_release(&(vioapic->lock), rflags);
}

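/**
 * @brief Emulate a read of the IOAPIC register currently selected via IOREGSEL.
 *
 * Handles the ID, version and arbitration registers as well as the
 * redirection table entries. For a VM configured with local APIC
 * passthrough, the virtual 'Remote IRR' bit of a level-triggered entry is
 * refreshed from the physical RTE before the low 32 bits are returned.
 *
 * @pre vioapic != NULL
 */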
static uint32_t
vioapic_indirect_read(struct acrn_single_vioapic *vioapic, uint32_t addr)
{
	uint32_t regnum, ret = 0U;
	uint32_t pin, pincount = vioapic->chipinfo.nr_pins;

	regnum = addr & 0xffU;
	switch (regnum) {
	case IOAPIC_ID:
		ret = (uint32_t)vioapic->chipinfo.id << IOAPIC_ID_SHIFT;
		break;
	case IOAPIC_VER:
		ret = ((pincount - 1U) << MAX_RTE_SHIFT) | ACRN_IOAPIC_VERSION;
		break;
	case IOAPIC_ARB:
		ret = (uint32_t)vioapic->chipinfo.id << IOAPIC_ID_SHIFT;
		break;
	default:
		/*
		 * This switch statement only handles IOAPIC_ID, IOAPIC_VER
		 * and IOAPIC_ARB. All other register numbers are handled
		 * after the switch statement.
		 */
		break;
	}

	/* redirection table entries */
	if ((regnum >= IOAPIC_REDTBL) &&
	    (regnum < (IOAPIC_REDTBL + (pincount * 2U)))) {
		uint32_t addr_offset = regnum - IOAPIC_REDTBL;
		uint32_t rte_offset = addr_offset >> 1U;
		pin = rte_offset;
		if ((addr_offset & 0x1U) != 0U) {
			ret = vioapic->rtbl[pin].u.hi_32;
		} else {
			if (is_lapic_pt_configured(vioapic->vm) && (vioapic->rtbl[pin].bits.trigger_mode != 0UL)) {
				/*
				 * In the local APIC passthrough case, an EOI does not trigger
				 * a VM exit, so the virtual 'Remote IRR' bit is not updated.
				 * Read the physical IOxAPIC RTE and refresh the virtual
				 * 'Remote IRR' field each time the guest reads an I/O
				 * redirection table register.
				 */
				struct ptirq_remapping_info *entry = NULL;
				union ioapic_rte phys_rte = {};
				DEFINE_INTX_SID(virt_sid, vioapic->rtbl[pin].bits.vector, INTX_CTLR_IOAPIC);

				entry = find_ptirq_entry(PTDEV_INTR_INTX, &virt_sid, vioapic->vm);
				if (entry != NULL) {
					ioapic_get_rte(entry->allocated_pirq, &phys_rte);
					vioapic->rtbl[pin].bits.remote_irr = phys_rte.bits.remote_irr;
				}
			}
			ret = vioapic->rtbl[pin].u.lo_32;
		}
	}

	return ret;
}

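/*
 * Check whether the current pin level, interpreted through the RTE polarity
 * (active-low or active-high), corresponds to an asserted interrupt condition.
 */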
static inline bool vioapic_need_intr(const struct acrn_single_vioapic *vioapic, uint16_t pin)
{
	uint32_t lvl;
	union ioapic_rte rte;
	bool ret = false;

	if ((uint32_t)pin < vioapic->chipinfo.nr_pins) {
		rte = vioapic->rtbl[pin];
		lvl = (uint32_t)bitmap_test(pin & 0x3FU, &vioapic->pin_state[pin >> 6U]);
		ret = !!(((rte.bits.intr_polarity == IOAPIC_RTE_INTPOL_ALO) && (lvl == 0U)) ||
			((rte.bits.intr_polarity == IOAPIC_RTE_INTPOL_AHI) && (lvl != 0U)));
	}

	return ret;
}

/*
 * Because vCPUs may race with each other and vioapic->lock may also be taken
 * from softirq context, the caller must wrap this function with
 * spinlock_irqsave_obtain(&(vioapic->lock), &rflags) and
 * spinlock_irqrestore_release(&(vioapic->lock), rflags).
 */
static void vioapic_indirect_write(struct acrn_single_vioapic *vioapic, uint32_t addr, uint32_t data)
{
	union ioapic_rte last, new, changed;
	uint32_t regnum;
	uint32_t pin, pincount = vioapic->chipinfo.nr_pins;

	regnum = addr & 0xffUL;
	switch (regnum) {
	case IOAPIC_ID:
		vioapic->chipinfo.id = (uint8_t)((data & IOAPIC_ID_MASK) >> IOAPIC_ID_SHIFT);
		break;
	case IOAPIC_VER:
	case IOAPIC_ARB:
		/* readonly */
		break;
	default:
		/*
		 * This switch statement only handles IOAPIC_ID, IOAPIC_VER
		 * and IOAPIC_ARB. All other register numbers are handled
		 * after the switch statement.
		 */
		break;
	}

	/* redirection table entries */
	if ((regnum >= IOAPIC_REDTBL) && (regnum < (IOAPIC_REDTBL + (pincount * 2U)))) {
		bool wire_mode_valid = true;
		uint32_t addr_offset = regnum - IOAPIC_REDTBL;
		uint32_t rte_offset = addr_offset >> 1U;
		pin = rte_offset;

		last = vioapic->rtbl[pin];
		new = last;
		if ((addr_offset & 1U) != 0U) {
			new.u.hi_32 = data;
		} else {
			new.u.lo_32 &= RTBL_RO_BITS;
			new.u.lo_32 |= (data & ~RTBL_RO_BITS);
		}

		/* In some special scenarios the LAPIC has not sent an EOI to the
		 * IOAPIC, so the Remote IRR bit cannot be cleared by the normal
		 * path. To clear it, some OSes use the EOI register on 0x20-version
		 * IOAPICs; otherwise they switch the trigger mode to edge-sensitive
		 * to clear it.
		 */
		if (new.bits.trigger_mode == IOAPIC_RTE_TRGRMODE_EDGE) {
			new.bits.remote_irr = 0U;
		}

		changed.full = last.full ^ new.full;
		/* pin0 from vpic mask/unmask */
		if ((pin == 0U) && (changed.bits.intr_mask != 0UL)) {
			/* mask -> unmask */
			if (last.bits.intr_mask == IOAPIC_RTE_MASK_SET) {
				if ((vioapic->vm->wire_mode == VPIC_WIRE_NULL) ||
						(vioapic->vm->wire_mode == VPIC_WIRE_INTR)) {
					vioapic->vm->wire_mode = VPIC_WIRE_IOAPIC;
					dev_dbg(DBG_LEVEL_VIOAPIC, "vpic wire mode -> IOAPIC");
				} else {
					pr_err("WARNING: invalid vpic wire mode change");
					wire_mode_valid = false;
				}
			/* unmask -> mask */
			} else {
				if (vioapic->vm->wire_mode == VPIC_WIRE_IOAPIC) {
					vioapic->vm->wire_mode = VPIC_WIRE_INTR;
					dev_dbg(DBG_LEVEL_VIOAPIC, "vpic wire mode -> INTR");
				}
			}
		}

		if (wire_mode_valid) {
			vioapic->rtbl[pin] = new;
			dev_dbg(DBG_LEVEL_VIOAPIC, "ioapic pin%hhu: redir table entry %#lx",
				pin, vioapic->rtbl[pin].full);

			/* remap for ptdev */
			if ((new.bits.intr_mask == IOAPIC_RTE_MASK_CLR) || (last.bits.intr_mask == IOAPIC_RTE_MASK_CLR)) {
				/* VM enable intr */
				/* NOTE: only support max 256 pin */

				(void)ptirq_intx_pin_remap(vioapic->vm, vioapic->chipinfo.gsi_base + pin, INTX_CTLR_IOAPIC);
			}

			/*
			 * Generate an interrupt if the following conditions are met:
			 * - pin is not masked
			 * - previous interrupt has been EOIed
			 * - pin level is asserted
			 */
			if ((vioapic->rtbl[pin].bits.intr_mask == IOAPIC_RTE_MASK_CLR) &&
				(vioapic->rtbl[pin].bits.remote_irr == 0UL) &&
				vioapic_need_intr(vioapic, (uint16_t)pin)) {
				dev_dbg(DBG_LEVEL_VIOAPIC, "ioapic pin%hhu: asserted at rtbl write", pin);
				vioapic_generate_intr(vioapic, pin);
			}
		}
	}
}

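/*
 * Dispatch a 32-bit MMIO access to the IOREGSEL or IOWIN register of the
 * vIOAPIC. The vioapic lock is held across the access; reads from any other
 * offset return all-ones.
 */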
static void
vioapic_mmio_rw(struct acrn_single_vioapic *vioapic, uint64_t gpa,
		uint32_t *data, bool do_read)
{
	uint32_t offset;
	uint64_t rflags;

	offset = (uint32_t)(gpa - vioapic->chipinfo.addr);

	spinlock_irqsave_obtain(&(vioapic->lock), &rflags);

	/* The IOAPIC specification allows 32-bit wide accesses to the
	 * IOAPIC_REGSEL (offset 0) and IOAPIC_WINDOW (offset 16) registers.
	 */
	switch (offset) {
	case IOAPIC_REGSEL:
		if (do_read) {
			*data = vioapic->ioregsel;
		} else {
			vioapic->ioregsel = *data & 0xFFU;
		}
		break;
	case IOAPIC_WINDOW:
		if (do_read) {
			*data = vioapic_indirect_read(vioapic,
							vioapic->ioregsel);
		} else {
			vioapic_indirect_write(vioapic,
						 vioapic->ioregsel, *data);
		}
		break;
	default:
		if (do_read) {
			*data = 0xFFFFFFFFU;
		}
		break;
	}

	spinlock_irqrestore_release(&(vioapic->lock), rflags);
}

/*
 * @pre vm != NULL
 */
static void
vioapic_process_eoi(struct acrn_single_vioapic *vioapic, uint32_t vector)
{
	uint32_t pin, pincount = vioapic->chipinfo.nr_pins;
	union ioapic_rte rte;
	uint64_t rflags;

	if ((vector < VECTOR_DYNAMIC_START) || (vector > NR_MAX_VECTOR)) {
		pr_err("vioapic_process_eoi: invalid vector %u", vector);
	}

	dev_dbg(DBG_LEVEL_VIOAPIC, "ioapic processing eoi for vector %u", vector);

	/* notify device to ack if assigned pin */
	for (pin = 0U; pin < pincount; pin++) {
		rte = vioapic->rtbl[pin];
		if ((rte.bits.vector != vector) ||
			(rte.bits.remote_irr == 0U)) {
			continue;
		}

		ptirq_intx_ack(vioapic->vm, vioapic->chipinfo.gsi_base + pin, INTX_CTLR_IOAPIC);
	}

	/*
	 * XXX keep track of the pins associated with this vector instead
	 * of iterating on every single pin each time.
	 */
	spinlock_irqsave_obtain(&(vioapic->lock), &rflags);
	for (pin = 0U; pin < pincount; pin++) {
		rte = vioapic->rtbl[pin];
		if ((rte.bits.vector != vector) ||
			(rte.bits.remote_irr == 0U)) {
			continue;
		}

		vioapic->rtbl[pin].bits.remote_irr = 0U;
		if (vioapic_need_intr(vioapic, (uint16_t)pin)) {
			dev_dbg(DBG_LEVEL_VIOAPIC,
				"ioapic pin%hhu: asserted at eoi", pin);
			vioapic_generate_intr(vioapic, pin);
		}
	}
	spinlock_irqrestore_release(&(vioapic->lock), rflags);
}

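/**
 * @brief Deliver an EOI for the given vector to every vIOAPIC of the VM.
 *
 * @pre vm != NULL
 */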
void vioapic_broadcast_eoi(const struct acrn_vm *vm, uint32_t vector)
{
	struct acrn_single_vioapic *vioapic;
	uint8_t vioapic_index;

	/*
	 * For platforms with multiple IO-APICs, the EOI message from the LAPIC
	 * is broadcast to all IO-APICs. Emulate the same behavior here.
	 */

	for (vioapic_index = 0U; vioapic_index < vm->arch_vm.vioapics.ioapic_num; vioapic_index++) {
		vioapic = &(vm_ioapics(vm)->vioapic_array[vioapic_index]);
		vioapic_process_eoi(vioapic, vector);
	}
}

static void reset_one_vioapic(struct acrn_single_vioapic *vioapic)
{
	uint32_t pin, pincount;

	/* Initialize all redirection entries to mask all interrupts */
	pincount = vioapic->chipinfo.nr_pins;
	for (pin = 0U; pin < pincount; pin++) {
		vioapic->rtbl[pin].full = MASK_ALL_INTERRUPTS;
	}
	vioapic->chipinfo.id = 0U;
	vioapic->ioregsel = 0U;
}

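/*
 * Reset every vIOAPIC of the given VM: mask all RTEs and clear the ID and
 * register-select state.
 */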
void reset_vioapics(const struct acrn_vm *vm)
{
	struct acrn_vioapics *vioapics = vm_ioapics(vm);
	uint8_t vioapic_index;

	for (vioapic_index = 0U; vioapic_index < vioapics->ioapic_num; vioapic_index++) {
		reset_one_vioapic(&vioapics->vioapic_array[vioapic_index]);
	}
}

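/**
 * @brief Create and initialize the vIOAPIC(s) of a VM.
 *
 * The Service VM gets one vIOAPIC per physical IOAPIC reported by the
 * platform; all other VMs get a single vIOAPIC with VIOAPIC_RTE_NUM pins at
 * VIOAPIC_BASE. Each instance is reset, its MMIO range is registered for
 * emulation, and the corresponding guest physical range is removed from the
 * EPT so that accesses trap to the hypervisor.
 *
 * @pre vm != NULL
 */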
void
vioapic_init(struct acrn_vm *vm)
{
	static struct ioapic_info virt_ioapic_info = {
		.nr_pins = VIOAPIC_RTE_NUM,
		.addr = VIOAPIC_BASE
	};

	struct ioapic_info *vioapic_info;
	uint8_t vioapic_index;
	struct acrn_single_vioapic *vioapic = NULL;

	if (is_service_vm(vm)) {
		vm->arch_vm.vioapics.ioapic_num = get_platform_ioapic_info(&vioapic_info);
	} else {
		vm->arch_vm.vioapics.ioapic_num = 1U;
		vioapic_info = &virt_ioapic_info;
	}

	for (vioapic_index = 0U; vioapic_index < vm->arch_vm.vioapics.ioapic_num; vioapic_index++) {
		vioapic = &vm->arch_vm.vioapics.vioapic_array[vioapic_index];
		spinlock_init(&(vioapic->lock));
		vioapic->chipinfo = vioapic_info[vioapic_index];

		vioapic->vm = vm;
		reset_one_vioapic(vioapic);

		register_mmio_emulation_handler(vm, vioapic_mmio_access_handler, (uint64_t)vioapic->chipinfo.addr,
					(uint64_t)vioapic->chipinfo.addr + VIOAPIC_SIZE, (void *)vioapic, false);
		ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, (uint64_t)vioapic->chipinfo.addr, VIOAPIC_SIZE);
	}

	/*
	 * The maximum number of GSIs is the GSI base of the IOAPIC enumerated
	 * last in the ACPI MADT, plus the number of interrupt pins of that IOAPIC.
	 */
	if (vioapic != NULL) {
		vm->arch_vm.vioapics.nr_gsi = vioapic->chipinfo.gsi_base + vioapic->chipinfo.nr_pins;
	}
}

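/* Return the number of GSIs covered by the VM's vIOAPICs. */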
uint32_t
get_vm_gsicount(const struct acrn_vm *vm)
{
	return vm->arch_vm.vioapics.nr_gsi;
}

/*
 * @pre handler_private_data != NULL
 */
int32_t vioapic_mmio_access_handler(struct io_request *io_req, void *handler_private_data)
{
	struct acrn_single_vioapic *vioapic = (struct acrn_single_vioapic *)handler_private_data;
	struct acrn_mmio_request *mmio = &io_req->reqs.mmio_request;
	uint64_t gpa = mmio->address;
	int32_t ret = 0;

	/* Note: all reads/writes to the IOAPIC are 32 bits in size */
	if (mmio->size == 4UL) {
		uint32_t data = (uint32_t)mmio->value;

		if (mmio->direction == ACRN_IOREQ_DIR_READ) {
			vioapic_mmio_rw(vioapic, gpa, &data, true);
			mmio->value = (uint64_t)data;
		} else if (mmio->direction == ACRN_IOREQ_DIR_WRITE) {
			vioapic_mmio_rw(vioapic, gpa, &data, false);
		} else {
			ret = -EINVAL;
		}
	} else {
		pr_err("All RW to IOAPIC must be 32-bits in size");
		ret = -EINVAL;
	}

	return ret;
}

/**
 * @pre vm->arch_vm.vioapics != NULL
 * @pre vgsi < get_vm_gsicount(vm)
 * @pre rte != NULL
 */
void vioapic_get_rte(const struct acrn_vm *vm, uint32_t vgsi, union ioapic_rte *rte)
{
	struct acrn_single_vioapic *vioapic;
	uint32_t pin;

	vioapic = vgsi_to_vioapic_and_vpin(vm, vgsi, &pin);

	*rte = vioapic->rtbl[pin];
}