/*
 * Copyright (C) 2001 MandrakeSoft S.A.
 *
 * MandrakeSoft S.A.
 * 43, rue d'Aboukir
 * 75002 Paris - France
 * http://www.linux-mandrake.com/
 * http://www.mandrakesoft.com/
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 *
 * Yunhong Jiang <yunhong.jiang@intel.com>
 * Ported to xen by using virtual IRQ line.
 */

#include <xen/types.h>
#include <xen/mm.h>
#include <xen/xmalloc.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <public/hvm/ioreq.h>
#include <asm/hvm/io.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vlapic.h>
#include <asm/hvm/support.h>
#include <asm/current.h>
#include <asm/event.h>
#include <asm/io_apic.h>

/* HACK: Route IRQ0 only to VCPU0 to prevent time jumps. */
#define IRQ0_SPECIAL_ROUTING 1

static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int irq);

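/* Find the vIO-APIC, if any, whose MMIO window covers @addr. */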
static struct hvm_vioapic *addr_vioapic(const struct domain *d,
                                        unsigned long addr)
{
    unsigned int i;

    for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
    {
        struct hvm_vioapic *vioapic = domain_vioapic(d, i);

        if ( addr >= vioapic->base_address &&
             addr < vioapic->base_address + VIOAPIC_MEM_LENGTH )
            return vioapic;
    }

    return NULL;
}

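/*
 * Find the vIO-APIC serving @gsi; the pin offset within it is returned
 * via @pin.  Returns NULL if no vIO-APIC covers the GSI.
 */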
static struct hvm_vioapic *gsi_vioapic(const struct domain *d,
                                       unsigned int gsi, unsigned int *pin)
{
    unsigned int i;

    for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
    {
        struct hvm_vioapic *vioapic = domain_vioapic(d, i);

        if ( gsi >= vioapic->base_gsi &&
             gsi < vioapic->base_gsi + vioapic->nr_pins )
        {
            *pin = gsi - vioapic->base_gsi;
            return vioapic;
        }
    }

    return NULL;
}

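/*
 * Indirect register access: the register index previously latched in
 * IOREGSEL (VIOAPIC_REG_SELECT) selects what the IOWIN window
 * (VIOAPIC_REG_WINDOW) returns.  Redirection table entries are 64 bits
 * wide and occupy two consecutive indices starting at VIOAPIC_REG_RTE0;
 * illustratively, pin N's low half is index VIOAPIC_REG_RTE0 + 2*N and
 * its high half is the following (odd) index.
 */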
static uint32_t vioapic_read_indirect(const struct hvm_vioapic *vioapic)
{
    uint32_t result = 0;

    switch ( vioapic->ioregsel )
    {
    case VIOAPIC_REG_VERSION:
        result = ((union IO_APIC_reg_01){
                  .bits = { .version = VIOAPIC_VERSION_ID,
                            .entries = vioapic->nr_pins - 1 }
                  }).raw;
        break;

    case VIOAPIC_REG_APIC_ID:
        /*
         * Using union IO_APIC_reg_02 for the ID register too, as
         * union IO_APIC_reg_00's ID field is 8 bits wide for some reason.
         */
    case VIOAPIC_REG_ARB_ID:
        result = ((union IO_APIC_reg_02){
                  .bits = { .arbitration = vioapic->id }
                  }).raw;
        break;

    default:
    {
        uint32_t redir_index = (vioapic->ioregsel - VIOAPIC_REG_RTE0) >> 1;
        uint64_t redir_content;

        if ( redir_index >= vioapic->nr_pins )
        {
            gdprintk(XENLOG_WARNING, "apic_mem_readl:undefined ioregsel %x\n",
                     vioapic->ioregsel);
            break;
        }

        redir_content = vioapic->redirtbl[redir_index].bits;
        result = (vioapic->ioregsel & 1) ? (redir_content >> 32)
                                         : redir_content;
        break;
    }
    }

    return result;
}

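/*
 * MMIO read handler: returns the latched register index for reads of
 * VIOAPIC_REG_SELECT and the selected register's contents for reads of
 * VIOAPIC_REG_WINDOW; all other offsets read as zero.
 */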
static int vioapic_read(
    struct vcpu *v, unsigned long addr,
    unsigned int length, unsigned long *pval)
{
    const struct hvm_vioapic *vioapic;
    uint32_t result;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "addr %lx", addr);

    vioapic = addr_vioapic(v->domain, addr);
    ASSERT(vioapic);

    switch ( addr & 0xff )
    {
    case VIOAPIC_REG_SELECT:
        result = vioapic->ioregsel;
        break;

    case VIOAPIC_REG_WINDOW:
        result = vioapic_read_indirect(vioapic);
        break;

    default:
        result = 0;
        break;
    }

    *pval = result;
    return X86EMUL_OKAY;
}

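/*
 * Hardware domain only: a GSI has just been unmasked, so wire it to the
 * physical interrupt by registering the trigger/polarity, allocating and
 * mapping a PIRQ, and binding that PIRQ to the domain.  A failed bind
 * unmaps the PIRQ again; an already-registered GSI is not an error.
 */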
static int vioapic_hwdom_map_gsi(unsigned int gsi, unsigned int trig,
                                 unsigned int pol)
{
    struct domain *currd = current->domain;
    struct xen_domctl_bind_pt_irq pt_irq_bind = {
        .irq_type = PT_IRQ_TYPE_PCI,
        .machine_irq = gsi,
    };
    int ret, pirq = gsi;

    ASSERT(is_hardware_domain(currd));

    /* Interrupt has been unmasked, bind it now. */
    ret = mp_register_gsi(gsi, trig, pol);
    if ( ret == -EEXIST )
        return 0;
    if ( ret )
    {
        gprintk(XENLOG_WARNING, "vioapic: error registering GSI %u: %d\n",
                gsi, ret);
        return ret;
    }

    ret = allocate_and_map_gsi_pirq(currd, pirq, &pirq);
    if ( ret )
    {
        gprintk(XENLOG_WARNING, "vioapic: error mapping GSI %u: %d\n",
                gsi, ret);
        return ret;
    }

    pcidevs_lock();
    ret = pt_irq_create_bind(currd, &pt_irq_bind);
    if ( ret )
    {
        gprintk(XENLOG_WARNING, "vioapic: error binding GSI %u: %d\n",
                gsi, ret);
        spin_lock(&currd->event_lock);
        unmap_domain_pirq(currd, pirq);
        spin_unlock(&currd->event_lock);
    }
    pcidevs_unlock();

    return ret;
}

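/*
 * Write one 32-bit half of a redirection entry (@top_word selects the
 * high half, which only holds the destination ID).  Remote IRR and
 * delivery status remain read-only.  Unmasking a level-triggered entry
 * whose line is still asserted delivers the pending interrupt at once.
 */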
static void vioapic_write_redirent(
    struct hvm_vioapic *vioapic, unsigned int idx,
    int top_word, uint32_t val)
{
    struct domain *d = vioapic_domain(vioapic);
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    union vioapic_redir_entry *pent, ent;
    int unmasked = 0;
    unsigned int gsi = vioapic->base_gsi + idx;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    pent = &vioapic->redirtbl[idx];
    ent = *pent;

    if ( top_word )
    {
        /* Contains only the dest_id. */
        ent.bits = (uint32_t)ent.bits | ((uint64_t)val << 32);
    }
    else
    {
        unmasked = ent.fields.mask;
        /* Remote IRR and Delivery Status are read-only. */
        ent.bits = ((ent.bits >> 32) << 32) | val;
        ent.fields.delivery_status = 0;
        ent.fields.remote_irr = pent->fields.remote_irr;
        unmasked = unmasked && !ent.fields.mask;
    }

    *pent = ent;

    if ( is_hardware_domain(d) && unmasked )
    {
        int ret;

        ret = vioapic_hwdom_map_gsi(gsi, ent.fields.trig_mode,
                                    ent.fields.polarity);
        if ( ret )
        {
            /* Mask the entry again. */
            pent->fields.mask = 1;
            unmasked = 0;
        }
    }

    if ( gsi == 0 )
    {
        vlapic_adjust_i8259_target(d);
    }
    else if ( ent.fields.trig_mode == VIOAPIC_EDGE_TRIG )
        pent->fields.remote_irr = 0;
    else if ( !ent.fields.mask &&
              !ent.fields.remote_irr &&
              hvm_irq->gsi_assert_count[idx] )
    {
        pent->fields.remote_irr = 1;
        vioapic_deliver(vioapic, idx);
    }

    spin_unlock(&d->arch.hvm_domain.irq_lock);

    if ( gsi == 0 || unmasked )
        pt_may_unmask_irq(d, NULL);
}

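/* Indirect register writes: dispatch on the index latched in IOREGSEL. */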
static void vioapic_write_indirect(
    struct hvm_vioapic *vioapic, uint32_t val)
{
    switch ( vioapic->ioregsel )
    {
    case VIOAPIC_REG_VERSION:
        /* Writes are ignored. */
        break;

    case VIOAPIC_REG_APIC_ID:
        /*
         * Presumably because we emulate an Intel IOAPIC which only has a
         * 4 bit ID field (compared to 8 for AMD), using union IO_APIC_reg_02
         * for the ID register (union IO_APIC_reg_00's ID field is 8 bits).
         */
        vioapic->id = ((union IO_APIC_reg_02){ .raw = val }).bits.arbitration;
        break;

    case VIOAPIC_REG_ARB_ID:
        break;

    default:
    {
        uint32_t redir_index = (vioapic->ioregsel - VIOAPIC_REG_RTE0) >> 1;

        HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "rte[%02x].%s = %08x",
                    redir_index, vioapic->ioregsel & 1 ? "hi" : "lo", val);

        if ( redir_index >= vioapic->nr_pins )
        {
            gdprintk(XENLOG_WARNING, "vioapic_write_indirect "
                     "error register %x\n", vioapic->ioregsel);
            break;
        }

        vioapic_write_redirent(
            vioapic, redir_index, vioapic->ioregsel & 1, val);
        break;
    }
    }
}

static int vioapic_write(
    struct vcpu *v, unsigned long addr,
    unsigned int length, unsigned long val)
{
    struct hvm_vioapic *vioapic;

    vioapic = addr_vioapic(v->domain, addr);
    ASSERT(vioapic);

    switch ( addr & 0xff )
    {
    case VIOAPIC_REG_SELECT:
        vioapic->ioregsel = val;
        break;

    case VIOAPIC_REG_WINDOW:
        vioapic_write_indirect(vioapic, val);
        break;

#if VIOAPIC_VERSION_ID >= 0x20
    case VIOAPIC_REG_EOI:
        vioapic_update_EOI(v->domain, val);
        break;
#endif

    default:
        break;
    }

    return X86EMUL_OKAY;
}

static int vioapic_range(struct vcpu *v, unsigned long addr)
{
    return !!addr_vioapic(v->domain, addr);
}

static const struct hvm_mmio_ops vioapic_mmio_ops = {
    .check = vioapic_range,
    .read = vioapic_read,
    .write = vioapic_write
};

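/* Inject @vector into @target's vLAPIC. */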
static void ioapic_inj_irq(
    struct hvm_vioapic *vioapic,
    struct vlapic *target,
    uint8_t vector,
    uint8_t trig_mode,
    uint8_t delivery_mode)
{
    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %d trig %d deliv %d",
                vector, trig_mode, delivery_mode);

    ASSERT((delivery_mode == dest_Fixed) ||
           (delivery_mode == dest_LowestPrio));

    vlapic_set_irq(target, vector, trig_mode);
}

static inline int pit_channel0_enabled(void)
{
    return pt_active(&current->domain->arch.vpit.pt0);
}

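/*
 * Deliver the interrupt for @pin according to its redirection entry.
 * With IRQ0_SPECIAL_ROUTING, the PIT interrupt (ISA IRQ0) is steered
 * exclusively to VCPU0, for both lowest-priority and fixed delivery.
 */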
static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin)
{
    uint16_t dest = vioapic->redirtbl[pin].fields.dest_id;
    uint8_t dest_mode = vioapic->redirtbl[pin].fields.dest_mode;
    uint8_t delivery_mode = vioapic->redirtbl[pin].fields.delivery_mode;
    uint8_t vector = vioapic->redirtbl[pin].fields.vector;
    uint8_t trig_mode = vioapic->redirtbl[pin].fields.trig_mode;
    struct domain *d = vioapic_domain(vioapic);
    struct vlapic *target;
    struct vcpu *v;
    unsigned int irq = vioapic->base_gsi + pin;

    ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
                "dest=%x dest_mode=%x delivery_mode=%x "
                "vector=%x trig_mode=%x",
                dest, dest_mode, delivery_mode, vector, trig_mode);

    switch ( delivery_mode )
    {
    case dest_LowestPrio:
    {
#ifdef IRQ0_SPECIAL_ROUTING
        /* Force round-robin to pick VCPU 0 */
        if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
        {
            v = d->vcpu ? d->vcpu[0] : NULL;
            target = v ? vcpu_vlapic(v) : NULL;
        }
        else
#endif
            target = vlapic_lowest_prio(d, NULL, 0, dest, dest_mode);
        if ( target != NULL )
        {
            ioapic_inj_irq(vioapic, target, vector, trig_mode, delivery_mode);
        }
        else
        {
            HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: "
                        "vector=%x delivery_mode=%x",
                        vector, dest_LowestPrio);
        }
        break;
    }

    case dest_Fixed:
    {
#ifdef IRQ0_SPECIAL_ROUTING
        /* Do not deliver timer interrupts to VCPU != 0 */
        if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
        {
            if ( (v = d->vcpu ? d->vcpu[0] : NULL) != NULL )
                ioapic_inj_irq(vioapic, vcpu_vlapic(v), vector,
                               trig_mode, delivery_mode);
        }
        else
#endif
        {
            for_each_vcpu ( d, v )
                if ( vlapic_match_dest(vcpu_vlapic(v), NULL,
                                       0, dest, dest_mode) )
                    ioapic_inj_irq(vioapic, vcpu_vlapic(v), vector,
                                   trig_mode, delivery_mode);
        }
        break;
    }

    case dest_NMI:
    {
        for_each_vcpu ( d, v )
            if ( vlapic_match_dest(vcpu_vlapic(v), NULL,
                                   0, dest, dest_mode) &&
                 !test_and_set_bool(v->nmi_pending) )
                vcpu_kick(v);
        break;
    }

    default:
        gdprintk(XENLOG_WARNING, "Unsupported delivery mode %d\n",
                 delivery_mode);
        break;
    }
}

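/*
 * Handle the rising edge of a GSI (IRQ lock held).  Edge-triggered pins
 * deliver on every edge; level-triggered pins deliver only while Remote
 * IRR is clear, setting it to suppress re-delivery until EOI.
 */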
void vioapic_irq_positive_edge(struct domain *d, unsigned int irq)
{
    unsigned int pin;
    struct hvm_vioapic *vioapic = gsi_vioapic(d, irq, &pin);
    union vioapic_redir_entry *ent;

    if ( !vioapic )
    {
        ASSERT_UNREACHABLE();
        return;
    }

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %x", irq);

    ASSERT(pin < vioapic->nr_pins);
    ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));

    ent = &vioapic->redirtbl[pin];
    if ( ent->fields.mask )
        return;

    if ( ent->fields.trig_mode == VIOAPIC_EDGE_TRIG )
    {
        vioapic_deliver(vioapic, pin);
    }
    else if ( !ent->fields.remote_irr )
    {
        ent->fields.remote_irr = 1;
        vioapic_deliver(vioapic, pin);
    }
}

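/*
 * EOI for @vector: clear Remote IRR on every redirection entry using the
 * vector, let the IOMMU pass-through layer EOI the physical line, and
 * re-deliver any level-triggered interrupt that is still asserted.
 */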
void vioapic_update_EOI(struct domain *d, u8 vector)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    union vioapic_redir_entry *ent;
    unsigned int i;

    ASSERT(has_vioapic(d));

    spin_lock(&d->arch.hvm_domain.irq_lock);

    for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
    {
        struct hvm_vioapic *vioapic = domain_vioapic(d, i);
        unsigned int pin;

        for ( pin = 0; pin < vioapic->nr_pins; pin++ )
        {
            ent = &vioapic->redirtbl[pin];
            if ( ent->fields.vector != vector )
                continue;

            ent->fields.remote_irr = 0;

            if ( iommu_enabled )
            {
                spin_unlock(&d->arch.hvm_domain.irq_lock);
                hvm_dpci_eoi(d, vioapic->base_gsi + pin, ent);
                spin_lock(&d->arch.hvm_domain.irq_lock);
            }

            if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
                 !ent->fields.mask &&
                 hvm_irq->gsi_assert_count[vioapic->base_gsi + pin] )
            {
                ent->fields.remote_irr = 1;
                vioapic_deliver(vioapic, pin);
            }
        }
    }

    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

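/* Redirection entry accessors; all return -EINVAL for an unknown GSI. */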
int vioapic_get_mask(const struct domain *d, unsigned int gsi)
{
    unsigned int pin;
    const struct hvm_vioapic *vioapic = gsi_vioapic(d, gsi, &pin);

    if ( !vioapic )
        return -EINVAL;

    return vioapic->redirtbl[pin].fields.mask;
}

int vioapic_get_vector(const struct domain *d, unsigned int gsi)
{
    unsigned int pin;
    const struct hvm_vioapic *vioapic = gsi_vioapic(d, gsi, &pin);

    if ( !vioapic )
        return -EINVAL;

    return vioapic->redirtbl[pin].fields.vector;
}

int vioapic_get_trigger_mode(const struct domain *d, unsigned int gsi)
{
    unsigned int pin;
    const struct hvm_vioapic *vioapic = gsi_vioapic(d, gsi, &pin);

    if ( !vioapic )
        return -EINVAL;

    return vioapic->redirtbl[pin].fields.trig_mode;
}

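/*
 * Save/restore: only the single, default-sized domU vIO-APIC layout is
 * migrated; other configurations are rejected with -EOPNOTSUPP.
 */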
static int ioapic_save(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_vioapic *s;

    if ( !has_vioapic(d) )
        return 0;

    s = domain_vioapic(d, 0);

    if ( s->nr_pins != ARRAY_SIZE(s->domU.redirtbl) ||
         d->arch.hvm_domain.nr_vioapics != 1 )
        return -EOPNOTSUPP;

    return hvm_save_entry(IOAPIC, 0, h, &s->domU);
}

static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_vioapic *s;

    if ( !has_vioapic(d) )
        return -ENODEV;

    s = domain_vioapic(d, 0);

    if ( s->nr_pins != ARRAY_SIZE(s->domU.redirtbl) ||
         d->arch.hvm_domain.nr_vioapics != 1 )
        return -EOPNOTSUPP;

    return hvm_load_entry(IOAPIC, h, &s->domU);
}

HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);

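/*
 * Reset to power-on state: all redirection entries masked.  The hardware
 * domain's vIO-APICs mirror the physical base addresses and IDs; other
 * domains get a single vIO-APIC at the default base address with ID 0.
 */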
void vioapic_reset(struct domain *d)
{
    unsigned int i;

    if ( !has_vioapic(d) )
    {
        ASSERT(!d->arch.hvm_domain.nr_vioapics);
        return;
    }

    for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
    {
        struct hvm_vioapic *vioapic = domain_vioapic(d, i);
        unsigned int nr_pins = vioapic->nr_pins, base_gsi = vioapic->base_gsi;
        unsigned int pin;

        memset(vioapic, 0, hvm_vioapic_size(nr_pins));
        for ( pin = 0; pin < nr_pins; pin++ )
            vioapic->redirtbl[pin].fields.mask = 1;

        if ( !is_hardware_domain(d) )
        {
            ASSERT(!i && !base_gsi);
            vioapic->base_address = VIOAPIC_DEFAULT_BASE_ADDRESS;
            vioapic->id = 0;
        }
        else
        {
            vioapic->base_address = mp_ioapics[i].mpc_apicaddr;
            vioapic->id = mp_ioapics[i].mpc_apicid;
        }
        vioapic->base_gsi = base_gsi;
        vioapic->nr_pins = nr_pins;
        vioapic->domain = d;
    }
}

static void vioapic_free(const struct domain *d, unsigned int nr_vioapics)
{
    unsigned int i;

    for ( i = 0; i < nr_vioapics; i++ )
        xfree(domain_vioapic(d, i));
    xfree(d->arch.hvm_domain.vioapic);
}

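/*
 * Allocate and initialise the domain's vIO-APICs: one per physical
 * IO-APIC for the hardware domain, otherwise a single default-sized
 * instance, then register the MMIO handler covering their windows.
 */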
int vioapic_init(struct domain *d)
{
    unsigned int i, nr_vioapics, nr_gsis = 0;

    if ( !has_vioapic(d) )
    {
        ASSERT(!d->arch.hvm_domain.nr_vioapics);
        return 0;
    }

    nr_vioapics = is_hardware_domain(d) ? nr_ioapics : 1;

    if ( (d->arch.hvm_domain.vioapic == NULL) &&
         ((d->arch.hvm_domain.vioapic =
           xzalloc_array(struct hvm_vioapic *, nr_vioapics)) == NULL) )
        return -ENOMEM;

    for ( i = 0; i < nr_vioapics; i++ )
    {
        unsigned int nr_pins, base_gsi;

        if ( is_hardware_domain(d) )
        {
            nr_pins = nr_ioapic_entries[i];
            base_gsi = io_apic_gsi_base(i);
        }
        else
        {
            nr_pins = ARRAY_SIZE(domain_vioapic(d, 0)->domU.redirtbl);
            base_gsi = 0;
        }

        if ( (domain_vioapic(d, i) =
              xmalloc_bytes(hvm_vioapic_size(nr_pins))) == NULL )
        {
            vioapic_free(d, nr_vioapics);
            return -ENOMEM;
        }
        domain_vioapic(d, i)->nr_pins = nr_pins;
        domain_vioapic(d, i)->base_gsi = base_gsi;
        nr_gsis = max(nr_gsis, base_gsi + nr_pins);
    }

    /*
     * NB: hvm_domain_irq(d)->nr_gsis is actually the highest GSI + 1, but
     * there might be holes in this range (ie: GSIs that don't belong to any
     * vIO APIC).
     */
    ASSERT(hvm_domain_irq(d)->nr_gsis >= nr_gsis);

    d->arch.hvm_domain.nr_vioapics = nr_vioapics;
    vioapic_reset(d);

    register_mmio_handler(d, &vioapic_mmio_ops);

    return 0;
}

void vioapic_deinit(struct domain *d)
{
    if ( !has_vioapic(d) )
    {
        ASSERT(!d->arch.hvm_domain.nr_vioapics);
        return;
    }

    vioapic_free(d, d->arch.hvm_domain.nr_vioapics);
}