1 /*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; If not, see <http://www.gnu.org/licenses/>.
15 *
16 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
17 * Copyright (C) Shaohua Li <shaohua.li@intel.com>
18 * Copyright (C) Allen Kay <allen.m.kay@intel.com> - adapted to xen
19 */

#include <xen/init.h>
#include <xen/bitmap.h>
#include <xen/errno.h>
#include <xen/kernel.h>
#include <xen/acpi.h>
#include <xen/mm.h>
#include <xen/xmalloc.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include <asm/atomic.h>
#include <asm/string.h>
#include "dmar.h"
#include "iommu.h"
#include "extern.h"
#include "vtd.h"

#undef PREFIX
#define PREFIX VTDPREFIX "ACPI DMAR:"
#define DEBUG

#define MIN_SCOPE_LEN (sizeof(struct acpi_dmar_device_scope) + \
                       sizeof(struct acpi_dmar_pci_path))

LIST_HEAD_READ_MOSTLY(acpi_drhd_units);
LIST_HEAD_READ_MOSTLY(acpi_rmrr_units);
static LIST_HEAD_READ_MOSTLY(acpi_atsr_units);
static LIST_HEAD_READ_MOSTLY(acpi_rhsa_units);

static struct acpi_table_header *__read_mostly dmar_table;
static int __read_mostly dmar_flags;
static u64 __read_mostly igd_drhd_address;

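/* Mark buses sec_bus..sub_bus (inclusive) as covered by this scope. */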
static void __init dmar_scope_add_buses(struct dmar_scope *scope, u16 sec_bus,
                                        u16 sub_bus)
{
    sub_bus &= 0xff;
    if ( sec_bus > sub_bus )
        return;

    while ( sec_bus <= sub_bus )
        set_bit(sec_bus++, scope->buses);
}

static int __init acpi_register_drhd_unit(struct acpi_drhd_unit *drhd)
{
    /*
     * Add an INCLUDE_ALL unit at the tail, so that a scan of the list
     * only finds it after all more specific units.
     */
    if ( drhd->include_all )
        list_add_tail(&drhd->list, &acpi_drhd_units);
    else
        list_add(&drhd->list, &acpi_drhd_units);
    return 0;
}

static int __init acpi_register_rmrr_unit(struct acpi_rmrr_unit *rmrr)
{
    list_add(&rmrr->list, &acpi_rmrr_units);
    return 0;
}

static void scope_devices_free(struct dmar_scope *scope)
{
    if ( !scope )
        return;

    scope->devices_cnt = 0;
    xfree(scope->devices);
    scope->devices = NULL;
}

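/* Tear down all registered DRHD/RMRR/ATSR units after a parse failure. */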
static void __init disable_all_dmar_units(void)
{
    struct acpi_drhd_unit *drhd, *_drhd;
    struct acpi_rmrr_unit *rmrr, *_rmrr;
    struct acpi_atsr_unit *atsr, *_atsr;

    list_for_each_entry_safe ( drhd, _drhd, &acpi_drhd_units, list )
    {
        list_del(&drhd->list);
        scope_devices_free(&drhd->scope);
        xfree(drhd);
    }
    list_for_each_entry_safe ( rmrr, _rmrr, &acpi_rmrr_units, list )
    {
        list_del(&rmrr->list);
        scope_devices_free(&rmrr->scope);
        xfree(rmrr);
    }
    list_for_each_entry_safe ( atsr, _atsr, &acpi_atsr_units, list )
    {
        list_del(&atsr->list);
        scope_devices_free(&atsr->scope);
        xfree(atsr);
    }
}

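/*
 * Lookup helpers: IO-APICs and HPETs appear in DRHD device scopes by
 * enumeration ID; map those IDs (and IOMMUs) back to their DRHD unit.
 */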
static int acpi_ioapic_device_match(
    struct list_head *ioapic_list, unsigned int apic_id)
{
    struct acpi_ioapic_unit *ioapic;

    list_for_each_entry( ioapic, ioapic_list, list ) {
        if ( ioapic->apic_id == apic_id )
            return 1;
    }
    return 0;
}

struct acpi_drhd_unit * ioapic_to_drhd(unsigned int apic_id)
{
    struct acpi_drhd_unit *drhd;

    list_for_each_entry( drhd, &acpi_drhd_units, list )
        if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) )
            return drhd;
    return NULL;
}

struct acpi_drhd_unit * iommu_to_drhd(struct iommu *iommu)
{
    struct acpi_drhd_unit *drhd;

    if ( iommu == NULL )
        return NULL;

    list_for_each_entry( drhd, &acpi_drhd_units, list )
        if ( drhd->iommu == iommu )
            return drhd;

    return NULL;
}

struct iommu * ioapic_to_iommu(unsigned int apic_id)
{
    struct acpi_drhd_unit *drhd;

    list_for_each_entry( drhd, &acpi_drhd_units, list )
        if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) )
            return drhd->iommu;
    return NULL;
}

static bool_t acpi_hpet_device_match(
    struct list_head *list, unsigned int hpet_id)
{
    struct acpi_hpet_unit *hpet;

    list_for_each_entry( hpet, list, list )
        if ( hpet->id == hpet_id )
            return 1;
    return 0;
}

struct acpi_drhd_unit *hpet_to_drhd(unsigned int hpet_id)
{
    struct acpi_drhd_unit *drhd;

    list_for_each_entry( drhd, &acpi_drhd_units, list )
        if ( acpi_hpet_device_match(&drhd->hpet_list, hpet_id) )
            return drhd;
    return NULL;
}

struct iommu *hpet_to_iommu(unsigned int hpet_id)
{
    struct acpi_drhd_unit *drhd = hpet_to_drhd(hpet_id);

    return drhd ? drhd->iommu : NULL;
}

static int __init acpi_register_atsr_unit(struct acpi_atsr_unit *atsr)
{
    /*
     * Add an ALL_PORTS unit at the tail, so that a scan of the list
     * only finds it after all more specific units.
     */
    if ( atsr->all_ports )
        list_add_tail(&atsr->list, &acpi_atsr_units);
    else
        list_add(&atsr->list, &acpi_atsr_units);
    return 0;
}

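/*
 * Find the DRHD unit covering a PCI device: first by explicit device
 * scope entry, then via the bus ranges recorded for bridge entries, and
 * finally by falling back to the INCLUDE_ALL unit, if any.  Virtual
 * functions are looked up via their physical function's BDF; devices
 * flagged is_extfn are looked up via function 0.
 */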
struct acpi_drhd_unit *acpi_find_matched_drhd_unit(const struct pci_dev *pdev)
{
    u8 bus, devfn;
    struct acpi_drhd_unit *drhd;
    struct acpi_drhd_unit *include_all = NULL;
    int i;

    if ( pdev == NULL )
        return NULL;

    if ( pdev->info.is_virtfn )
    {
        bus = pdev->info.physfn.bus;
        devfn = !pdev->info.is_extfn ? pdev->info.physfn.devfn : 0;
    }
    else if ( pdev->info.is_extfn )
    {
        bus = pdev->bus;
        devfn = 0;
    }
    else
    {
        bus = pdev->bus;
        devfn = pdev->devfn;
    }

    list_for_each_entry ( drhd, &acpi_drhd_units, list )
    {
        if ( drhd->segment != pdev->seg )
            continue;

        for ( i = 0; i < drhd->scope.devices_cnt; i++ )
            if ( drhd->scope.devices[i] == PCI_BDF2(bus, devfn) )
                return drhd;

        if ( test_bit(bus, drhd->scope.buses) )
            return drhd;

        if ( drhd->include_all )
            include_all = drhd;
    }
    return include_all;
}

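/*
 * Find the ATSR unit covering a PCI device, falling back to the ALL_PORTS
 * unit, if any.
 */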
struct acpi_atsr_unit *acpi_find_matched_atsr_unit(const struct pci_dev *pdev)
{
    struct acpi_atsr_unit *atsr;
    struct acpi_atsr_unit *all_ports = NULL;

    list_for_each_entry ( atsr, &acpi_atsr_units, list )
    {
        if ( atsr->segment != pdev->seg )
            continue;

        if ( test_bit(pdev->bus, atsr->scope.buses) )
            return atsr;

        if ( atsr->all_ports )
            all_ports = atsr;
    }
    return all_ports;
}

struct acpi_rhsa_unit * drhd_to_rhsa(struct acpi_drhd_unit *drhd)
{
    struct acpi_rhsa_unit *rhsa;

    if ( drhd == NULL )
        return NULL;

    list_for_each_entry ( rhsa, &acpi_rhsa_units, list )
    {
        if ( rhsa->address == drhd->address )
            return rhsa;
    }
    return NULL;
}

int is_igd_drhd(struct acpi_drhd_unit *drhd)
{
    return drhd && (drhd->address == igd_drhd_address);
}

/*
 * Count number of devices in device scope.  Do not include PCI sub
 * hierarchies.
 */
static int __init scope_device_count(const void *start, const void *end)
{
    const struct acpi_dmar_device_scope *scope;
    int count = 0;

    while ( start < end )
    {
        scope = start;
        if ( scope->length < MIN_SCOPE_LEN )
        {
            printk(XENLOG_WARNING VTDPREFIX "Invalid device scope\n");
            return -EINVAL;
        }

        if ( scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE ||
             scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
             scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC ||
             scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET )
            count++;

        start += scope->length;
    }

    return count;
}

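/*
 * Parse the device scope entries following a DRHD/RMRR/ATSR structure.
 * Each entry's PCI path is walked through any intermediate bridges to
 * resolve the final bus number, and the resulting BDF is recorded in the
 * scope.  Bridge entries additionally contribute their secondary to
 * subordinate bus range; IO-APIC and HPET entries found in a DRHD scope
 * also get their enumeration IDs recorded for later lookup.
 */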
static int __init acpi_parse_dev_scope(
    const void *start, const void *end, struct dmar_scope *scope,
    int type, u16 seg)
{
    struct acpi_ioapic_unit *acpi_ioapic_unit;
    const struct acpi_dmar_device_scope *acpi_scope;
    u16 bus, sub_bus, sec_bus;
    const struct acpi_dmar_pci_path *path;
    struct acpi_drhd_unit *drhd = type == DMAR_TYPE ?
        container_of(scope, struct acpi_drhd_unit, scope) : NULL;
    int depth, cnt, didx = 0, ret;

    if ( (cnt = scope_device_count(start, end)) < 0 )
        return cnt;

    if ( cnt > 0 )
    {
        scope->devices = xzalloc_array(u16, cnt);
        if ( !scope->devices )
            return -ENOMEM;
    }
    scope->devices_cnt = cnt;

    while ( start < end )
    {
        acpi_scope = start;
        path = (const void *)(acpi_scope + 1);
        depth = (acpi_scope->length - sizeof(*acpi_scope)) / sizeof(*path);
        bus = acpi_scope->bus;

        while ( --depth > 0 )
        {
            bus = pci_conf_read8(seg, bus, path->dev, path->fn,
                                 PCI_SECONDARY_BUS);
            path++;
        }

        switch ( acpi_scope->entry_type )
        {
        case ACPI_DMAR_SCOPE_TYPE_BRIDGE:
            sec_bus = pci_conf_read8(seg, bus, path->dev, path->fn,
                                     PCI_SECONDARY_BUS);
            sub_bus = pci_conf_read8(seg, bus, path->dev, path->fn,
                                     PCI_SUBORDINATE_BUS);
            if ( iommu_verbose )
                printk(VTDPREFIX
                       " bridge: %04x:%02x:%02x.%u start=%x sec=%x sub=%x\n",
                       seg, bus, path->dev, path->fn,
                       acpi_scope->bus, sec_bus, sub_bus);

            dmar_scope_add_buses(scope, sec_bus, sub_bus);
            break;

        case ACPI_DMAR_SCOPE_TYPE_HPET:
            if ( iommu_verbose )
                printk(VTDPREFIX " MSI HPET: %04x:%02x:%02x.%u\n",
                       seg, bus, path->dev, path->fn);

            if ( drhd )
            {
                struct acpi_hpet_unit *acpi_hpet_unit;

                ret = -ENOMEM;
                acpi_hpet_unit = xmalloc(struct acpi_hpet_unit);
                if ( !acpi_hpet_unit )
                    goto out;
                acpi_hpet_unit->id = acpi_scope->enumeration_id;
                acpi_hpet_unit->bus = bus;
                acpi_hpet_unit->dev = path->dev;
                acpi_hpet_unit->func = path->fn;
                list_add(&acpi_hpet_unit->list, &drhd->hpet_list);
            }

            break;

        case ACPI_DMAR_SCOPE_TYPE_ENDPOINT:
            if ( iommu_verbose )
                printk(VTDPREFIX " endpoint: %04x:%02x:%02x.%u\n",
                       seg, bus, path->dev, path->fn);

            if ( drhd )
            {
                if ( (seg == 0) && (bus == 0) && (path->dev == 2) &&
                     (path->fn == 0) )
                    igd_drhd_address = drhd->address;
            }

            break;

        case ACPI_DMAR_SCOPE_TYPE_IOAPIC:
            if ( iommu_verbose )
                printk(VTDPREFIX " IOAPIC: %04x:%02x:%02x.%u\n",
                       seg, bus, path->dev, path->fn);

            if ( drhd )
            {
                ret = -ENOMEM;
                acpi_ioapic_unit = xmalloc(struct acpi_ioapic_unit);
                if ( !acpi_ioapic_unit )
                    goto out;
                acpi_ioapic_unit->apic_id = acpi_scope->enumeration_id;
                acpi_ioapic_unit->ioapic.bdf.bus = bus;
                acpi_ioapic_unit->ioapic.bdf.dev = path->dev;
                acpi_ioapic_unit->ioapic.bdf.func = path->fn;
                list_add(&acpi_ioapic_unit->list, &drhd->ioapic_list);
            }

            break;

        default:
            if ( iommu_verbose )
                printk(XENLOG_WARNING VTDPREFIX "Unknown scope type %#x\n",
                       acpi_scope->entry_type);
            start += acpi_scope->length;
            continue;
        }
        scope->devices[didx++] = PCI_BDF(bus, path->dev, path->fn);
        start += acpi_scope->length;
    }

    ret = 0;

 out:
    if ( ret )
        scope_devices_free(scope);

    return ret;
}

static int __init acpi_dmar_check_length(
    const struct acpi_dmar_header *h, unsigned int min_len)
{
    if ( h->length >= min_len )
        return 0;
    printk(XENLOG_ERR VTDPREFIX "Invalid ACPI DMAR entry length: %#x\n",
           h->length);
    return -EINVAL;
}

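/*
 * Parse a single DRHD structure: allocate the unit and its IOMMU, parse
 * the device scope, and allow only one INCLUDE_ALL unit on segment 0.
 * Unless "iommu=force" is in effect, the scope is then validated: if no
 * scope device is PCI discoverable the unit is dropped under
 * "iommu=workaround_bios_bug", while a partially bogus scope makes the
 * whole table parse fail.
 */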
static int __init
acpi_parse_one_drhd(struct acpi_dmar_header *header)
{
    struct acpi_dmar_hardware_unit *drhd =
        container_of(header, struct acpi_dmar_hardware_unit, header);
    void *dev_scope_start, *dev_scope_end;
    struct acpi_drhd_unit *dmaru;
    int ret;
    static int include_all = 0;

    if ( (ret = acpi_dmar_check_length(header, sizeof(*drhd))) != 0 )
        return ret;

    if ( !drhd->address || !(drhd->address + 1) )
        return -ENODEV;

    dmaru = xzalloc(struct acpi_drhd_unit);
    if ( !dmaru )
        return -ENOMEM;

    dmaru->address = drhd->address;
    dmaru->segment = drhd->segment;
    dmaru->include_all = drhd->flags & ACPI_DMAR_INCLUDE_ALL;
    INIT_LIST_HEAD(&dmaru->ioapic_list);
    INIT_LIST_HEAD(&dmaru->hpet_list);
    if ( iommu_verbose )
        printk(VTDPREFIX " dmaru->address = %"PRIx64"\n", dmaru->address);

    ret = iommu_alloc(dmaru);
    if ( ret )
        goto out;

    dev_scope_start = (void *)(drhd + 1);
    dev_scope_end = ((void *)drhd) + header->length;
    ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
                               &dmaru->scope, DMAR_TYPE, drhd->segment);

    if ( dmaru->include_all )
    {
        if ( iommu_verbose )
            printk(VTDPREFIX " flags: INCLUDE_ALL\n");
        /* Only allow one INCLUDE_ALL */
        if ( drhd->segment == 0 && include_all )
        {
            printk(XENLOG_WARNING VTDPREFIX
                   "Only one INCLUDE_ALL device scope is allowed\n");
            ret = -EINVAL;
        }
        if ( drhd->segment == 0 )
            include_all = 1;
    }

    if ( ret )
        goto out;
    else if ( force_iommu || dmaru->include_all )
        acpi_register_drhd_unit(dmaru);
    else
    {
        u8 b, d, f;
        unsigned int i = 0, invalid_cnt = 0;
        union {
            const void *raw;
            const struct acpi_dmar_device_scope *scope;
        } p;

        /* Skip checking if segment is not accessible yet. */
        if ( !pci_known_segment(drhd->segment) )
            i = UINT_MAX;

        for ( p.raw = dev_scope_start; i < dmaru->scope.devices_cnt;
              i++, p.raw += p.scope->length )
        {
            if ( p.scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC ||
                 p.scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET )
                continue;

            b = PCI_BUS(dmaru->scope.devices[i]);
            d = PCI_SLOT(dmaru->scope.devices[i]);
            f = PCI_FUNC(dmaru->scope.devices[i]);

            if ( !pci_device_detect(drhd->segment, b, d, f) )
            {
                printk(XENLOG_WARNING VTDPREFIX
                       " Non-existent device (%04x:%02x:%02x.%u) in this DRHD's scope!\n",
                       drhd->segment, b, d, f);
                invalid_cnt++;
            }
        }

        if ( invalid_cnt )
        {
            if ( iommu_workaround_bios_bug &&
                 invalid_cnt == dmaru->scope.devices_cnt )
            {
                printk(XENLOG_WARNING VTDPREFIX
                       " Workaround BIOS bug: ignoring DRHD (no devices in its scope are PCI discoverable)\n");

                scope_devices_free(&dmaru->scope);
                iommu_free(dmaru);
                xfree(dmaru);
            }
            else
            {
                printk(XENLOG_WARNING VTDPREFIX
                       " DRHD is invalid (some devices in its scope are not PCI discoverable)\n");
                printk(XENLOG_WARNING VTDPREFIX
                       " Try \"iommu=force\" or \"iommu=workaround_bios_bug\" if you really want VT-d\n");
                ret = -EINVAL;
            }
        }
        else
            acpi_register_drhd_unit(dmaru);
    }

 out:
    if ( ret )
    {
        scope_devices_free(&dmaru->scope);
        iommu_free(dmaru);
        xfree(dmaru);
    }

    return ret;
}

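/*
 * Register a single RMRR unit: returns 1 (unit freed) when none of its
 * scope devices are PCI discoverable, -EFAULT for an inverted address
 * range, and 0 once the unit has been put on the list.
 */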
static int register_one_rmrr(struct acpi_rmrr_unit *rmrru)
{
    bool ignore = false;
    unsigned int i = 0;
    int ret = 0;

    /* Skip checking if segment is not accessible yet. */
    if ( !pci_known_segment(rmrru->segment) )
        i = UINT_MAX;

    for ( ; i < rmrru->scope.devices_cnt; i++ )
    {
        u8 b = PCI_BUS(rmrru->scope.devices[i]);
        u8 d = PCI_SLOT(rmrru->scope.devices[i]);
        u8 f = PCI_FUNC(rmrru->scope.devices[i]);

        if ( pci_device_detect(rmrru->segment, b, d, f) == 0 )
        {
            dprintk(XENLOG_WARNING VTDPREFIX,
                    " Non-existent device (%04x:%02x:%02x.%u) is reported"
                    " in RMRR (%"PRIx64", %"PRIx64")'s scope!\n",
                    rmrru->segment, b, d, f,
                    rmrru->base_address, rmrru->end_address);
            ignore = true;
        }
        else
        {
            ignore = false;
            break;
        }
    }

    if ( ignore )
    {
        dprintk(XENLOG_WARNING VTDPREFIX,
                " Ignoring RMRR (%"PRIx64", %"PRIx64") since no device"
                " in its scope is PCI discoverable!\n",
                rmrru->base_address, rmrru->end_address);
        scope_devices_free(&rmrru->scope);
        xfree(rmrru);
        return 1;
    }
    else if ( rmrru->base_address > rmrru->end_address )
    {
        dprintk(XENLOG_WARNING VTDPREFIX,
                " The RMRR (%"PRIx64", %"PRIx64") is incorrect!\n",
                rmrru->base_address, rmrru->end_address);
        scope_devices_free(&rmrru->scope);
        xfree(rmrru);
        ret = -EFAULT;
    }
    else
    {
        if ( iommu_verbose )
            dprintk(VTDPREFIX,
                    " RMRR region: base_addr %"PRIx64" end_addr %"PRIx64"\n",
                    rmrru->base_address, rmrru->end_address);
        acpi_register_rmrr_unit(rmrru);
    }

    return ret;
}

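/*
 * Parse a single RMRR structure: ranges overlapping a previously parsed
 * RMRR are rejected, and a warning is given when the range is not marked
 * reserved in the system memory map.
 */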
static int __init
acpi_parse_one_rmrr(struct acpi_dmar_header *header)
{
    struct acpi_dmar_reserved_memory *rmrr =
        container_of(header, struct acpi_dmar_reserved_memory, header);
    struct acpi_rmrr_unit *rmrru;
    void *dev_scope_start, *dev_scope_end;
    u64 base_addr = rmrr->base_address, end_addr = rmrr->end_address;
    int ret;

    if ( (ret = acpi_dmar_check_length(header, sizeof(*rmrr))) != 0 )
        return ret;

    list_for_each_entry(rmrru, &acpi_rmrr_units, list)
        if ( base_addr <= rmrru->end_address && rmrru->base_address <= end_addr )
        {
            printk(XENLOG_ERR VTDPREFIX
                   "Overlapping RMRRs [%"PRIx64",%"PRIx64"] and [%"PRIx64",%"PRIx64"]\n",
                   rmrru->base_address, rmrru->end_address,
                   base_addr, end_addr);
            return -EEXIST;
        }

    /*
     * This check is here simply to detect when RMRR values are
     * not properly represented in the system memory map and
     * inform the user.
     */
    if ( (!page_is_ram_type(paddr_to_pfn(base_addr), RAM_TYPE_RESERVED)) ||
         (!page_is_ram_type(paddr_to_pfn(end_addr), RAM_TYPE_RESERVED)) )
    {
        printk(XENLOG_WARNING VTDPREFIX
               " RMRR address range %"PRIx64"..%"PRIx64" not in reserved memory;"
               " need \"iommu_inclusive_mapping=1\"?\n",
               base_addr, end_addr);
    }

    rmrru = xzalloc(struct acpi_rmrr_unit);
    if ( !rmrru )
        return -ENOMEM;

    rmrru->base_address = base_addr;
    rmrru->end_address = end_addr;
    rmrru->segment = rmrr->segment;

    dev_scope_start = (void *)(rmrr + 1);
    dev_scope_end = ((void *)rmrr) + header->length;
    ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
                               &rmrru->scope, RMRR_TYPE, rmrr->segment);

    if ( !ret && (rmrru->scope.devices_cnt != 0) )
    {
        ret = register_one_rmrr(rmrru);
        /*
         * register_one_rmrr() returns greater than 0 when a specified
         * PCIe device cannot be detected. To prevent VT-d from being
         * disabled in such cases, reset the return value to 0 here.
         */
        if ( ret > 0 )
            ret = 0;
    }
    else
        xfree(rmrru);

    return ret;
}

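/*
 * Parse a single ATSR structure.  A unit without the ALL_PORTS flag
 * carries an explicit device scope; only one ALL_PORTS unit is allowed
 * on segment 0.
 */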
static int __init
acpi_parse_one_atsr(struct acpi_dmar_header *header)
{
    struct acpi_dmar_atsr *atsr =
        container_of(header, struct acpi_dmar_atsr, header);
    struct acpi_atsr_unit *atsru;
    int ret;
    static int all_ports;
    void *dev_scope_start, *dev_scope_end;

    if ( (ret = acpi_dmar_check_length(header, sizeof(*atsr))) != 0 )
        return ret;

    atsru = xzalloc(struct acpi_atsr_unit);
    if ( !atsru )
        return -ENOMEM;

    atsru->segment = atsr->segment;
    atsru->all_ports = atsr->flags & ACPI_DMAR_ALL_PORTS;
    if ( iommu_verbose )
        printk(VTDPREFIX " atsru->all_ports: %x\n", atsru->all_ports);
    if ( !atsru->all_ports )
    {
        dev_scope_start = (void *)(atsr + 1);
        dev_scope_end = ((void *)atsr) + header->length;
        ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
                                   &atsru->scope, ATSR_TYPE, atsr->segment);
    }
    else
    {
        if ( iommu_verbose )
            printk(VTDPREFIX " flags: ALL_PORTS\n");
        /* Only allow one ALL_PORTS */
        if ( atsr->segment == 0 && all_ports )
        {
            printk(XENLOG_WARNING VTDPREFIX
                   "Only one ALL_PORTS device scope is allowed\n");
            ret = -EINVAL;
        }
        if ( atsr->segment == 0 )
            all_ports = 1;
    }

    if ( ret )
    {
        scope_devices_free(&atsru->scope);
        xfree(atsru);
    }
    else
        acpi_register_atsr_unit(atsru);
    return ret;
}

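/* Parse a single RHSA structure, recording an IOMMU's proximity domain. */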
static int __init
acpi_parse_one_rhsa(struct acpi_dmar_header *header)
{
    struct acpi_dmar_rhsa *rhsa =
        container_of(header, struct acpi_dmar_rhsa, header);
    struct acpi_rhsa_unit *rhsau;
    int ret;

    if ( (ret = acpi_dmar_check_length(header, sizeof(*rhsa))) != 0 )
        return ret;

    rhsau = xzalloc(struct acpi_rhsa_unit);
    if ( !rhsau )
        return -ENOMEM;

    rhsau->address = rhsa->base_address;
    rhsau->proximity_domain = rhsa->proximity_domain;
    list_add_tail(&rhsau->list, &acpi_rhsa_units);
    if ( iommu_verbose )
        printk(VTDPREFIX
               " rhsau->address: %"PRIx64" rhsau->proximity_domain: %"PRIx32"\n",
               rhsau->address, rhsau->proximity_domain);

    return ret;
}

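/*
 * Top-level DMAR table parser: dispatch each remapping structure to its
 * handler.  Any failure tears down all units registered so far, and the
 * table signature is zapped in all cases so dom0 will not drive the VT-d
 * hardware itself.
 */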
static int __init acpi_parse_dmar(struct acpi_table_header *table)
{
    struct acpi_table_dmar *dmar;
    struct acpi_dmar_header *entry_header;
    u8 dmar_host_address_width;
    int ret = 0;

    dmar = (struct acpi_table_dmar *)table;
    dmar_flags = dmar->flags;

    if ( !iommu_enable && !iommu_intremap )
    {
        ret = -EINVAL;
        goto out;
    }

    if ( !dmar->width )
    {
        printk(XENLOG_WARNING VTDPREFIX "Invalid DMAR width (zero)\n");
        ret = -EINVAL;
        goto out;
    }

    dmar_host_address_width = dmar->width + 1;
    if ( iommu_verbose )
        printk(VTDPREFIX "Host address width %d\n", dmar_host_address_width);

    entry_header = (void *)(dmar + 1);
    while ( ((unsigned long)entry_header) <
            (((unsigned long)dmar) + table->length) )
    {
        ret = acpi_dmar_check_length(entry_header, sizeof(*entry_header));
        if ( ret )
            break;

        switch ( entry_header->type )
        {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
            if ( iommu_verbose )
                printk(VTDPREFIX "found ACPI_DMAR_DRHD:\n");
            ret = acpi_parse_one_drhd(entry_header);
            break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
            if ( iommu_verbose )
                printk(VTDPREFIX "found ACPI_DMAR_RMRR:\n");
            ret = acpi_parse_one_rmrr(entry_header);
            break;
        case ACPI_DMAR_TYPE_ATSR:
            if ( iommu_verbose )
                printk(VTDPREFIX "found ACPI_DMAR_ATSR:\n");
            ret = acpi_parse_one_atsr(entry_header);
            break;
        case ACPI_DMAR_HARDWARE_AFFINITY:
            if ( iommu_verbose )
                printk(VTDPREFIX "found ACPI_DMAR_RHSA:\n");
            ret = acpi_parse_one_rhsa(entry_header);
            break;
        default:
            dprintk(XENLOG_WARNING VTDPREFIX,
                    "Ignoring unknown DMAR structure type (%#x)\n",
                    entry_header->type);
            break;
        }
        if ( ret )
            break;

        entry_header = ((void *)entry_header + entry_header->length);
    }

    if ( ret )
    {
        printk(XENLOG_WARNING
               "Failed to parse ACPI DMAR. Disabling VT-d.\n");
        disable_all_dmar_units();
    }

 out:
    /* Zap the ACPI DMAR signature to prevent dom0 from using the VT-d HW. */
    acpi_dmar_zap();
    return ret;
}

#define MAX_USER_RMRR_PAGES 16
#define MAX_USER_RMRR 10

/* RMRR units derived from the command line "rmrr" option. */
#define MAX_USER_RMRR_DEV 20
struct user_rmrr {
    struct list_head list;
    unsigned long base_pfn, end_pfn;
    unsigned int dev_count;
    u32 sbdf[MAX_USER_RMRR_DEV];
};

static unsigned int __initdata nr_rmrr;
static struct user_rmrr __initdata user_rmrrs[MAX_USER_RMRR];

/* Macros for RMRR inclusive range formatting. */
#define ERMRRU_FMT "[%lx-%lx]"
#define ERMRRU_ARG(eru) eru.base_pfn, eru.end_pfn

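/*
 * Validate and register the RMRR ranges specified on the command line:
 * a range must be ordered, no larger than MAX_USER_RMRR_PAGES, must not
 * overlap an existing RMRR, must consist of valid MFNs, and all listed
 * devices must share one segment.
 */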
static int __init add_user_rmrr(void)
{
    struct acpi_rmrr_unit *rmrr, *rmrru;
    unsigned int idx, seg, i;
    unsigned long base, end;
    bool overlap;

    for ( i = 0; i < nr_rmrr; i++ )
    {
        base = user_rmrrs[i].base_pfn;
        end = user_rmrrs[i].end_pfn;

        if ( base > end )
        {
            printk(XENLOG_ERR VTDPREFIX
                   "Invalid RMRR Range "ERMRRU_FMT"\n",
                   ERMRRU_ARG(user_rmrrs[i]));
            continue;
        }

        if ( (end - base) >= MAX_USER_RMRR_PAGES )
        {
            printk(XENLOG_ERR VTDPREFIX
                   "RMRR range "ERMRRU_FMT" exceeds "\
                   __stringify(MAX_USER_RMRR_PAGES)" pages\n",
                   ERMRRU_ARG(user_rmrrs[i]));
            continue;
        }

        overlap = false;
        list_for_each_entry(rmrru, &acpi_rmrr_units, list)
        {
            if ( pfn_to_paddr(base) <= rmrru->end_address &&
                 rmrru->base_address <= pfn_to_paddr(end) )
            {
                printk(XENLOG_ERR VTDPREFIX
                       "Overlapping RMRRs: "ERMRRU_FMT" and [%lx-%lx]\n",
                       ERMRRU_ARG(user_rmrrs[i]),
                       paddr_to_pfn(rmrru->base_address),
                       paddr_to_pfn(rmrru->end_address));
                overlap = true;
                break;
            }
        }
        /* Don't add overlapping RMRR. */
        if ( overlap )
            continue;

        do
        {
            if ( !mfn_valid(_mfn(base)) )
            {
                printk(XENLOG_ERR VTDPREFIX
                       "Invalid pfn in RMRR range "ERMRRU_FMT"\n",
                       ERMRRU_ARG(user_rmrrs[i]));
                break;
            }
        } while ( base++ < end );

        /* Invalid pfn in range as the loop ended before end_pfn was reached. */
        if ( base <= end )
            continue;

        rmrr = xzalloc(struct acpi_rmrr_unit);
        if ( !rmrr )
            return -ENOMEM;

        rmrr->scope.devices = xmalloc_array(u16, user_rmrrs[i].dev_count);
        if ( !rmrr->scope.devices )
        {
            xfree(rmrr);
            return -ENOMEM;
        }

        seg = 0;
        for ( idx = 0; idx < user_rmrrs[i].dev_count; idx++ )
        {
            rmrr->scope.devices[idx] = user_rmrrs[i].sbdf[idx];
            seg |= PCI_SEG(user_rmrrs[i].sbdf[idx]);
        }
        if ( seg != PCI_SEG(user_rmrrs[i].sbdf[0]) )
        {
            printk(XENLOG_ERR VTDPREFIX
                   "Segments are not equal for RMRR range "ERMRRU_FMT"\n",
                   ERMRRU_ARG(user_rmrrs[i]));
            scope_devices_free(&rmrr->scope);
            xfree(rmrr);
            continue;
        }

        rmrr->segment = seg;
        rmrr->base_address = pfn_to_paddr(user_rmrrs[i].base_pfn);
        /* Align end_address to the end of the page. */
        rmrr->end_address = pfn_to_paddr(user_rmrrs[i].end_pfn) | ~PAGE_MASK;
        rmrr->scope.devices_cnt = user_rmrrs[i].dev_count;

        if ( register_one_rmrr(rmrr) )
            printk(XENLOG_ERR VTDPREFIX
                   "Could not register RMRR range "ERMRRU_FMT"\n",
                   ERMRRU_ARG(user_rmrrs[i]));
    }

    return 0;
}

#include <asm/tboot.h>
/* ACPI tables may not be DMA protected by tboot, so use DMAR copy */
/* SINIT saved in SinitMleData in TXT heap (which is DMA protected) */
#define parse_dmar_table(h) tboot_parse_dmar_table(h)

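/*
 * Find and map the ACPI DMAR table, then parse it via the tboot-protected
 * copy (see parse_dmar_table above); on success also register any RMRRs
 * specified on the command line.
 */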
int __init acpi_dmar_init(void)
{
    acpi_physical_address dmar_addr;
    acpi_native_uint dmar_len;
    int ret;

    if ( ACPI_SUCCESS(acpi_get_table_phys(ACPI_SIG_DMAR, 0,
                                          &dmar_addr, &dmar_len)) )
    {
        map_pages_to_xen((unsigned long)__va(dmar_addr), PFN_DOWN(dmar_addr),
                         PFN_UP(dmar_addr + dmar_len) - PFN_DOWN(dmar_addr),
                         PAGE_HYPERVISOR);
        dmar_table = __va(dmar_addr);
    }

    ret = parse_dmar_table(acpi_parse_dmar);

    if ( !ret )
        return add_user_rmrr();

    return ret;
}

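/* Restore the DMAR signature, undoing acpi_dmar_zap(). */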
void acpi_dmar_reinstate(void)
{
    uint32_t sig = 0x52414d44; /* "DMAR" */

    if ( dmar_table )
        write_atomic((uint32_t*)&dmar_table->signature[0], sig);
}

void acpi_dmar_zap(void)
{
    uint32_t sig = 0x44414d52; /* "RMAD" - doesn't alter table checksum */

    if ( dmar_table )
        write_atomic((uint32_t*)&dmar_table->signature[0], sig);
}

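/*
 * Both checks derive from the DMAR flags: interrupt remapping must be
 * advertised, and for x2APIC the firmware must not have set the opt-out
 * bit.
 */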
bool_t platform_supports_intremap(void)
{
    const unsigned int mask = ACPI_DMAR_INTR_REMAP;

    return (dmar_flags & mask) == ACPI_DMAR_INTR_REMAP;
}

bool_t __init platform_supports_x2apic(void)
{
    const unsigned int mask = ACPI_DMAR_INTR_REMAP | ACPI_DMAR_X2APIC_OPT_OUT;

    return cpu_has_x2apic && ((dmar_flags & mask) == ACPI_DMAR_INTR_REMAP);
}

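/*
 * Report each RMRR to func once: iteration is per (region, device) pair,
 * but once func claims a region (rc > 0) the remaining devices of that
 * unit are skipped.  A negative rc aborts the walk.
 */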
int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
{
    struct acpi_rmrr_unit *rmrr, *rmrr_cur = NULL;
    unsigned int i;
    u16 bdf;

    for_each_rmrr_device ( rmrr, bdf, i )
    {
        int rc;

        if ( rmrr == rmrr_cur )
            continue;

        rc = func(PFN_DOWN(rmrr->base_address),
                  PFN_UP(rmrr->end_address) - PFN_DOWN(rmrr->base_address),
                  PCI_SBDF2(rmrr->segment, bdf), ctxt);

        if ( unlikely(rc < 0) )
            return rc;

        if ( rc )
            rmrr_cur = rmrr;
    }

    return 0;
}

/*
 * Parse the "rmrr" Xen command line option, adding the parsed devices and
 * regions to the acpi_rmrr_unit list so they get mapped like RMRRs parsed
 * from ACPI.
 * Format:
 * rmrr=start<-end>=[s1]bdf1[,[s1]bdf2[,...]];start<-end>=[s2]bdf1[,[s2]bdf2[,...]]
 * If the segment of the first device is not specified,
 * segment zero will be used.
 * If other segments are not specified, the first device's segment will be
 * used.
 * If a segment is specified for other than the first device, and it does not
 * match the one specified for the first one, an error will be reported.
 */
static int __init parse_rmrr_param(const char *str)
{
    const char *s = str, *cur, *stmp;
    unsigned int seg, bus, dev, func, dev_count;
    unsigned long start, end;

    do {
        if ( nr_rmrr >= MAX_USER_RMRR )
            return -E2BIG;

        start = simple_strtoul(cur = s, &s, 16);
        if ( cur == s )
            return -EINVAL;

        if ( *s == '-' )
        {
            end = simple_strtoul(cur = s + 1, &s, 16);
            if ( cur == s )
                return -EINVAL;
        }
        else
            end = start;

        user_rmrrs[nr_rmrr].base_pfn = start;
        user_rmrrs[nr_rmrr].end_pfn = end;

        if ( *s != '=' )
            continue;

        do {
            bool def_seg = false;

            stmp = parse_pci_seg(s + 1, &seg, &bus, &dev, &func, &def_seg);
            if ( !stmp )
                return -EINVAL;

            /*
             * Not specified.
             * Segment will be replaced with the one from the first device.
             */
            if ( user_rmrrs[nr_rmrr].dev_count && def_seg )
                seg = PCI_SEG(user_rmrrs[nr_rmrr].sbdf[0]);

            /* Keep sbdf's even if they differ and later report an error. */
            dev_count = user_rmrrs[nr_rmrr].dev_count;
            user_rmrrs[nr_rmrr].sbdf[dev_count] = PCI_SBDF(seg, bus, dev, func);

            user_rmrrs[nr_rmrr].dev_count++;
            s = stmp;
        } while ( *s == ',' &&
                  user_rmrrs[nr_rmrr].dev_count < MAX_USER_RMRR_DEV );

        if ( user_rmrrs[nr_rmrr].dev_count )
            nr_rmrr++;

    } while ( *s++ == ';' );

    return s[-1] ? -EINVAL : 0;
}
custom_param("rmrr", parse_rmrr_param);