/*
 * Copyright (C) 2007 Advanced Micro Devices, Inc.
 * Author: Leo Duran <leo.duran@amd.com>
 * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/errno.h>
#include <xen/acpi.h>
#include <asm/apicdef.h>
#include <asm/io_apic.h>
#include <asm/amd-iommu.h>
#include <asm/hvm/svm/amd-iommu-proto.h>

/* Some helper structures, particularly to deal with ranges. */

struct acpi_ivhd_device_range {
    struct acpi_ivrs_device4 start;
    struct acpi_ivrs_device4 end;
};

struct acpi_ivhd_device_alias_range {
    struct acpi_ivrs_device8a alias;
    struct acpi_ivrs_device4 end;
};

struct acpi_ivhd_device_extended_range {
    struct acpi_ivrs_device8b extended;
    struct acpi_ivrs_device4 end;
};

union acpi_ivhd_device {
    struct acpi_ivrs_de_header header;
    struct acpi_ivrs_device4 select;
    struct acpi_ivhd_device_range range;
    struct acpi_ivrs_device8a alias;
    struct acpi_ivhd_device_alias_range alias_range;
    struct acpi_ivrs_device8b extended;
    struct acpi_ivhd_device_extended_range extended_range;
    struct acpi_ivrs_device8c special;
};

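/*
 * Record the IVRS mapping for one device: its DTE requestor (alias) ID,
 * its IVHD flags, and the IOMMU that owns it.  The interrupt remapping
 * table is allocated per device when amd_iommu_perdev_intremap is set;
 * otherwise a single table is shared by all devices.
 */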
static void __init add_ivrs_mapping_entry(
    u16 bdf, u16 alias_id, u8 flags, struct amd_iommu *iommu)
{
    struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(iommu->seg);

    ASSERT( ivrs_mappings != NULL );

    /* setup requestor id */
    ivrs_mappings[bdf].dte_requestor_id = alias_id;

    /* override flags for range of devices */
    ivrs_mappings[bdf].device_flags = flags;

    if ( ivrs_mappings[alias_id].intremap_table == NULL )
    {
        /* allocate per-device interrupt remapping table */
        if ( amd_iommu_perdev_intremap )
            ivrs_mappings[alias_id].intremap_table =
                amd_iommu_alloc_intremap_table(
                    &ivrs_mappings[alias_id].intremap_inuse);
        else
        {
            if ( shared_intremap_table == NULL )
                shared_intremap_table = amd_iommu_alloc_intremap_table(
                    &shared_intremap_inuse);
            ivrs_mappings[alias_id].intremap_table = shared_intremap_table;
            ivrs_mappings[alias_id].intremap_inuse = shared_intremap_inuse;
        }
    }
    /* assign iommu hardware */
    ivrs_mappings[bdf].iommu = iommu;
}

static struct amd_iommu * __init find_iommu_from_bdf_cap(
    u16 seg, u16 bdf, u16 cap_offset)
{
    struct amd_iommu *iommu;

    for_each_amd_iommu ( iommu )
        if ( (iommu->seg == seg) && (iommu->bdf == bdf) &&
             (iommu->cap_offset == cap_offset) )
            return iommu;

    return NULL;
}

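/*
 * Program (or widen) the IOMMU's exclusion range.  If a range is already
 * enabled, grow base/limit so that the result covers both the old and the
 * new range.
 */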
static void __init reserve_iommu_exclusion_range(
    struct amd_iommu *iommu, uint64_t base, uint64_t limit)
{
    /* need to extend exclusion range? */
    if ( iommu->exclusion_enable )
    {
        if ( iommu->exclusion_base < base )
            base = iommu->exclusion_base;
        if ( iommu->exclusion_limit > limit )
            limit = iommu->exclusion_limit;
    }

    iommu->exclusion_enable = IOMMU_CONTROL_ENABLED;
    iommu->exclusion_base = base;
    iommu->exclusion_limit = limit;
}

static void __init reserve_iommu_exclusion_range_all(
    struct amd_iommu *iommu,
    unsigned long base, unsigned long limit)
{
    reserve_iommu_exclusion_range(iommu, base, limit);
    iommu->exclusion_allow_all = IOMMU_CONTROL_ENABLED;
}

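/*
 * Record a unity-mapped range for one device.  If a range is already
 * enabled for this BDF, merge the two so the recorded range covers both;
 * the read/write permissions are overridden with the latest request.
 */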
static void __init reserve_unity_map_for_device(
    u16 seg, u16 bdf, unsigned long base,
    unsigned long length, u8 iw, u8 ir)
{
    struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
    unsigned long old_top, new_top;

    /* need to extend unity-mapped range? */
    if ( ivrs_mappings[bdf].unity_map_enable )
    {
        old_top = ivrs_mappings[bdf].addr_range_start +
            ivrs_mappings[bdf].addr_range_length;
        new_top = base + length;
        if ( old_top > new_top )
            new_top = old_top;
        if ( ivrs_mappings[bdf].addr_range_start < base )
            base = ivrs_mappings[bdf].addr_range_start;
        length = new_top - base;
    }

    /* override read/write permissions with the latest request */
    ivrs_mappings[bdf].write_permission = iw;
    ivrs_mappings[bdf].read_permission = ir;
    ivrs_mappings[bdf].unity_map_enable = IOMMU_CONTROL_ENABLED;
    ivrs_mappings[bdf].addr_range_start = base;
    ivrs_mappings[bdf].addr_range_length = length;
}

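/*
 * An exclusion range may straddle the top of the IOMMU virtual address
 * space (max_page).  The part below it is reserved as r/w unity-mapped
 * entries for the affected device(s); whatever reaches max_page or beyond
 * is programmed into the hardware exclusion registers.
 */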
static int __init register_exclusion_range_for_all_devices(
    unsigned long base, unsigned long limit, u8 iw, u8 ir)
{
    int seg = 0; /* XXX */
    unsigned long range_top, iommu_top, length;
    struct amd_iommu *iommu;
    unsigned int bdf;

    /* is part of exclusion range inside of IOMMU virtual address space? */
    /* note: 'limit' parameter is assumed to be page-aligned */
    range_top = limit + PAGE_SIZE;
    iommu_top = max_page * PAGE_SIZE;
    if ( base < iommu_top )
    {
        if ( range_top > iommu_top )
            range_top = iommu_top;
        length = range_top - base;
        /* reserve r/w unity-mapped page entries for devices */
        /* note: these entries are part of the exclusion range */
        for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
            reserve_unity_map_for_device(seg, bdf, base, length, iw, ir);
        /* push 'base' just outside of virtual address space */
        base = iommu_top;
    }
    /* register IOMMU exclusion range settings */
    if ( limit >= iommu_top )
    {
        for_each_amd_iommu ( iommu )
            reserve_iommu_exclusion_range_all(iommu, base, limit);
    }

    return 0;
}

static int __init register_exclusion_range_for_device(
    u16 bdf, unsigned long base, unsigned long limit, u8 iw, u8 ir)
{
    int seg = 0; /* XXX */
    struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
    unsigned long range_top, iommu_top, length;
    struct amd_iommu *iommu;
    u16 req;

    iommu = find_iommu_for_device(seg, bdf);
    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("IVMD Error: No IOMMU for Dev_Id %#x!\n", bdf);
        return -ENODEV;
    }
    req = ivrs_mappings[bdf].dte_requestor_id;

    /* note: 'limit' parameter is assumed to be page-aligned */
    range_top = limit + PAGE_SIZE;
    iommu_top = max_page * PAGE_SIZE;
    if ( base < iommu_top )
    {
        if ( range_top > iommu_top )
            range_top = iommu_top;
        length = range_top - base;
        /* reserve unity-mapped page entries for device */
        /* note: these entries are part of the exclusion range */
        reserve_unity_map_for_device(seg, bdf, base, length, iw, ir);
        reserve_unity_map_for_device(seg, req, base, length, iw, ir);

        /* push 'base' just outside of virtual address space */
        base = iommu_top;
    }

    /* register IOMMU exclusion range settings for device */
    if ( limit >= iommu_top )
    {
        reserve_iommu_exclusion_range(iommu, base, limit);
        ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_ENABLED;
        ivrs_mappings[req].dte_allow_exclusion = IOMMU_CONTROL_ENABLED;
    }

    return 0;
}

static int __init register_exclusion_range_for_iommu_devices(
    struct amd_iommu *iommu,
    unsigned long base, unsigned long limit, u8 iw, u8 ir)
{
    unsigned long range_top, iommu_top, length;
    unsigned int bdf;
    u16 req;

    /* is part of exclusion range inside of IOMMU virtual address space? */
    /* note: 'limit' parameter is assumed to be page-aligned */
    range_top = limit + PAGE_SIZE;
    iommu_top = max_page * PAGE_SIZE;
    if ( base < iommu_top )
    {
        if ( range_top > iommu_top )
            range_top = iommu_top;
        length = range_top - base;
        /* reserve r/w unity-mapped page entries for devices */
        /* note: these entries are part of the exclusion range */
        for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
        {
            if ( iommu == find_iommu_for_device(iommu->seg, bdf) )
            {
                reserve_unity_map_for_device(iommu->seg, bdf, base, length,
                                             iw, ir);
                req = get_ivrs_mappings(iommu->seg)[bdf].dte_requestor_id;
                reserve_unity_map_for_device(iommu->seg, req, base, length,
                                             iw, ir);
            }
        }

        /* push 'base' just outside of virtual address space */
        base = iommu_top;
    }

    /* register IOMMU exclusion range settings */
    if ( limit >= iommu_top )
        reserve_iommu_exclusion_range_all(iommu, base, limit);
    return 0;
}

static int __init parse_ivmd_device_select(
    const struct acpi_ivrs_memory *ivmd_block,
    unsigned long base, unsigned long limit, u8 iw, u8 ir)
{
    u16 bdf;

    bdf = ivmd_block->header.device_id;
    if ( bdf >= ivrs_bdf_entries )
    {
        AMD_IOMMU_DEBUG("IVMD Error: Invalid Dev_Id %#x\n", bdf);
        return -ENODEV;
    }

    return register_exclusion_range_for_device(bdf, base, limit, iw, ir);
}

static int __init parse_ivmd_device_range(
    const struct acpi_ivrs_memory *ivmd_block,
    unsigned long base, unsigned long limit, u8 iw, u8 ir)
{
    unsigned int first_bdf, last_bdf, bdf;
    int error;

    first_bdf = ivmd_block->header.device_id;
    if ( first_bdf >= ivrs_bdf_entries )
    {
        AMD_IOMMU_DEBUG("IVMD Error: "
                        "Invalid Range_First Dev_Id %#x\n", first_bdf);
        return -ENODEV;
    }

    last_bdf = ivmd_block->aux_data;
    if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
    {
        AMD_IOMMU_DEBUG("IVMD Error: "
                        "Invalid Range_Last Dev_Id %#x\n", last_bdf);
        return -ENODEV;
    }

    for ( bdf = first_bdf, error = 0; (bdf <= last_bdf) && !error; bdf++ )
        error = register_exclusion_range_for_device(
            bdf, base, limit, iw, ir);

    return error;
}

static int __init parse_ivmd_device_iommu(
    const struct acpi_ivrs_memory *ivmd_block,
    unsigned long base, unsigned long limit, u8 iw, u8 ir)
{
    int seg = 0; /* XXX */
    struct amd_iommu *iommu;

    /* find target IOMMU */
    iommu = find_iommu_from_bdf_cap(seg, ivmd_block->header.device_id,
                                    ivmd_block->aux_data);
    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("IVMD Error: No IOMMU for Dev_Id %#x Cap %#x\n",
                        ivmd_block->header.device_id, ivmd_block->aux_data);
        return -ENODEV;
    }

    return register_exclusion_range_for_iommu_devices(
        iommu, base, limit, iw, ir);
}

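/*
 * Parse one IVMD (memory definition) block: page-align the range, derive
 * the unity-map read/write permissions from the block flags, and register
 * the range for the device(s) the block type designates.
 */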
static int __init parse_ivmd_block(const struct acpi_ivrs_memory *ivmd_block)
{
    unsigned long start_addr, mem_length, base, limit;
    u8 iw, ir;

    if ( ivmd_block->header.length < sizeof(*ivmd_block) )
    {
        AMD_IOMMU_DEBUG("IVMD Error: Invalid Block Length!\n");
        return -ENODEV;
    }

    start_addr = (unsigned long)ivmd_block->start_address;
    mem_length = (unsigned long)ivmd_block->memory_length;
    base = start_addr & PAGE_MASK;
    limit = (start_addr + mem_length - 1) & PAGE_MASK;

    AMD_IOMMU_DEBUG("IVMD Block: type %#x phys %#lx len %#lx\n",
                    ivmd_block->header.type, start_addr, mem_length);

    if ( ivmd_block->header.flags & ACPI_IVMD_EXCLUSION_RANGE )
        iw = ir = IOMMU_CONTROL_ENABLED;
    else if ( ivmd_block->header.flags & ACPI_IVMD_UNITY )
    {
        iw = ivmd_block->header.flags & ACPI_IVMD_WRITE ?
            IOMMU_CONTROL_ENABLED : IOMMU_CONTROL_DISABLED;
        ir = ivmd_block->header.flags & ACPI_IVMD_READ ?
            IOMMU_CONTROL_ENABLED : IOMMU_CONTROL_DISABLED;
    }
    else
    {
        AMD_IOMMU_DEBUG("IVMD Error: Invalid Flag Field!\n");
        return -ENODEV;
    }

    switch ( ivmd_block->header.type )
    {
    case ACPI_IVRS_TYPE_MEMORY_ALL:
        return register_exclusion_range_for_all_devices(
            base, limit, iw, ir);

    case ACPI_IVRS_TYPE_MEMORY_ONE:
        return parse_ivmd_device_select(ivmd_block,
                                        base, limit, iw, ir);

    case ACPI_IVRS_TYPE_MEMORY_RANGE:
        return parse_ivmd_device_range(ivmd_block,
                                       base, limit, iw, ir);

    case ACPI_IVRS_TYPE_MEMORY_IOMMU:
        return parse_ivmd_device_iommu(ivmd_block,
                                       base, limit, iw, ir);

    default:
        AMD_IOMMU_DEBUG("IVMD Error: Invalid Block Type!\n");
        return -ENODEV;
    }
}

static u16 __init parse_ivhd_device_padding(
    u16 pad_length, u16 header_length, u16 block_length)
{
    if ( header_length < (block_length + pad_length) )
    {
        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n");
        return 0;
    }

    return pad_length;
}

static u16 __init parse_ivhd_device_select(
    const struct acpi_ivrs_device4 *select, struct amd_iommu *iommu)
{
    u16 bdf;

    bdf = select->header.id;
    if ( bdf >= ivrs_bdf_entries )
    {
        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Dev_Id %#x\n", bdf);
        return 0;
    }

    add_ivrs_mapping_entry(bdf, bdf, select->header.data_setting, iommu);

    return sizeof(*select);
}

static u16 __init parse_ivhd_device_range(
    const struct acpi_ivhd_device_range *range,
    u16 header_length, u16 block_length, struct amd_iommu *iommu)
{
    unsigned int dev_length, first_bdf, last_bdf, bdf;

    dev_length = sizeof(*range);
    if ( header_length < (block_length + dev_length) )
    {
        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n");
        return 0;
    }

    if ( range->end.header.type != ACPI_IVRS_TYPE_END )
    {
        AMD_IOMMU_DEBUG("IVHD Error: "
                        "Invalid Range: End_Type %#x\n",
                        range->end.header.type);
        return 0;
    }

    first_bdf = range->start.header.id;
    if ( first_bdf >= ivrs_bdf_entries )
    {
        AMD_IOMMU_DEBUG("IVHD Error: "
                        "Invalid Range: First Dev_Id %#x\n", first_bdf);
        return 0;
    }

    last_bdf = range->end.header.id;
    if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
    {
        AMD_IOMMU_DEBUG("IVHD Error: "
                        "Invalid Range: Last Dev_Id %#x\n", last_bdf);
        return 0;
    }

    AMD_IOMMU_DEBUG(" Dev_Id Range: %#x -> %#x\n", first_bdf, last_bdf);

    for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
        add_ivrs_mapping_entry(bdf, bdf, range->start.header.data_setting,
                               iommu);

    return dev_length;
}

static u16 __init parse_ivhd_device_alias(
    const struct acpi_ivrs_device8a *alias,
    u16 header_length, u16 block_length, struct amd_iommu *iommu)
{
    u16 dev_length, alias_id, bdf;

    dev_length = sizeof(*alias);
    if ( header_length < (block_length + dev_length) )
    {
        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n");
        return 0;
    }

    bdf = alias->header.id;
    if ( bdf >= ivrs_bdf_entries )
    {
        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Dev_Id %#x\n", bdf);
        return 0;
    }

    alias_id = alias->used_id;
    if ( alias_id >= ivrs_bdf_entries )
    {
        AMD_IOMMU_DEBUG("IVHD Error: Invalid Alias Dev_Id %#x\n", alias_id);
        return 0;
    }

    AMD_IOMMU_DEBUG(" Dev_Id Alias: %#x\n", alias_id);

    add_ivrs_mapping_entry(bdf, alias_id, alias->header.data_setting, iommu);

    return dev_length;
}

static u16 __init parse_ivhd_device_alias_range(
    const struct acpi_ivhd_device_alias_range *range,
    u16 header_length, u16 block_length, struct amd_iommu *iommu)
{
    unsigned int dev_length, first_bdf, last_bdf, alias_id, bdf;

    dev_length = sizeof(*range);
    if ( header_length < (block_length + dev_length) )
    {
        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n");
        return 0;
    }

    if ( range->end.header.type != ACPI_IVRS_TYPE_END )
    {
        AMD_IOMMU_DEBUG("IVHD Error: "
                        "Invalid Range: End_Type %#x\n",
                        range->end.header.type);
        return 0;
    }

    first_bdf = range->alias.header.id;
    if ( first_bdf >= ivrs_bdf_entries )
    {
        AMD_IOMMU_DEBUG("IVHD Error: "
                        "Invalid Range: First Dev_Id %#x\n", first_bdf);
        return 0;
    }

    last_bdf = range->end.header.id;
    if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
    {
        AMD_IOMMU_DEBUG(
            "IVHD Error: Invalid Range: Last Dev_Id %#x\n", last_bdf);
        return 0;
    }

    alias_id = range->alias.used_id;
    if ( alias_id >= ivrs_bdf_entries )
    {
        AMD_IOMMU_DEBUG("IVHD Error: Invalid Alias Dev_Id %#x\n", alias_id);
        return 0;
    }

    AMD_IOMMU_DEBUG(" Dev_Id Range: %#x -> %#x alias %#x\n",
                    first_bdf, last_bdf, alias_id);

    for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
        add_ivrs_mapping_entry(bdf, alias_id, range->alias.header.data_setting,
                               iommu);

    return dev_length;
}

static u16 __init parse_ivhd_device_extended(
    const struct acpi_ivrs_device8b *ext,
    u16 header_length, u16 block_length, struct amd_iommu *iommu)
{
    u16 dev_length, bdf;

    dev_length = sizeof(*ext);
    if ( header_length < (block_length + dev_length) )
    {
        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n");
        return 0;
    }

    bdf = ext->header.id;
    if ( bdf >= ivrs_bdf_entries )
    {
        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Dev_Id %#x\n", bdf);
        return 0;
    }

    add_ivrs_mapping_entry(bdf, bdf, ext->header.data_setting, iommu);

    return dev_length;
}

static u16 __init parse_ivhd_device_extended_range(
    const struct acpi_ivhd_device_extended_range *range,
    u16 header_length, u16 block_length, struct amd_iommu *iommu)
{
    unsigned int dev_length, first_bdf, last_bdf, bdf;

    dev_length = sizeof(*range);
    if ( header_length < (block_length + dev_length) )
    {
        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n");
        return 0;
    }

    if ( range->end.header.type != ACPI_IVRS_TYPE_END )
    {
        AMD_IOMMU_DEBUG("IVHD Error: "
                        "Invalid Range: End_Type %#x\n",
                        range->end.header.type);
        return 0;
    }

    first_bdf = range->extended.header.id;
    if ( first_bdf >= ivrs_bdf_entries )
    {
        AMD_IOMMU_DEBUG("IVHD Error: "
                        "Invalid Range: First Dev_Id %#x\n", first_bdf);
        return 0;
    }

    last_bdf = range->end.header.id;
    if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
    {
        AMD_IOMMU_DEBUG("IVHD Error: "
                        "Invalid Range: Last Dev_Id %#x\n", last_bdf);
        return 0;
    }

    AMD_IOMMU_DEBUG(" Dev_Id Range: %#x -> %#x\n",
                    first_bdf, last_bdf);

    for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
        add_ivrs_mapping_entry(bdf, bdf, range->extended.header.data_setting,
                               iommu);

    return dev_length;
}

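/*
 * Parse an "ivrs_ioapic[<id>]=[<seg>:]<bus>:<dev>.<func>" command line
 * override (e.g. ivrs_ioapic[6]=00:14.0, an illustrative value), recording
 * the given SBDF for the IO-APIC with the given ID.
 */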
static int __init parse_ivrs_ioapic(const char *str)
{
    const char *s = str;
    unsigned long id;
    unsigned int seg, bus, dev, func;
    unsigned int idx;

    if ( *s != '[' )
        return -EINVAL;

    id = simple_strtoul(s + 1, &s, 0);
    if ( *s != ']' || *++s != '=' )
        return -EINVAL;

    s = parse_pci(s + 1, &seg, &bus, &dev, &func);
    if ( !s || *s )
        return -EINVAL;

    idx = ioapic_id_to_index(id);
    if ( idx == MAX_IO_APICS )
    {
        idx = get_next_ioapic_sbdf_index();
        if ( idx == MAX_IO_APICS )
        {
            printk(XENLOG_ERR "Error: %s: Too many IO APICs.\n", __func__);
            return -EINVAL;
        }
    }

    ioapic_sbdf[idx].bdf = PCI_BDF(bus, dev, func);
    ioapic_sbdf[idx].seg = seg;
    ioapic_sbdf[idx].id = id;
    ioapic_sbdf[idx].cmdline = true;

    return 0;
}
custom_param("ivrs_ioapic[", parse_ivrs_ioapic);

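/*
 * Parse an "ivrs_hpet[<id>]=[<seg>:]<bus>:<dev>.<func>" command line
 * override, recording the given SBDF for the HPET with the given ID.
 */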
static int __init parse_ivrs_hpet(const char *str)
{
    const char *s = str;
    unsigned long id;
    unsigned int seg, bus, dev, func;

    if ( *s != '[' )
        return -EINVAL;

    id = simple_strtoul(s + 1, &s, 0);
    if ( id != (typeof(hpet_sbdf.id))id || *s != ']' || *++s != '=' )
        return -EINVAL;

    s = parse_pci(s + 1, &seg, &bus, &dev, &func);
    if ( !s || *s )
        return -EINVAL;

    hpet_sbdf.id = id;
    hpet_sbdf.bdf = PCI_BDF(bus, dev, func);
    hpet_sbdf.seg = seg;
    hpet_sbdf.init = HPET_CMDL;

    return 0;
}
custom_param("ivrs_hpet[", parse_ivrs_hpet);

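/*
 * IVHD "special" entries describe non-PCI interrupt sources (IO-APICs and
 * the HPET) together with the device ID the IOMMU sees their requests
 * under.  Command line overrides, when present, take precedence over the
 * firmware-provided entries.
 */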
static u16 __init parse_ivhd_device_special(
    const struct acpi_ivrs_device8c *special, u16 seg,
    u16 header_length, u16 block_length, struct amd_iommu *iommu)
{
    u16 dev_length, bdf;
    unsigned int apic, idx;

    dev_length = sizeof(*special);
    if ( header_length < (block_length + dev_length) )
    {
        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n");
        return 0;
    }

    bdf = special->used_id;
    if ( bdf >= ivrs_bdf_entries )
    {
        AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Dev_Id %#x\n", bdf);
        return 0;
    }

    AMD_IOMMU_DEBUG("IVHD Special: %04x:%02x:%02x.%u variety %#x handle %#x\n",
                    seg, PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf),
                    special->variety, special->handle);
    add_ivrs_mapping_entry(bdf, bdf, special->header.data_setting, iommu);

    switch ( special->variety )
    {
    case ACPI_IVHD_IOAPIC:
        if ( !iommu_intremap )
            break;
        /*
         * Some BIOSes have broken IO-APIC entries, so we check for IVRS
         * consistency here --- whether the entry's IO-APIC ID is valid and
         * whether there are conflicting/duplicated entries.
         */
        for ( idx = 0; idx < nr_ioapic_sbdf; idx++ )
        {
            if ( ioapic_sbdf[idx].bdf == bdf &&
                 ioapic_sbdf[idx].seg == seg &&
                 ioapic_sbdf[idx].cmdline )
                break;
        }
        if ( idx < nr_ioapic_sbdf )
        {
            AMD_IOMMU_DEBUG("IVHD: Command line override present for IO-APIC %#x "
                            "(IVRS: %#x devID %04x:%02x:%02x.%u)\n",
                            ioapic_sbdf[idx].id, special->handle, seg,
                            PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf));
            break;
        }

        for ( apic = 0; apic < nr_ioapics; apic++ )
        {
            if ( IO_APIC_ID(apic) != special->handle )
                continue;

            idx = ioapic_id_to_index(special->handle);
            if ( idx != MAX_IO_APICS && ioapic_sbdf[idx].cmdline )
                AMD_IOMMU_DEBUG("IVHD: Command line override present for IO-APIC %#x\n",
                                special->handle);
            else if ( idx != MAX_IO_APICS && ioapic_sbdf[idx].pin_2_idx )
            {
                if ( ioapic_sbdf[idx].bdf == bdf &&
                     ioapic_sbdf[idx].seg == seg )
                    AMD_IOMMU_DEBUG("IVHD Warning: Duplicate IO-APIC %#x entries\n",
                                    special->handle);
                else
                {
                    printk(XENLOG_ERR "IVHD Error: Conflicting IO-APIC %#x entries\n",
                           special->handle);
                    if ( amd_iommu_perdev_intremap )
                        return 0;
                }
            }
            else
            {
                idx = get_next_ioapic_sbdf_index();
                if ( idx == MAX_IO_APICS )
                {
                    printk(XENLOG_ERR "IVHD Error: Too many IO APICs.\n");
                    return 0;
                }

                /* set device id of ioapic */
                ioapic_sbdf[idx].bdf = bdf;
                ioapic_sbdf[idx].seg = seg;
                ioapic_sbdf[idx].id = special->handle;

                ioapic_sbdf[idx].pin_2_idx = xmalloc_array(
                    u16, nr_ioapic_entries[apic]);
                if ( nr_ioapic_entries[apic] &&
                     !ioapic_sbdf[idx].pin_2_idx )
                {
                    printk(XENLOG_ERR "IVHD Error: Out of memory\n");
                    return 0;
                }
                memset(ioapic_sbdf[idx].pin_2_idx, -1,
                       nr_ioapic_entries[apic] *
                       sizeof(*ioapic_sbdf->pin_2_idx));
            }
            break;
        }
        if ( apic == nr_ioapics )
        {
            printk(XENLOG_ERR "IVHD Error: Invalid IO-APIC %#x\n",
                   special->handle);
            return 0;
        }
        break;
    case ACPI_IVHD_HPET:
        switch ( hpet_sbdf.init )
        {
        case HPET_IVHD:
            printk(XENLOG_WARNING "Only one IVHD HPET entry is supported.\n");
            break;
        case HPET_CMDL:
            AMD_IOMMU_DEBUG("IVHD: Command line override present for HPET %#x "
                            "(IVRS: %#x devID %04x:%02x:%02x.%u)\n",
                            hpet_sbdf.id, special->handle, seg, PCI_BUS(bdf),
                            PCI_SLOT(bdf), PCI_FUNC(bdf));
            break;
        case HPET_NONE:
            /* set device id of hpet */
            hpet_sbdf.id = special->handle;
            hpet_sbdf.bdf = bdf;
            hpet_sbdf.seg = seg;
            hpet_sbdf.init = HPET_IVHD;
            break;
        default:
            ASSERT_UNREACHABLE();
            break;
        }
        break;
    default:
        printk(XENLOG_ERR "Unrecognized IVHD special variety %#x\n",
               special->variety);
        return 0;
    }

    return dev_length;
}

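/*
 * Type 10h IVHD blocks end just before the EFR image field, while type
 * 11h blocks use the full structure; a zero return marks an unrecognized
 * block type.
 */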
static inline size_t
get_ivhd_header_size(const struct acpi_ivrs_hardware *ivhd_block)
{
    switch ( ivhd_block->header.type )
    {
    case ACPI_IVRS_TYPE_HARDWARE:
        return offsetof(struct acpi_ivrs_hardware, efr_image);
    case ACPI_IVRS_TYPE_HARDWARE_11H:
        return sizeof(struct acpi_ivrs_hardware);
    }
    return 0;
}

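/*
 * Parse one IVHD (hardware definition) block: locate the IOMMU it
 * describes, then walk its variable-length device entries.  Each entry
 * handler returns the number of bytes it consumed, or 0 on error, which
 * aborts the walk.
 */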
static int __init parse_ivhd_block(const struct acpi_ivrs_hardware *ivhd_block)
{
    const union acpi_ivhd_device *ivhd_device;
    u16 block_length, dev_length;
    size_t hdr_size = get_ivhd_header_size(ivhd_block);
    struct amd_iommu *iommu;

    if ( ivhd_block->header.length < hdr_size )
    {
        AMD_IOMMU_DEBUG("IVHD Error: Invalid Block Length!\n");
        return -ENODEV;
    }

    iommu = find_iommu_from_bdf_cap(ivhd_block->pci_segment_group,
                                    ivhd_block->header.device_id,
                                    ivhd_block->capability_offset);
    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("IVHD Error: No IOMMU for Dev_Id %#x Cap %#x\n",
                        ivhd_block->header.device_id,
                        ivhd_block->capability_offset);
        return -ENODEV;
    }

    /* parse Device Entries */
    block_length = hdr_size;
    while ( ivhd_block->header.length >=
            (block_length + sizeof(struct acpi_ivrs_de_header)) )
    {
        ivhd_device = (const void *)((const u8 *)ivhd_block + block_length);

        AMD_IOMMU_DEBUG("IVHD Device Entry: type %#x id %#x flags %#x\n",
                        ivhd_device->header.type, ivhd_device->header.id,
                        ivhd_device->header.data_setting);

        switch ( ivhd_device->header.type )
        {
        case ACPI_IVRS_TYPE_PAD4:
            dev_length = parse_ivhd_device_padding(
                sizeof(u32),
                ivhd_block->header.length, block_length);
            break;
        case ACPI_IVRS_TYPE_PAD8:
            dev_length = parse_ivhd_device_padding(
                sizeof(u64),
                ivhd_block->header.length, block_length);
            break;
        case ACPI_IVRS_TYPE_SELECT:
            dev_length = parse_ivhd_device_select(&ivhd_device->select, iommu);
            break;
        case ACPI_IVRS_TYPE_START:
            dev_length = parse_ivhd_device_range(
                &ivhd_device->range,
                ivhd_block->header.length, block_length, iommu);
            break;
        case ACPI_IVRS_TYPE_ALIAS_SELECT:
            dev_length = parse_ivhd_device_alias(
                &ivhd_device->alias,
                ivhd_block->header.length, block_length, iommu);
            break;
        case ACPI_IVRS_TYPE_ALIAS_START:
            dev_length = parse_ivhd_device_alias_range(
                &ivhd_device->alias_range,
                ivhd_block->header.length, block_length, iommu);
            break;
        case ACPI_IVRS_TYPE_EXT_SELECT:
            dev_length = parse_ivhd_device_extended(
                &ivhd_device->extended,
                ivhd_block->header.length, block_length, iommu);
            break;
        case ACPI_IVRS_TYPE_EXT_START:
            dev_length = parse_ivhd_device_extended_range(
                &ivhd_device->extended_range,
                ivhd_block->header.length, block_length, iommu);
            break;
        case ACPI_IVRS_TYPE_SPECIAL:
            dev_length = parse_ivhd_device_special(
                &ivhd_device->special, ivhd_block->pci_segment_group,
                ivhd_block->header.length, block_length, iommu);
            break;
        default:
            AMD_IOMMU_DEBUG("IVHD Error: Invalid Device Type!\n");
            dev_length = 0;
            break;
        }

        block_length += dev_length;
        if ( !dev_length )
            return -ENODEV;
    }

    return 0;
}

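/* Pretty-print the ACPI table header; only invoked when iommu_debug is set. */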
static void __init dump_acpi_table_header(struct acpi_table_header *table)
{
    int i;

    AMD_IOMMU_DEBUG("ACPI Table:\n");
    AMD_IOMMU_DEBUG(" Signature ");
    for ( i = 0; i < ACPI_NAME_SIZE; i++ )
        printk("%c", table->signature[i]);
    printk("\n");

    AMD_IOMMU_DEBUG(" Length %#x\n", table->length);
    AMD_IOMMU_DEBUG(" Revision %#x\n", table->revision);
    AMD_IOMMU_DEBUG(" CheckSum %#x\n", table->checksum);

    AMD_IOMMU_DEBUG(" OEM_Id ");
    for ( i = 0; i < ACPI_OEM_ID_SIZE; i++ )
        printk("%c", table->oem_id[i]);
    printk("\n");

    AMD_IOMMU_DEBUG(" OEM_Table_Id ");
    for ( i = 0; i < ACPI_OEM_TABLE_ID_SIZE; i++ )
        printk("%c", table->oem_table_id[i]);
    printk("\n");

    AMD_IOMMU_DEBUG(" OEM_Revision %#x\n", table->oem_revision);

    AMD_IOMMU_DEBUG(" Creator_Id ");
    for ( i = 0; i < ACPI_NAME_SIZE; i++ )
        printk("%c", table->asl_compiler_id[i]);
    printk("\n");

    AMD_IOMMU_DEBUG(" Creator_Revision %#x\n",
                    table->asl_compiler_revision);
}

#define to_ivhd_block(hdr) \
    container_of(hdr, const struct acpi_ivrs_hardware, header)
#define to_ivmd_block(hdr) \
    container_of(hdr, const struct acpi_ivrs_memory, header)

static inline bool_t is_ivhd_block(u8 type)
{
    return (type == ACPI_IVRS_TYPE_HARDWARE ||
            type == ACPI_IVRS_TYPE_HARDWARE_11H);
}

static inline bool_t is_ivmd_block(u8 type)
{
    return (type == ACPI_IVRS_TYPE_MEMORY_ALL ||
            type == ACPI_IVRS_TYPE_MEMORY_ONE ||
            type == ACPI_IVRS_TYPE_MEMORY_RANGE ||
            type == ACPI_IVRS_TYPE_MEMORY_IOMMU);
}

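/*
 * Parse the whole IVRS table: handle every IVHD block of the selected
 * type and every IVMD block, then verify that each IO-APIC was mentioned
 * in the table and that the southbridge IO-APIC (device 00:14.0 on
 * segment 0) was among them.
 */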
static int __init parse_ivrs_table(struct acpi_table_header *table)
{
    const struct acpi_ivrs_header *ivrs_block;
    unsigned long length;
    unsigned int apic;
    bool_t sb_ioapic = !iommu_intremap;
    int error = 0;

    BUG_ON(!table);

    if ( iommu_debug )
        dump_acpi_table_header(table);

    /* parse IVRS blocks */
    length = sizeof(struct acpi_table_ivrs);
    while ( (error == 0) && (table->length > (length + sizeof(*ivrs_block))) )
    {
        ivrs_block = (struct acpi_ivrs_header *)((u8 *)table + length);

        AMD_IOMMU_DEBUG("IVRS Block: type %#x flags %#x len %#x id %#x\n",
                        ivrs_block->type, ivrs_block->flags,
                        ivrs_block->length, ivrs_block->device_id);

        if ( table->length < (length + ivrs_block->length) )
        {
            AMD_IOMMU_DEBUG("IVRS Error: "
                            "Table Length Exceeded: %#x -> %#lx\n",
                            table->length,
                            (length + ivrs_block->length));
            return -ENODEV;
        }

        if ( ivrs_block->type == ivhd_type )
            error = parse_ivhd_block(to_ivhd_block(ivrs_block));
        else if ( is_ivmd_block(ivrs_block->type) )
            error = parse_ivmd_block(to_ivmd_block(ivrs_block));
        length += ivrs_block->length;
    }

    /* Each IO-APIC must have been mentioned in the table. */
    for ( apic = 0; !error && iommu_intremap && apic < nr_ioapics; ++apic )
    {
        unsigned int idx;

        if ( !nr_ioapic_entries[apic] )
            continue;

        idx = ioapic_id_to_index(IO_APIC_ID(apic));
        if ( idx == MAX_IO_APICS )
        {
            printk(XENLOG_ERR "IVHD Error: no information for IO-APIC %#x\n",
                   IO_APIC_ID(apic));
            if ( amd_iommu_perdev_intremap )
                return -ENXIO;
            /* Don't index ioapic_sbdf[] with the out-of-range sentinel. */
            continue;
        }

        if ( !ioapic_sbdf[idx].seg &&
             /* SB IO-APIC is always on this device in AMD systems. */
             ioapic_sbdf[idx].bdf == PCI_BDF(0, 0x14, 0) )
            sb_ioapic = 1;

        if ( ioapic_sbdf[idx].pin_2_idx )
            continue;

        ioapic_sbdf[idx].pin_2_idx = xmalloc_array(
            u16, nr_ioapic_entries[apic]);
        if ( ioapic_sbdf[idx].pin_2_idx )
            memset(ioapic_sbdf[idx].pin_2_idx, -1,
                   nr_ioapic_entries[apic] * sizeof(*ioapic_sbdf->pin_2_idx));
        else
        {
            printk(XENLOG_ERR "IVHD Error: Out of memory\n");
            error = -ENOMEM;
        }
    }

    if ( !error && !sb_ioapic )
    {
        if ( amd_iommu_perdev_intremap )
            error = -ENXIO;
        printk("%sNo southbridge IO-APIC found in IVRS table\n",
               amd_iommu_perdev_intremap ? XENLOG_ERR : XENLOG_WARNING);
    }

    return error;
}

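/*
 * Early detection pass: validate the table checksum (the byte sum of the
 * entire table must be zero), then probe each type-10h IVHD block via
 * amd_iommu_detect_one_acpi().
 */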
static int __init detect_iommu_acpi(struct acpi_table_header *table)
{
    const struct acpi_ivrs_header *ivrs_block;
    unsigned long i;
    unsigned long length = sizeof(struct acpi_table_ivrs);
    u8 checksum, *raw_table;

    /* validate checksum: sum of entire table == 0 */
    checksum = 0;
    raw_table = (u8 *)table;
    for ( i = 0; i < table->length; i++ )
        checksum += raw_table[i];
    if ( checksum )
    {
        AMD_IOMMU_DEBUG("IVRS Error: Invalid Checksum %#x\n", checksum);
        return -ENODEV;
    }

    while ( table->length > (length + sizeof(*ivrs_block)) )
    {
        ivrs_block = (struct acpi_ivrs_header *)((u8 *)table + length);
        if ( table->length < (length + ivrs_block->length) )
            return -ENODEV;
        if ( ivrs_block->type == ACPI_IVRS_TYPE_HARDWARE &&
             amd_iommu_detect_one_acpi(to_ivhd_block(ivrs_block)) != 0 )
            return -ENODEV;
        length += ivrs_block->length;
    }
    return 0;
}

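/*
 * Scanning pass used for sizing the IVRS mapping arrays: walk all device
 * entries and track the highest device ID mentioned anywhere.
 */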
#define UPDATE_LAST_BDF(x) do {\
    if ((x) > last_bdf) \
        last_bdf = (x); \
    } while(0)

static int __init get_last_bdf_ivhd(
    const struct acpi_ivrs_hardware *ivhd_block)
{
    const union acpi_ivhd_device *ivhd_device;
    u16 block_length, dev_length;
    size_t hdr_size = get_ivhd_header_size(ivhd_block);
    int last_bdf = 0;

    if ( ivhd_block->header.length < hdr_size )
    {
        AMD_IOMMU_DEBUG("IVHD Error: Invalid Block Length!\n");
        return -ENODEV;
    }

    block_length = hdr_size;
    while ( ivhd_block->header.length >=
            (block_length + sizeof(struct acpi_ivrs_de_header)) )
    {
        ivhd_device = (const void *)((const u8 *)ivhd_block + block_length);

        switch ( ivhd_device->header.type )
        {
        case ACPI_IVRS_TYPE_PAD4:
            dev_length = sizeof(u32);
            break;
        case ACPI_IVRS_TYPE_PAD8:
            dev_length = sizeof(u64);
            break;
        case ACPI_IVRS_TYPE_SELECT:
            UPDATE_LAST_BDF(ivhd_device->select.header.id);
            dev_length = sizeof(ivhd_device->header);
            break;
        case ACPI_IVRS_TYPE_ALIAS_SELECT:
            UPDATE_LAST_BDF(ivhd_device->alias.header.id);
            dev_length = sizeof(ivhd_device->alias);
            break;
        case ACPI_IVRS_TYPE_EXT_SELECT:
            UPDATE_LAST_BDF(ivhd_device->extended.header.id);
            dev_length = sizeof(ivhd_device->extended);
            break;
        case ACPI_IVRS_TYPE_START:
            UPDATE_LAST_BDF(ivhd_device->range.end.header.id);
            dev_length = sizeof(ivhd_device->range);
            break;
        case ACPI_IVRS_TYPE_ALIAS_START:
            UPDATE_LAST_BDF(ivhd_device->alias_range.end.header.id);
            dev_length = sizeof(ivhd_device->alias_range);
            break;
        case ACPI_IVRS_TYPE_EXT_START:
            UPDATE_LAST_BDF(ivhd_device->extended_range.end.header.id);
            dev_length = sizeof(ivhd_device->extended_range);
            break;
        case ACPI_IVRS_TYPE_SPECIAL:
            UPDATE_LAST_BDF(ivhd_device->special.used_id);
            dev_length = sizeof(ivhd_device->special);
            break;
        default:
            AMD_IOMMU_DEBUG("IVHD Error: Invalid Device Type!\n");
            dev_length = 0;
            break;
        }

        block_length += dev_length;
        if ( !dev_length )
            return -ENODEV;
    }

    return last_bdf;
}

static int __init get_last_bdf_acpi(struct acpi_table_header *table)
{
    const struct acpi_ivrs_header *ivrs_block;
    unsigned long length = sizeof(struct acpi_table_ivrs);
    int last_bdf = 0;

    while ( table->length > (length + sizeof(*ivrs_block)) )
    {
        ivrs_block = (struct acpi_ivrs_header *)((u8 *)table + length);
        if ( table->length < (length + ivrs_block->length) )
            return -ENODEV;
        if ( ivrs_block->type == ivhd_type )
        {
            int ret = get_last_bdf_ivhd(to_ivhd_block(ivrs_block));

            if ( ret < 0 )
                return ret;
            UPDATE_LAST_BDF(ret);
        }
        length += ivrs_block->length;
    }

    return last_bdf;
}

int __init amd_iommu_detect_acpi(void)
{
    return acpi_table_parse(ACPI_SIG_IVRS, detect_iommu_acpi);
}

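/*
 * Number of IVRS device entries to allocate: the last BDF found, rounded
 * up to a whole device -- (ret | PCI_FUNC(~0)) + 1 rounds to the next
 * multiple of 8, covering all functions of that device.
 */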
int __init amd_iommu_get_ivrs_dev_entries(void)
{
    int ret = acpi_table_parse(ACPI_SIG_IVRS, get_last_bdf_acpi);

    return ret < 0 ? ret : (ret | PCI_FUNC(~0)) + 1;
}

int __init amd_iommu_update_ivrs_mapping_acpi(void)
{
    return acpi_table_parse(ACPI_SIG_IVRS, parse_ivrs_table);
}

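/*
 * Pick the IVHD block type to parse: scan all blocks and select the
 * highest-numbered supported type present (type 11h is preferred over
 * type 10h when firmware provides both).
 */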
static int __init
get_supported_ivhd_type(struct acpi_table_header *table)
{
    size_t length = sizeof(struct acpi_table_ivrs);
    const struct acpi_ivrs_header *ivrs_block, *blk = NULL;

    while ( table->length > (length + sizeof(*ivrs_block)) )
    {
        ivrs_block = (struct acpi_ivrs_header *)((u8 *)table + length);

        if ( table->length < (length + ivrs_block->length) )
        {
            AMD_IOMMU_DEBUG("IVRS Error: "
                            "Table Length Exceeded: %#x -> %#lx\n",
                            table->length,
                            (length + ivrs_block->length));
            return -ENODEV;
        }

        if ( is_ivhd_block(ivrs_block->type) &&
            (!blk || blk->type < ivrs_block->type) )
        {
            AMD_IOMMU_DEBUG("IVRS Block: Found type %#x flags %#x len %#x id %#x\n",
                            ivrs_block->type, ivrs_block->flags,
                            ivrs_block->length, ivrs_block->device_id);
            blk = ivrs_block;
        }
        length += ivrs_block->length;
    }

    if ( !blk )
    {
        printk(XENLOG_ERR "Cannot find supported IVHD type.\n");
        return -ENODEV;
    }

    AMD_IOMMU_DEBUG("Using IVHD type %#x\n", blk->type);

    return blk->type;
}

int __init amd_iommu_get_supported_ivhd_type(void)
{
    return acpi_table_parse(ACPI_SIG_IVRS, get_supported_ivhd_type);
}