/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/sched.h>
#include <xen/iommu.h>
#include <xen/paging.h>
#include <xen/guest_access.h>
#include <xen/event.h>
#include <xen/param.h>
#include <xen/softirq.h>
#include <xen/keyhandler.h>
#include <xsm/xsm.h>

#ifdef CONFIG_X86
#include <asm/e820.h>
#endif

unsigned int __read_mostly iommu_dev_iotlb_timeout = 1000;
integer_param("iommu_dev_iotlb_timeout", iommu_dev_iotlb_timeout);

bool __initdata iommu_enable = true;
bool __read_mostly iommu_enabled;
bool __read_mostly force_iommu;
bool __read_mostly iommu_verbose;
static bool __read_mostly iommu_crash_disable;

#define IOMMU_quarantine_none         0 /* aka false */
#define IOMMU_quarantine_basic        1 /* aka true */
#define IOMMU_quarantine_scratch_page 2
#ifdef CONFIG_HAS_PCI
uint8_t __read_mostly iommu_quarantine =
# if defined(CONFIG_IOMMU_QUARANTINE_NONE)
    IOMMU_quarantine_none;
# elif defined(CONFIG_IOMMU_QUARANTINE_BASIC)
    IOMMU_quarantine_basic;
# elif defined(CONFIG_IOMMU_QUARANTINE_SCRATCH_PAGE)
    IOMMU_quarantine_scratch_page;
# endif
#else
# define iommu_quarantine IOMMU_quarantine_none
#endif /* CONFIG_HAS_PCI */

static bool __hwdom_initdata iommu_hwdom_none;
bool __hwdom_initdata iommu_hwdom_strict;
bool __read_mostly iommu_hwdom_passthrough;
bool __hwdom_initdata iommu_hwdom_inclusive;
int8_t __hwdom_initdata iommu_hwdom_reserved = -1;

#ifndef iommu_hap_pt_share
bool __read_mostly iommu_hap_pt_share = true;
#endif

bool __read_mostly iommu_debug;

DEFINE_PER_CPU(bool, iommu_dont_flush_iotlb);

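/*
 * Parse the "iommu=" command line option.  The value is a comma separated
 * list of sub-options; unrecognised entries are still skipped over, but
 * cause -EINVAL to be returned to flag the parameter as (partly) bad.
 */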
static int __init cf_check parse_iommu_param(const char *s)
{
    const char *ss;
    int val, rc = 0;

    do {
        ss = strchr(s, ',');
        if ( !ss )
            ss = strchr(s, '\0');

        if ( (val = parse_bool(s, ss)) >= 0 )
            iommu_enable = val;
        else if ( (val = parse_boolean("force", s, ss)) >= 0 ||
                  (val = parse_boolean("required", s, ss)) >= 0 )
            force_iommu = val;
#ifdef CONFIG_HAS_PCI
        else if ( (val = parse_boolean("quarantine", s, ss)) >= 0 )
            iommu_quarantine = val;
        else if ( ss == s + 23 && !strncmp(s, "quarantine=scratch-page", 23) )
            iommu_quarantine = IOMMU_quarantine_scratch_page;
#endif
        else if ( (val = parse_boolean("igfx", s, ss)) >= 0 )
#ifdef CONFIG_INTEL_IOMMU
            iommu_igfx = val;
#else
            no_config_param("INTEL_IOMMU", "iommu", s, ss);
#endif
        else if ( (val = parse_boolean("qinval", s, ss)) >= 0 )
#ifdef CONFIG_INTEL_IOMMU
            iommu_qinval = val;
#else
            no_config_param("INTEL_IOMMU", "iommu", s, ss);
#endif
#ifdef CONFIG_X86
        else if ( (val = parse_boolean("superpages", s, ss)) >= 0 )
            iommu_superpages = val;
#endif
        else if ( (val = parse_boolean("verbose", s, ss)) >= 0 )
            iommu_verbose = val;
#ifndef iommu_snoop
        else if ( (val = parse_boolean("snoop", s, ss)) >= 0 )
            iommu_snoop = val;
#endif
#ifndef iommu_intremap
        else if ( (val = parse_boolean("intremap", s, ss)) >= 0 )
            iommu_intremap = val ? iommu_intremap_full : iommu_intremap_off;
#endif
#ifndef iommu_intpost
        else if ( (val = parse_boolean("intpost", s, ss)) >= 0 )
            iommu_intpost = val;
#endif
#ifdef CONFIG_KEXEC
        else if ( (val = parse_boolean("crash-disable", s, ss)) >= 0 )
            iommu_crash_disable = val;
#endif
        else if ( (val = parse_boolean("debug", s, ss)) >= 0 )
        {
            iommu_debug = val;
            if ( val )
                iommu_verbose = true;
        }
        else if ( (val = parse_boolean("amd-iommu-perdev-intremap", s, ss)) >= 0 )
#ifdef CONFIG_AMD_IOMMU
            amd_iommu_perdev_intremap = val;
#else
            no_config_param("AMD_IOMMU", "iommu", s, ss);
#endif
        else if ( (val = parse_boolean("dom0-passthrough", s, ss)) >= 0 )
            iommu_hwdom_passthrough = val;
        else if ( (val = parse_boolean("dom0-strict", s, ss)) >= 0 )
            iommu_hwdom_strict = val;
#ifndef iommu_hap_pt_share
        else if ( (val = parse_boolean("sharept", s, ss)) >= 0 )
            iommu_hap_pt_share = val;
#endif
        else
            rc = -EINVAL;

        s = ss + 1;
    } while ( *ss );

    return rc;
}
custom_param("iommu", parse_iommu_param);

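/*
 * Parse the "dom0-iommu=" command line option, controlling how IOMMU
 * mappings for the hardware domain are set up.
 */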
static int __init cf_check parse_dom0_iommu_param(const char *s)
{
    const char *ss;
    int rc = 0;

    do {
        int val;

        ss = strchr(s, ',');
        if ( !ss )
            ss = strchr(s, '\0');

        if ( (val = parse_boolean("passthrough", s, ss)) >= 0 )
            iommu_hwdom_passthrough = val;
        else if ( (val = parse_boolean("strict", s, ss)) >= 0 )
            iommu_hwdom_strict = val;
        else if ( (val = parse_boolean("map-inclusive", s, ss)) >= 0 )
            iommu_hwdom_inclusive = val;
        else if ( (val = parse_boolean("map-reserved", s, ss)) >= 0 )
            iommu_hwdom_reserved = val;
        else if ( !cmdline_strcmp(s, "none") )
            iommu_hwdom_none = true;
        else
            rc = -EINVAL;

        s = ss + 1;
    } while ( *ss );

    return rc;
}
custom_param("dom0-iommu", parse_dom0_iommu_param);

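/*
 * A translated (HVM/PVH) hardware domain must not use passthrough mode
 * and needs its IOMMU mappings kept in sync with its P2M, so force
 * strict mode unless IOMMU use was disabled for it altogether.
 */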
static void __hwdom_init check_hwdom_reqs(struct domain *d)
{
    if ( iommu_hwdom_none || !is_hvm_domain(d) )
        return;

    iommu_hwdom_passthrough = false;
    iommu_hwdom_strict = true;

    arch_iommu_check_autotranslated_hwdom(d);
}

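/*
 * Per-domain IOMMU initialisation, invoked during domain creation.
 * 'opts' carries the XEN_DOMCTL_IOMMU_* flags requested by the
 * toolstack.
 */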
int iommu_domain_init(struct domain *d, unsigned int opts)
{
    struct domain_iommu *hd = dom_iommu(d);
    int ret = 0;

    if ( is_hardware_domain(d) )
        check_hwdom_reqs(d); /* may modify iommu_hwdom_strict */

    if ( !is_iommu_enabled(d) )
        return 0;

#ifdef CONFIG_NUMA
    hd->node = NUMA_NO_NODE;
#endif

    ret = arch_iommu_domain_init(d);
    if ( ret )
        return ret;

    hd->platform_ops = iommu_get_ops();
    ret = iommu_call(hd->platform_ops, init, d);
    if ( ret || is_system_domain(d) )
        return ret;

    /*
     * Use shared page tables for HAP and IOMMU if the global option
     * is enabled (from which we can infer the h/w is capable) and
     * the domain options do not disallow it. HAP must, of course, also
     * be enabled.
     */
    hd->hap_pt_share = hap_enabled(d) && iommu_hap_pt_share &&
        !(opts & XEN_DOMCTL_IOMMU_no_sharept);

    /*
     * NB: 'relaxed' h/w domains don't need the IOMMU mappings to be kept
     *     in-sync with their assigned pages because all host RAM will be
     *     mapped during hwdom_init().
     */
    if ( !is_hardware_domain(d) || iommu_hwdom_strict )
        hd->need_sync = !iommu_use_hap_pt(d);

    ASSERT(!(hd->need_sync && hd->hap_pt_share));

    return 0;
}

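/*
 * Handler for the 'o' debug key: dump the IOMMU page tables of every
 * translated domain, noting (rather than dumping) domains which share
 * their HAP page tables with the IOMMU.
 */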
static void cf_check iommu_dump_page_tables(unsigned char key)
{
    struct domain *d;

    ASSERT(iommu_enabled);

    rcu_read_lock(&domlist_read_lock);

    for_each_domain(d)
    {
        if ( is_hardware_domain(d) || !is_iommu_enabled(d) )
            continue;

        if ( iommu_use_hap_pt(d) )
        {
            printk("%pd sharing page tables\n", d);
            continue;
        }

        iommu_vcall(dom_iommu(d)->platform_ops, dump_page_tables, d);
    }

    rcu_read_unlock(&domlist_read_lock);
}

void __hwdom_init iommu_hwdom_init(struct domain *d)
{
    struct domain_iommu *hd = dom_iommu(d);

    if ( !is_iommu_enabled(d) )
        return;

    iommu_vcall(hd->platform_ops, hwdom_init, d);
}

static void iommu_teardown(struct domain *d)
{
    struct domain_iommu *hd = dom_iommu(d);

    /*
     * During early domain creation failure, we may reach here with the
     * ops not yet initialized.
     */
    if ( !hd->platform_ops )
        return;

    iommu_vcall(hd->platform_ops, teardown, d);
}

void iommu_domain_destroy(struct domain *d)
{
    if ( !is_iommu_enabled(d) )
        return;

    iommu_teardown(d);

    arch_iommu_domain_destroy(d);
}

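/*
 * Compute the largest mapping order usable at this point: walk the
 * hardware supported page sizes from smallest to largest, stopping as
 * soon as either the remaining page count can't fill the next size or
 * dfn/mfn aren't suitably aligned for it.
 */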
static unsigned int mapping_order(const struct domain_iommu *hd,
                                  dfn_t dfn, mfn_t mfn, unsigned long nr)
{
    unsigned long res = dfn_x(dfn) | mfn_x(mfn);
    unsigned long sizes = hd->platform_ops->page_sizes;
    unsigned int bit = ffsl(sizes) - 1, order = 0;

    ASSERT(bit == PAGE_SHIFT);

    while ( (sizes = (sizes >> bit) & ~1) )
    {
        unsigned long mask;

        bit = ffsl(sizes) - 1;
        mask = (1UL << bit) - 1;
        if ( nr <= mask || (res & mask) )
            break;
        order += bit;
        nr >>= bit;
        res >>= bit;
    }

    return order;
}

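/*
 * Map a contiguous range of pages, using the largest suitable mapping
 * order for each iteration.  With IOMMUF_preempt set the function may
 * return early, yielding the number of pages processed so far; on error
 * the partially established mapping is torn down again and non-hardware
 * domains are crashed.
 */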
long iommu_map(struct domain *d, dfn_t dfn0, mfn_t mfn0,
               unsigned long page_count, unsigned int flags,
               unsigned int *flush_flags)
{
    const struct domain_iommu *hd = dom_iommu(d);
    unsigned long i;
    unsigned int order, j = 0;
    int rc = 0;

    if ( !is_iommu_enabled(d) )
        return 0;

    ASSERT(!IOMMUF_order(flags));

    for ( i = 0; i < page_count; i += 1UL << order )
    {
        dfn_t dfn = dfn_add(dfn0, i);
        mfn_t mfn = mfn_add(mfn0, i);

        order = mapping_order(hd, dfn, mfn, page_count - i);

        if ( (flags & IOMMUF_preempt) &&
             ((!(++j & 0xfff) && general_preempt_check()) ||
              i > LONG_MAX - (1UL << order)) )
            return i;

        rc = iommu_call(hd->platform_ops, map_page, d, dfn, mfn,
                        flags | IOMMUF_order(order), flush_flags);

        if ( likely(!rc) )
            continue;

        if ( !d->is_shutting_down && printk_ratelimit() )
            printk(XENLOG_ERR
                   "d%d: IOMMU mapping dfn %"PRI_dfn" to mfn %"PRI_mfn" failed: %d\n",
                   d->domain_id, dfn_x(dfn), mfn_x(mfn), rc);

        /* while statement to satisfy __must_check */
        while ( iommu_unmap(d, dfn0, i, 0, flush_flags) )
            break;

        if ( !is_hardware_domain(d) )
            domain_crash(d);

        break;
    }

    /*
     * Something went wrong so, if we were dealing with more than a single
     * page, flush everything and clear flush flags.
     */
    if ( page_count > 1 && unlikely(rc) &&
         !iommu_iotlb_flush_all(d, *flush_flags) )
        *flush_flags = 0;

    return rc;
}

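/*
 * Non-preemptible wrapper around iommu_map() which also performs the
 * IOTLB flush, unless flushing is suppressed via this CPU's
 * iommu_dont_flush_iotlb flag.
 */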
int iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
                     unsigned long page_count, unsigned int flags)
{
    unsigned int flush_flags = 0;
    int rc;

    ASSERT(!(flags & IOMMUF_preempt));
    rc = iommu_map(d, dfn, mfn, page_count, flags, &flush_flags);

    if ( !this_cpu(iommu_dont_flush_iotlb) && !rc )
        rc = iommu_iotlb_flush(d, dfn, page_count, flush_flags);

    return rc;
}

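/*
 * Unmap a contiguous range of pages.  Unlike iommu_map(), an individual
 * failure is recorded but doesn't terminate the loop for the hardware
 * domain; any other domain is crashed on the first error.
 */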
long iommu_unmap(struct domain *d, dfn_t dfn0, unsigned long page_count,
                 unsigned int flags, unsigned int *flush_flags)
{
    const struct domain_iommu *hd = dom_iommu(d);
    unsigned long i;
    unsigned int order, j = 0;
    int rc = 0;

    if ( !is_iommu_enabled(d) )
        return 0;

    ASSERT(!(flags & ~IOMMUF_preempt));

    for ( i = 0; i < page_count; i += 1UL << order )
    {
        dfn_t dfn = dfn_add(dfn0, i);
        int err;

        order = mapping_order(hd, dfn, _mfn(0), page_count - i);

        if ( (flags & IOMMUF_preempt) &&
             ((!(++j & 0xfff) && general_preempt_check()) ||
              i > LONG_MAX - (1UL << order)) )
            return i;

        err = iommu_call(hd->platform_ops, unmap_page, d, dfn,
                         flags | IOMMUF_order(order), flush_flags);

        if ( likely(!err) )
            continue;

        if ( !d->is_shutting_down && printk_ratelimit() )
            printk(XENLOG_ERR
                   "d%d: IOMMU unmapping dfn %"PRI_dfn" failed: %d\n",
                   d->domain_id, dfn_x(dfn), err);

        if ( !rc )
            rc = err;

        if ( !is_hardware_domain(d) )
        {
            domain_crash(d);
            break;
        }
    }

    /*
     * Something went wrong so, if we were dealing with more than a single
     * page, flush everything and clear flush flags.
     */
    if ( page_count > 1 && unlikely(rc) &&
         !iommu_iotlb_flush_all(d, *flush_flags) )
        *flush_flags = 0;

    return rc;
}

int iommu_legacy_unmap(struct domain *d, dfn_t dfn, unsigned long page_count)
{
    unsigned int flush_flags = 0;
    int rc = iommu_unmap(d, dfn, page_count, 0, &flush_flags);

    if ( !this_cpu(iommu_dont_flush_iotlb) && !rc )
        rc = iommu_iotlb_flush(d, dfn, page_count, flush_flags);

    return rc;
}

int iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
                      unsigned int *flags)
{
    const struct domain_iommu *hd = dom_iommu(d);

    if ( !is_iommu_enabled(d) || !hd->platform_ops->lookup_page )
        return -EOPNOTSUPP;

    return iommu_call(hd->platform_ops, lookup_page, d, dfn, mfn, flags);
}

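/*
 * Flush the IOTLB for a given DFN range.  Failure to flush leaves stale
 * entries which would undermine DMA isolation, hence non-hardware
 * domains are crashed in that case.
 */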
int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned long page_count,
                      unsigned int flush_flags)
{
    const struct domain_iommu *hd = dom_iommu(d);
    int rc;

    if ( !is_iommu_enabled(d) || !hd->platform_ops->iotlb_flush ||
         !page_count || !flush_flags )
        return 0;

    if ( dfn_eq(dfn, INVALID_DFN) )
        return -EINVAL;

    rc = iommu_call(hd->platform_ops, iotlb_flush, d, dfn, page_count,
                    flush_flags);
    if ( unlikely(rc) )
    {
        if ( !d->is_shutting_down && printk_ratelimit() )
            printk(XENLOG_ERR
                   "d%d: IOMMU IOTLB flush failed: %d, dfn %"PRI_dfn", page count %lu flags %x\n",
                   d->domain_id, rc, dfn_x(dfn), page_count, flush_flags);

        if ( !is_hardware_domain(d) )
            domain_crash(d);
    }

    return rc;
}

int iommu_iotlb_flush_all(struct domain *d, unsigned int flush_flags)
{
    const struct domain_iommu *hd = dom_iommu(d);
    int rc;

    if ( !is_iommu_enabled(d) || !hd->platform_ops->iotlb_flush ||
         !flush_flags )
        return 0;

    rc = iommu_call(hd->platform_ops, iotlb_flush, d, INVALID_DFN, 0,
                    flush_flags | IOMMU_FLUSHF_all);
    if ( unlikely(rc) )
    {
        if ( !d->is_shutting_down && printk_ratelimit() )
            printk(XENLOG_ERR
                   "d%d: IOMMU IOTLB flush all failed: %d\n",
                   d->domain_id, rc);

        if ( !is_hardware_domain(d) )
            domain_crash(d);
    }

    return rc;
}

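/*
 * Prepare a device for quarantining in dom_io, informing the vendor
 * driver whether scratch-page quarantining (rather than the basic
 * variant) is in use.
 */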
int iommu_quarantine_dev_init(device_t *dev)
{
    const struct domain_iommu *hd = dom_iommu(dom_io);

    if ( !iommu_quarantine || !hd->platform_ops->quarantine_init )
        return 0;

    return iommu_call(hd->platform_ops, quarantine_init,
                      dev, iommu_quarantine == IOMMU_quarantine_scratch_page);
}

static int __init iommu_quarantine_init(void)
{
    dom_io->options |= XEN_DOMCTL_CDF_iommu;

    return iommu_domain_init(dom_io, 0);
}

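/*
 * Boot time IOMMU setup: probe the hardware, verify that its smallest
 * supported page size matches PAGE_SIZE, honour iommu=force/required,
 * and initialise the quarantine domain.
 */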
int __init iommu_setup(void)
{
    int rc = -ENODEV;
    bool force_intremap = force_iommu && iommu_intremap;

    if ( iommu_hwdom_strict )
        iommu_hwdom_passthrough = false;

    if ( iommu_enable )
    {
        const struct iommu_ops *ops = NULL;

        rc = iommu_hardware_setup();
        if ( !rc )
            ops = iommu_get_ops();
        if ( ops && ISOLATE_LSB(ops->page_sizes) != PAGE_SIZE )
        {
            printk(XENLOG_ERR "IOMMU: page size mask %lx unsupported\n",
                   ops->page_sizes);
            rc = ops->page_sizes ? -EPERM : -ENODATA;
        }
        iommu_enabled = (rc == 0);
    }

#ifndef iommu_intremap
    if ( !iommu_enabled )
        iommu_intremap = iommu_intremap_off;
#endif

    if ( (force_iommu && !iommu_enabled) ||
         (force_intremap && !iommu_intremap) )
        panic("Couldn't enable %s and iommu=required/force\n",
              !iommu_enabled ? "IOMMU" : "Interrupt Remapping");

#ifndef iommu_intpost
    if ( !iommu_intremap )
        iommu_intpost = false;
#endif

    printk("I/O virtualisation %sabled\n", iommu_enabled ? "en" : "dis");
    if ( !iommu_enabled )
    {
        iommu_hwdom_passthrough = false;
        iommu_hwdom_strict = false;
    }
    else
    {
        if ( iommu_quarantine_init() )
            panic("Could not set up quarantine\n");

        printk(" - Dom0 mode: %s\n",
               iommu_hwdom_passthrough ? "Passthrough" :
               iommu_hwdom_strict ? "Strict" : "Relaxed");
#ifndef iommu_intremap
        printk("Interrupt remapping %sabled\n", iommu_intremap ? "en" : "dis");
#endif

        register_keyhandler('o', &iommu_dump_page_tables,
                            "dump iommu page tables", 0);
    }

    return rc;
}

int iommu_suspend(void)
{
    if ( iommu_enabled )
        return iommu_call(iommu_get_ops(), suspend);

    return 0;
}

void iommu_resume(void)
{
    if ( iommu_enabled )
        iommu_vcall(iommu_get_ops(), resume);
}

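/*
 * Dispatch IOMMU related domctls: try the PCI handler first, falling
 * back to the device tree one if the subject device wasn't found there.
 */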
int iommu_do_domctl(
    struct xen_domctl *domctl, struct domain *d,
    XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
    int ret = -ENODEV;

    if ( !(d ? is_iommu_enabled(d) : iommu_enabled) )
        return -EOPNOTSUPP;

#ifdef CONFIG_HAS_PCI
    ret = iommu_do_pci_domctl(domctl, d, u_domctl);
#endif

#ifdef CONFIG_HAS_DEVICE_TREE_DISCOVERY
    if ( ret == -ENODEV )
        ret = iommu_do_dt_domctl(domctl, d, u_domctl);
#endif

    return ret;
}

void iommu_crash_shutdown(void)
{
    if ( !iommu_crash_disable )
        return;

    if ( iommu_enabled )
        iommu_vcall(iommu_get_ops(), crash_shutdown);

    iommu_enabled = false;
#ifndef iommu_intremap
    iommu_intremap = iommu_intremap_off;
#endif
#ifndef iommu_intpost
    iommu_intpost = false;
#endif
}

void iommu_quiesce(void)
{
    const struct iommu_ops *ops;

    if ( !iommu_enabled )
        return;

    ops = iommu_get_ops();
    if ( ops->quiesce )
        iommu_vcall(ops, quiesce);
}

int iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
{
    const struct iommu_ops *ops;

    if ( !iommu_enabled )
        return 0;

    ops = iommu_get_ops();
    if ( !ops->get_reserved_device_memory )
        return 0;

    return iommu_call(ops, get_reserved_device_memory, func, ctxt);
}

bool iommu_has_feature(struct domain *d, enum iommu_feature feature)
{
    return is_iommu_enabled(d) && test_bit(feature, dom_iommu(d)->features);
}

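/*
 * Extra reserved device memory ranges, which drivers may register at
 * boot (in addition to firmware-described reserved regions) so they get
 * reported alongside the latter and, on x86, reserved in the E820 map.
 */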
#define MAX_EXTRA_RESERVED_RANGES 20
struct extra_reserved_range {
    unsigned long start;
    unsigned long nr;
    pci_sbdf_t sbdf;
    const char *name;
};
static unsigned int __initdata nr_extra_reserved_ranges;
static struct extra_reserved_range __initdata
    extra_reserved_ranges[MAX_EXTRA_RESERVED_RANGES];

int __init iommu_add_extra_reserved_device_memory(unsigned long start,
                                                  unsigned long nr,
                                                  pci_sbdf_t sbdf,
                                                  const char *name)
{
    unsigned int idx;

    if ( nr_extra_reserved_ranges >= MAX_EXTRA_RESERVED_RANGES )
        return -ENOMEM;

    idx = nr_extra_reserved_ranges++;
    extra_reserved_ranges[idx].start = start;
    extra_reserved_ranges[idx].nr = nr;
    extra_reserved_ranges[idx].sbdf = sbdf;
    extra_reserved_ranges[idx].name = name;

    return 0;
}

int __init iommu_get_extra_reserved_device_memory(iommu_grdm_t *func,
                                                  void *ctxt)
{
    unsigned int idx;
    int ret;

    for ( idx = 0; idx < nr_extra_reserved_ranges; idx++ )
    {
#ifdef CONFIG_X86
        paddr_t start = pfn_to_paddr(extra_reserved_ranges[idx].start);
        paddr_t end = pfn_to_paddr(extra_reserved_ranges[idx].start +
                                   extra_reserved_ranges[idx].nr);

        if ( !reserve_e820_ram(&e820, start, end) )
        {
            printk(XENLOG_ERR "Failed to reserve [%"PRIx64"-%"PRIx64") for %s, "
                   "skipping IOMMU mapping for it, some functionality may be broken\n",
                   start, end, extra_reserved_ranges[idx].name);
            continue;
        }
#endif
        ret = func(extra_reserved_ranges[idx].start,
                   extra_reserved_ranges[idx].nr,
                   extra_reserved_ranges[idx].sbdf.sbdf,
                   ctxt);
        if ( ret < 0 )
            return ret;
    }

    return 0;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */