/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/sched.h>
#include <xen/iommu.h>
#include <xen/paging.h>
#include <xen/guest_access.h>
#include <xen/event.h>
#include <xen/softirq.h>
#include <xen/keyhandler.h>
#include <xsm/xsm.h>

static int parse_iommu_param(const char *s);
static void iommu_dump_p2m_table(unsigned char key);

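/*
 * Timeout for device IOTLB invalidation; the unit is assumed to be
 * milliseconds (default 1000).
 */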
unsigned int __read_mostly iommu_dev_iotlb_timeout = 1000;
integer_param("iommu_dev_iotlb_timeout", iommu_dev_iotlb_timeout);

/*
 * The 'iommu' parameter enables the IOMMU.  Its optional value is a comma
 * separated list of:
 *
 *   off|no|false|disable       Disable IOMMU
 *   force|required             Don't boot unless IOMMU is enabled
 *   no-intremap                Disable interrupt remapping
 *   intpost                    Enable VT-d interrupt posting
 *   verbose                    Be more verbose
 *   debug                      Enable debugging messages and checks
 *   workaround_bios_bug        Work around some BIOS issues to still enable
 *                              VT-d; security is not guaranteed
 *   dom0-passthrough           No DMA translation at all for Dom0
 *   dom0-strict                No 1:1 memory mapping for Dom0
 *   no-sharept                 Don't share VT-d and EPT page tables
 *   no-snoop                   Disable VT-d Snoop Control
 *   no-qinval                  Disable VT-d Queued Invalidation
 *   no-igfx                    Disable VT-d for IGD devices (insecure)
 *   no-amd-iommu-perdev-intremap Don't use per-device interrupt remapping
 *                              tables (insecure)
 */
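/*
 * For example (an illustrative combination), "iommu=force,verbose,no-intremap"
 * refuses to boot unless the IOMMU can be enabled, turns on verbose logging,
 * and disables interrupt remapping.
 */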
custom_param("iommu", parse_iommu_param);
bool_t __initdata iommu_enable = 1;
bool_t __read_mostly iommu_enabled;
bool_t __read_mostly force_iommu;
bool_t __hwdom_initdata iommu_dom0_strict;
bool_t __read_mostly iommu_verbose;
bool_t __read_mostly iommu_workaround_bios_bug;
bool_t __read_mostly iommu_igfx = 1;
bool_t __read_mostly iommu_passthrough;
bool_t __read_mostly iommu_snoop = 1;
bool_t __read_mostly iommu_qinval = 1;
bool_t __read_mostly iommu_intremap = 1;

/*
 * In the current implementation of VT-d posted interrupts, the per-CPU list
 * that tracks blocked vCPUs can, in extreme cases, grow very long and hurt
 * interrupt latency.  Leave the feature off by default until a good solution
 * is found.
 */
bool_t __read_mostly iommu_intpost;
bool_t __read_mostly iommu_hap_pt_share = 1;
bool_t __read_mostly iommu_debug;
bool_t __read_mostly amd_iommu_perdev_intremap = 1;

DEFINE_PER_CPU(bool_t, iommu_dont_flush_iotlb);

DEFINE_SPINLOCK(iommu_pt_cleanup_lock);
PAGE_LIST_HEAD(iommu_pt_cleanup_list);
static struct tasklet iommu_pt_cleanup_tasklet;

static int __init parse_iommu_param(const char *s)
{
    const char *ss;
    int val, b, rc = 0;

    do {
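        /* A leading "no-" inverts the value applied to the option. */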
        val = !!strncmp(s, "no-", 3);
        if ( !val )
            s += 3;

        ss = strchr(s, ',');
        if ( !ss )
            ss = strchr(s, '\0');

        b = parse_bool(s, ss);
        if ( b >= 0 )
            iommu_enable = b;
        else if ( !strncmp(s, "force", ss - s) ||
                  !strncmp(s, "required", ss - s) )
            force_iommu = val;
        else if ( !strncmp(s, "workaround_bios_bug", ss - s) )
            iommu_workaround_bios_bug = val;
        else if ( !strncmp(s, "igfx", ss - s) )
            iommu_igfx = val;
        else if ( !strncmp(s, "verbose", ss - s) )
            iommu_verbose = val;
        else if ( !strncmp(s, "snoop", ss - s) )
            iommu_snoop = val;
        else if ( !strncmp(s, "qinval", ss - s) )
            iommu_qinval = val;
        else if ( !strncmp(s, "intremap", ss - s) )
            iommu_intremap = val;
        else if ( !strncmp(s, "intpost", ss - s) )
            iommu_intpost = val;
        else if ( !strncmp(s, "debug", ss - s) )
        {
            iommu_debug = val;
            if ( val )
                iommu_verbose = 1;
        }
        else if ( !strncmp(s, "amd-iommu-perdev-intremap", ss - s) )
            amd_iommu_perdev_intremap = val;
        else if ( !strncmp(s, "dom0-passthrough", ss - s) )
            iommu_passthrough = val;
        else if ( !strncmp(s, "dom0-strict", ss - s) )
            iommu_dom0_strict = val;
        else if ( !strncmp(s, "sharept", ss - s) )
            iommu_hap_pt_share = val;
        else
            rc = -EINVAL;

        s = ss + 1;
    } while ( *ss );

    return rc;
}

int iommu_domain_init(struct domain *d)
{
    struct domain_iommu *hd = dom_iommu(d);
    int ret = 0;

    ret = arch_iommu_domain_init(d);
    if ( ret )
        return ret;

    if ( !iommu_enabled )
        return 0;

    hd->platform_ops = iommu_get_ops();
    return hd->platform_ops->init(d);
}

static void __hwdom_init check_hwdom_reqs(struct domain *d)
{
    if ( !paging_mode_translate(d) )
        return;

    arch_iommu_check_autotranslated_hwdom(d);

    if ( iommu_passthrough )
        panic("Dom0 uses paging translated mode, dom0-passthrough must not be "
              "enabled\n");

    iommu_dom0_strict = 1;
}

void __hwdom_init iommu_hwdom_init(struct domain *d)
{
    const struct domain_iommu *hd = dom_iommu(d);

    check_hwdom_reqs(d);

    if ( !iommu_enabled )
        return;

    register_keyhandler('o', &iommu_dump_p2m_table, "dump iommu p2m table", 0);
    d->need_iommu = !!iommu_dom0_strict;
    if ( need_iommu(d) && !iommu_use_hap_pt(d) )
    {
        struct page_info *page;
        unsigned int i = 0;
        int rc = 0;

        page_list_for_each ( page, &d->page_list )
        {
            unsigned long mfn = page_to_mfn(page);
            unsigned long gfn = mfn_to_gmfn(d, mfn);
            unsigned int mapping = IOMMUF_readable;
            int ret;

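            /*
             * Map the page writable if it is currently writable, or if it
             * has no type references and so may be re-typed writable later.
             */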
            if ( ((page->u.inuse.type_info & PGT_count_mask) == 0) ||
                 ((page->u.inuse.type_info & PGT_type_mask)
                  == PGT_writable_page) )
                mapping |= IOMMUF_writable;

            ret = hd->platform_ops->map_page(d, gfn, mfn, mapping);
            if ( !rc )
                rc = ret;

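            /* Don't hog the CPU: process pending softirqs every 2^20 pages. */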
            if ( !(i++ & 0xfffff) )
                process_pending_softirqs();
        }

        if ( rc )
            printk(XENLOG_WARNING "d%d: IOMMU mapping failed: %d\n",
                   d->domain_id, rc);
    }

    return hd->platform_ops->hwdom_init(d);
}

void iommu_teardown(struct domain *d)
{
    const struct domain_iommu *hd = dom_iommu(d);

    d->need_iommu = 0;
    hd->platform_ops->teardown(d);
    tasklet_schedule(&iommu_pt_cleanup_tasklet);
}

int iommu_construct(struct domain *d)
{
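    /* Nothing to do if the IOMMU is already set up for this domain. */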
    if ( need_iommu(d) > 0 )
        return 0;

    if ( !iommu_use_hap_pt(d) )
    {
        int rc;

        rc = arch_iommu_populate_page_table(d);
        if ( rc )
            return rc;
    }

    d->need_iommu = 1;
    /*
     * There may be dirty cache lines when a device is assigned and before
     * need_iommu(d) becomes true; this would cause memory_type_changed() to
     * have no effect if the memory type changes.  Call memory_type_changed()
     * here to amend this.
     */
    memory_type_changed(d);

    return 0;
}

void iommu_domain_destroy(struct domain *d)
{
    if ( !iommu_enabled || !dom_iommu(d)->platform_ops )
        return;

    iommu_teardown(d);

    arch_iommu_domain_destroy(d);
}

int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
                   unsigned int flags)
{
    const struct domain_iommu *hd = dom_iommu(d);
    int rc;

    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    rc = hd->platform_ops->map_page(d, gfn, mfn, flags);
    if ( unlikely(rc) )
    {
        if ( !d->is_shutting_down && printk_ratelimit() )
            printk(XENLOG_ERR
                   "d%d: IOMMU mapping gfn %#lx to mfn %#lx failed: %d\n",
                   d->domain_id, gfn, mfn, rc);

        if ( !is_hardware_domain(d) )
            domain_crash(d);
    }

    return rc;
}

int iommu_unmap_page(struct domain *d, unsigned long gfn)
{
    const struct domain_iommu *hd = dom_iommu(d);
    int rc;

    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    rc = hd->platform_ops->unmap_page(d, gfn);
    if ( unlikely(rc) )
    {
        if ( !d->is_shutting_down && printk_ratelimit() )
            printk(XENLOG_ERR
                   "d%d: IOMMU unmapping gfn %#lx failed: %d\n",
                   d->domain_id, gfn, rc);

        if ( !is_hardware_domain(d) )
            domain_crash(d);
    }

    return rc;
}

static void iommu_free_pagetables(unsigned long unused)
{
    do {
        struct page_info *pg;

        spin_lock(&iommu_pt_cleanup_lock);
        pg = page_list_remove_head(&iommu_pt_cleanup_list);
        spin_unlock(&iommu_pt_cleanup_lock);
        if ( !pg )
            return;
        iommu_get_ops()->free_page_table(pg);
    } while ( !softirq_pending(smp_processor_id()) );

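    /* Softirqs are pending here: continue freeing on another online CPU. */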
    tasklet_schedule_on_cpu(&iommu_pt_cleanup_tasklet,
                            cpumask_cycle(smp_processor_id(), &cpu_online_map));
}

int iommu_iotlb_flush(struct domain *d, unsigned long gfn,
                      unsigned int page_count)
{
    const struct domain_iommu *hd = dom_iommu(d);
    int rc;

    if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
        return 0;

    rc = hd->platform_ops->iotlb_flush(d, gfn, page_count);
    if ( unlikely(rc) )
    {
        if ( !d->is_shutting_down && printk_ratelimit() )
            printk(XENLOG_ERR
                   "d%d: IOMMU IOTLB flush failed: %d, gfn %#lx, page count %u\n",
                   d->domain_id, rc, gfn, page_count);

        if ( !is_hardware_domain(d) )
            domain_crash(d);
    }

    return rc;
}

int iommu_iotlb_flush_all(struct domain *d)
{
    const struct domain_iommu *hd = dom_iommu(d);
    int rc;

    if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush_all )
        return 0;

    rc = hd->platform_ops->iotlb_flush_all(d);
    if ( unlikely(rc) )
    {
        if ( !d->is_shutting_down && printk_ratelimit() )
            printk(XENLOG_ERR
                   "d%d: IOMMU IOTLB flush all failed: %d\n",
                   d->domain_id, rc);

        if ( !is_hardware_domain(d) )
            domain_crash(d);
    }

    return rc;
}

int __init iommu_setup(void)
{
    int rc = -ENODEV;
    bool_t force_intremap = force_iommu && iommu_intremap;

    if ( iommu_dom0_strict )
        iommu_passthrough = 0;

    if ( iommu_enable )
    {
        rc = iommu_hardware_setup();
        iommu_enabled = (rc == 0);
    }
    if ( !iommu_enabled )
        iommu_intremap = 0;

    if ( (force_iommu && !iommu_enabled) ||
         (force_intremap && !iommu_intremap) )
        panic("Couldn't enable %s and iommu=required/force",
              !iommu_enabled ? "IOMMU" : "Interrupt Remapping");

    if ( !iommu_intremap )
        iommu_intpost = 0;

    if ( !iommu_enabled )
    {
        iommu_snoop = 0;
        iommu_passthrough = 0;
        iommu_dom0_strict = 0;
    }
    printk("I/O virtualisation %sabled\n", iommu_enabled ? "en" : "dis");
    if ( iommu_enabled )
    {
        printk(" - Dom0 mode: %s\n",
               iommu_passthrough ? "Passthrough" :
               iommu_dom0_strict ? "Strict" : "Relaxed");
        printk("Interrupt remapping %sabled\n", iommu_intremap ? "en" : "dis");
        tasklet_init(&iommu_pt_cleanup_tasklet, iommu_free_pagetables, 0);
    }

    return rc;
}

int iommu_suspend()
{
    if ( iommu_enabled )
        return iommu_get_ops()->suspend();

    return 0;
}

void iommu_resume()
{
    if ( iommu_enabled )
        iommu_get_ops()->resume();
}

int iommu_do_domctl(
    struct xen_domctl *domctl, struct domain *d,
    XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
    int ret = -ENODEV;

    if ( !iommu_enabled )
        return -ENOSYS;

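    /* Try the PCI handler first; fall back to device tree handling. */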
#ifdef CONFIG_HAS_PCI
    ret = iommu_do_pci_domctl(domctl, d, u_domctl);
#endif

#ifdef CONFIG_HAS_DEVICE_TREE
    if ( ret == -ENODEV )
        ret = iommu_do_dt_domctl(domctl, d, u_domctl);
#endif

    return ret;
}

void iommu_share_p2m_table(struct domain* d)
{
    if ( iommu_enabled && iommu_use_hap_pt(d) )
        iommu_get_ops()->share_p2m(d);
}

void iommu_crash_shutdown(void)
{
    if ( iommu_enabled )
        iommu_get_ops()->crash_shutdown();
    iommu_enabled = iommu_intremap = iommu_intpost = 0;
}

int iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
{
    const struct iommu_ops *ops;

    if ( !iommu_enabled )
        return 0;

    ops = iommu_get_ops();
    if ( !ops->get_reserved_device_memory )
        return 0;

    return ops->get_reserved_device_memory(func, ctxt);
}

bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature)
{
    if ( !iommu_enabled )
        return 0;

    return test_bit(feature, dom_iommu(d)->features);
}

static void iommu_dump_p2m_table(unsigned char key)
{
    struct domain *d;
    const struct iommu_ops *ops;

    if ( !iommu_enabled )
    {
        printk("IOMMU not enabled!\n");
        return;
    }

    ops = iommu_get_ops();
    for_each_domain(d)
    {
        if ( is_hardware_domain(d) || need_iommu(d) <= 0 )
            continue;

        if ( iommu_use_hap_pt(d) )
        {
            printk("\ndomain%d IOMMU p2m table shared with MMU:\n", d->domain_id);
            continue;
        }

        printk("\ndomain%d IOMMU p2m table:\n", d->domain_id);
        ops->dump_p2m_table(d);
    }
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */