/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/sched.h>
#include <xen/iommu.h>
#include <xen/paging.h>
#include <xen/guest_access.h>
#include <xen/event.h>
#include <xen/softirq.h>
#include <xsm/xsm.h>

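/*
 * Mirror an IO-APIC RTE write into the IOMMU's interrupt remapping table
 * via the active vendor driver.
 */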
void iommu_update_ire_from_apic(
    unsigned int apic, unsigned int reg, unsigned int value)
{
    const struct iommu_ops *ops = iommu_get_ops();
    ops->update_ire_from_apic(apic, reg, value);
}

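/* Read an IO-APIC RTE back through the IOMMU's interrupt remapping layer. */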
unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg)
{
    const struct iommu_ops *ops = iommu_get_ops();
    return ops->read_apic_from_ire(apic, reg);
}

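/*
 * Set up interrupt remapping for an HPET MSI.  Drivers which don't
 * implement the hook report -ENODEV, leaving the HPET MSI unusable.
 */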
int __init iommu_setup_hpet_msi(struct msi_desc *msi)
{
    const struct iommu_ops *ops = iommu_get_ops();
    return ops->setup_hpet_msi ? ops->setup_hpet_msi(msi) : -ENODEV;
}

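/*
 * Build IOMMU page tables for a domain which gains a passed-through device
 * after construction: walk all pages the domain owns and map each one at
 * its gfn.  The walk is preemptible (-ERESTART), parking already processed
 * pages on arch.relmem_list so a restart continues where it left off.
 */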
int arch_iommu_populate_page_table(struct domain *d)
{
    const struct domain_iommu *hd = dom_iommu(d);
    struct page_info *page;
    int rc = 0, n = 0;

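    /*
     * Mark population as in progress; need_iommu being non-zero makes
     * other mapping paths update the IOMMU page tables as well.
     */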
    d->need_iommu = -1;

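    /*
     * Avoid a separate IOTLB flush for every mapping made below; a single
     * flush of everything suffices once population has completed.
     */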
    this_cpu(iommu_dont_flush_iotlb) = 1;
    spin_lock(&d->page_alloc_lock);

    if ( unlikely(d->is_dying) )
        rc = -ESRCH;

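    /* HVM domains get all their pages mapped; PV ones only writable pages. */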
    while ( !rc && (page = page_list_remove_head(&d->page_list)) )
    {
        if ( is_hvm_domain(d) ||
            (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
        {
            unsigned long mfn = page_to_mfn(page);
            unsigned long gfn = mfn_to_gmfn(d, mfn);

            if ( gfn != gfn_x(INVALID_GFN) )
            {
                ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
                BUG_ON(SHARED_M2P(gfn));
                rc = hd->platform_ops->map_page(d, gfn, mfn,
                                                IOMMUF_readable |
                                                IOMMUF_writable);
            }
            if ( rc )
            {
                page_list_add(page, &d->page_list);
                break;
            }
        }
        page_list_add_tail(page, &d->arch.relmem_list);
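        /* Every 256 pages, see whether the hypercall should be preempted. */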
        if ( !(++n & 0xff) && !page_list_empty(&d->page_list) &&
             hypercall_preempt_check() )
            rc = -ERESTART;
    }

    if ( !rc )
    {
        /*
         * The expectation here is that generally there are many normal pages
         * on relmem_list (the ones we put there) and only a few in an
         * offline/broken state.  The latter ones are always at the head of
         * the list.  Hence we first move the whole list, and then move back
         * the first few entries.
         */
        page_list_move(&d->page_list, &d->arch.relmem_list);
        while ( !page_list_empty(&d->page_list) &&
                (page = page_list_first(&d->page_list),
                 (page->count_info & (PGC_state|PGC_broken))) )
        {
            page_list_del(page, &d->page_list);
            page_list_add_tail(page, &d->arch.relmem_list);
        }
    }

    spin_unlock(&d->page_alloc_lock);
    this_cpu(iommu_dont_flush_iotlb) = 0;

    if ( !rc )
        rc = iommu_iotlb_flush_all(d);

    if ( rc && rc != -ERESTART )
        iommu_teardown(d);

    return rc;
}

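/* An auto-translated (PVH) hardware domain cannot run without an IOMMU. */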
void __hwdom_init arch_iommu_check_autotranslated_hwdom(struct domain *d)
{
    if ( !iommu_enabled )
        panic("Presently, iommu must be enabled for PVH hardware domain\n");
}

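/*
 * Initialize the x86-specific parts of the per-domain IOMMU state: the
 * mapping lock and the (initially empty) list of mapped RMRRs.
 */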
int arch_iommu_domain_init(struct domain *d)
{
    struct domain_iommu *hd = dom_iommu(d);

    spin_lock_init(&hd->arch.mapping_lock);
    INIT_LIST_HEAD(&hd->arch.mapped_rmrrs);

    return 0;
}

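/* No x86-specific per-domain IOMMU state needs tearing down. */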
void arch_iommu_domain_destroy(struct domain *d)
{
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */