1 /*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; If not, see <http://www.gnu.org/licenses/>.
15 *
16 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
17 */
18
19 #include <xen/sched.h>
20 #include <xen/delay.h>
21 #include <xen/iommu.h>
22 #include <xen/time.h>
23 #include <xen/pci.h>
24 #include <xen/pci_regs.h>
25 #include "iommu.h"
26 #include "dmar.h"
27 #include "vtd.h"
28 #include "extern.h"
29 #include <asm/io_apic.h>
30
/*
 * Disable vt-d protected memory registers.
 *
 * If the Protected Region Status (PRS) bit in the PMEN register is set,
 * clear the Enable Protected Memory (EPM) bit and poll until hardware
 * reports PRS clear.  No-op when protected memory is already disabled.
 */
void disable_pmr(struct iommu *iommu)
{
    u32 val;
    unsigned long flags;

    /*
     * Fast path: nothing to do when the protected region is not active.
     * NOTE(review): this read happens outside register_lock, so 'val'
     * could in principle be stale by the time it is written back below —
     * presumably callers serialise PMEN access; confirm.
     */
    val = dmar_readl(iommu->reg, DMAR_PMEN_REG);
    if ( !(val & DMA_PMEN_PRS) )
        return;

    spin_lock_irqsave(&iommu->register_lock, flags);
    /* Clear EPM while preserving the other PMEN bits read above. */
    dmar_writel(iommu->reg, DMAR_PMEN_REG, val & ~DMA_PMEN_EPM);

    /* Spin (with the IOMMU_WAIT_OP timeout policy) until PRS goes clear. */
    IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, dmar_readl,
                  !(val & DMA_PMEN_PRS), val);
    spin_unlock_irqrestore(&iommu->register_lock, flags);

    dprintk(XENLOG_INFO VTDPREFIX,
            "Disabled protected memory registers\n");
}
51
print_iommu_regs(struct acpi_drhd_unit * drhd)52 void print_iommu_regs(struct acpi_drhd_unit *drhd)
53 {
54 struct iommu *iommu = drhd->iommu;
55 u64 cap;
56
57 printk("---- print_iommu_regs ----\n");
58 printk(" drhd->address = %"PRIx64"\n", drhd->address);
59 printk(" VER = %x\n", dmar_readl(iommu->reg, DMAR_VER_REG));
60 printk(" CAP = %"PRIx64"\n", cap = dmar_readq(iommu->reg, DMAR_CAP_REG));
61 printk(" n_fault_reg = %"PRIx64"\n", cap_num_fault_regs(cap));
62 printk(" fault_recording_offset = %"PRIx64"\n", cap_fault_reg_offset(cap));
63 if ( cap_fault_reg_offset(cap) < PAGE_SIZE )
64 {
65 printk(" fault_recording_reg_l = %"PRIx64"\n",
66 dmar_readq(iommu->reg, cap_fault_reg_offset(cap)));
67 printk(" fault_recording_reg_h = %"PRIx64"\n",
68 dmar_readq(iommu->reg, cap_fault_reg_offset(cap) + 8));
69 }
70 printk(" ECAP = %"PRIx64"\n", dmar_readq(iommu->reg, DMAR_ECAP_REG));
71 printk(" GCMD = %x\n", dmar_readl(iommu->reg, DMAR_GCMD_REG));
72 printk(" GSTS = %x\n", dmar_readl(iommu->reg, DMAR_GSTS_REG));
73 printk(" RTADDR = %"PRIx64"\n", dmar_readq(iommu->reg,DMAR_RTADDR_REG));
74 printk(" CCMD = %"PRIx64"\n", dmar_readq(iommu->reg, DMAR_CCMD_REG));
75 printk(" FSTS = %x\n", dmar_readl(iommu->reg, DMAR_FSTS_REG));
76 printk(" FECTL = %x\n", dmar_readl(iommu->reg, DMAR_FECTL_REG));
77 printk(" FEDATA = %x\n", dmar_readl(iommu->reg, DMAR_FEDATA_REG));
78 printk(" FEADDR = %x\n", dmar_readl(iommu->reg, DMAR_FEADDR_REG));
79 printk(" FEUADDR = %x\n", dmar_readl(iommu->reg, DMAR_FEUADDR_REG));
80 }
81
get_level_index(unsigned long gmfn,int level)82 static u32 get_level_index(unsigned long gmfn, int level)
83 {
84 while ( --level )
85 gmfn = gmfn >> LEVEL_STRIDE;
86
87 return gmfn & LEVEL_MASK;
88 }
89
/*
 * Walk and dump the VT-d translation structures mapping @gmfn for the
 * device @bus/@devfn behind @iommu: the root entry, the context entry,
 * then each page-table level down to the leaf (or first non-present /
 * superpage entry).  Purely diagnostic: reads and printk only.
 */
void print_vtd_entries(struct iommu *iommu, int bus, int devfn, u64 gmfn)
{
    struct context_entry *ctxt_entry;
    struct root_entry *root_entry;
    struct dma_pte pte;
    u64 *l, val;
    u32 l_index, level;

    printk("print_vtd_entries: iommu #%u dev %04x:%02x:%02x.%u gmfn %"PRI_gfn"\n",
           iommu->index, iommu->intel->drhd->segment, bus,
           PCI_SLOT(devfn), PCI_FUNC(devfn), gmfn);

    /* No root table programmed yet: nothing to walk. */
    if ( iommu->root_maddr == 0 )
    {
        printk(" iommu->root_maddr = 0\n");
        return;
    }

    root_entry = (struct root_entry *)map_vtd_domain_page(iommu->root_maddr);
    if ( root_entry == NULL )
    {
        printk(" root_entry == NULL\n");
        return;
    }

    /* One root entry per bus number. */
    printk(" root_entry[%02x] = %"PRIx64"\n", bus, root_entry[bus].val);
    if ( !root_present(root_entry[bus]) )
    {
        unmap_vtd_domain_page(root_entry);
        printk(" root_entry[%02x] not present\n", bus);
        return;
    }

    /*
     * The root entry locates the bus's context table; copy the value out
     * so the root table page can be unmapped before chasing it.
     */
    val = root_entry[bus].val;
    unmap_vtd_domain_page(root_entry);
    ctxt_entry = map_vtd_domain_page(val);
    if ( ctxt_entry == NULL )
    {
        printk(" ctxt_entry == NULL\n");
        return;
    }

    /* One context entry per devfn; 'lo' holds the page-table pointer. */
    val = ctxt_entry[devfn].lo;
    printk(" context[%02x] = %"PRIx64"_%"PRIx64"\n",
           devfn, ctxt_entry[devfn].hi, val);
    if ( !context_present(ctxt_entry[devfn]) )
    {
        unmap_vtd_domain_page(ctxt_entry);
        printk(" ctxt_entry[%02x] not present\n", devfn);
        return;
    }

    /* Derive the number of page-table levels from the context's AGAW. */
    level = agaw_to_level(context_address_width(ctxt_entry[devfn]));
    unmap_vtd_domain_page(ctxt_entry);
    if ( level != VTD_PAGE_TABLE_LEVEL_3 &&
         level != VTD_PAGE_TABLE_LEVEL_4)
    {
        printk("Unsupported VTD page table level (%d)!\n", level);
        return;
    }

    /*
     * Descend one level per iteration, mapping each table page, printing
     * the entry for @gmfn, and unmapping before the next step.  Stops at
     * a non-present entry, a superpage, or the lowest level.
     */
    do
    {
        l = map_vtd_domain_page(val);
        if ( l == NULL )
        {
            printk(" l%u == NULL\n", level);
            break;
        }
        l_index = get_level_index(gmfn, level);
        pte.val = l[l_index];
        unmap_vtd_domain_page(l);
        printk(" l%u[%03x] = %"PRIx64"\n", level, l_index, pte.val);

        if ( !dma_pte_present(pte) )
        {
            printk(" l%u[%03x] not present\n", level, l_index);
            break;
        }
        if ( dma_pte_superpage(pte) )
            break;
        /* Follow the entry to the next (lower) level's table. */
        val = dma_pte_addr(pte);
    } while ( --level );
}
174
/*
 * Keyhandler: for every DRHD unit, dump feature support/enable status
 * (Queued Invalidation, Interrupt Remapping, Interrupt Posting) and, when
 * interrupt remapping is enabled, all present (P=1) IRT entries; then dump
 * the remapped-format entries of each I/O xAPIC redirection table.
 * @key is the debug key that triggered the dump (unused).
 */
void vtd_dump_iommu_info(unsigned char key)
{
    struct acpi_drhd_unit *drhd;
    struct iommu *iommu;
    int i;

    for_each_drhd_unit ( drhd )
    {
        u32 status = 0;

        iommu = drhd->iommu;
        printk("\niommu %x: nr_pt_levels = %x.\n", iommu->index,
               iommu->nr_pt_levels);

        /* Only read global status when QI or IR is at least supported. */
        if ( ecap_queued_inval(iommu->ecap) || ecap_intr_remap(iommu->ecap) )
            status = dmar_readl(iommu->reg, DMAR_GSTS_REG);

        printk(" Queued Invalidation: %ssupported%s.\n",
               ecap_queued_inval(iommu->ecap) ? "" : "not ",
               (status & DMA_GSTS_QIES) ? " and enabled" : "" );


        printk(" Interrupt Remapping: %ssupported%s.\n",
               ecap_intr_remap(iommu->ecap) ? "" : "not ",
               (status & DMA_GSTS_IRES) ? " and enabled" : "" );

        printk(" Interrupt Posting: %ssupported.\n",
               cap_intr_post(iommu->cap) ? "" : "not ");

        if ( status & DMA_GSTS_IRES )
        {
            /* Dump interrupt remapping table. */
            u64 iremap_maddr = dmar_readq(iommu->reg, DMAR_IRTA_REG);
            /* Low 4 bits of IRTA encode the table size as 2^(X+1) entries. */
            int nr_entry = 1 << ((iremap_maddr & 0xF) + 1);
            struct iremap_entry *iremap_entries = NULL;
            int print_cnt = 0;

            printk(" Interrupt remapping table (nr_entry=%#x. "
                   "Only dump P=1 entries here):\n", nr_entry);
            printk("R means remapped format, P means posted format.\n");
            printk("R: SVT SQ SID V AVL FPD DST DLM TM RH DM P\n");
            printk("P: SVT SQ SID V AVL FPD PDA URG P\n");
            for ( i = 0; i < nr_entry; i++ )
            {
                struct iremap_entry *p;
                if ( i % (1 << IREMAP_ENTRY_ORDER) == 0 )
                {
                    /* This entry crosses a page boundary: swap the mapped
                     * IRT page for the one containing entry i. */
                    if ( iremap_entries )
                        unmap_vtd_domain_page(iremap_entries);

                    GET_IREMAP_ENTRY(iremap_maddr, i,
                                     iremap_entries, p);
                }
                else
                    p = &iremap_entries[i % (1 << IREMAP_ENTRY_ORDER)];

                /* Skip non-present entries. */
                if ( !p->remap.p )
                    continue;
                if ( !p->remap.im )
                    /* Remapped-format entry. */
                    printk("R: %04x: %x %x %04x %02x %x %x %08x %x %x %x %x %x\n",
                           i,
                           p->remap.svt, p->remap.sq, p->remap.sid,
                           p->remap.vector, p->remap.avail, p->remap.fpd,
                           p->remap.dst, p->remap.dlm, p->remap.tm, p->remap.rh,
                           p->remap.dm, p->remap.p);
                else
                    /*
                     * Posted-format entry.
                     * NOTE(review): 'p->post.pda_l << 6' is shifted before
                     * widening; if the bitfield promotes to 32-bit int this
                     * can lose/sign-extend high bits of the PDA — confirm
                     * the declared base type of pda_l, else cast to u64
                     * before shifting.
                     */
                    printk("P: %04x: %x %x %04x %02x %x %x %16lx %x %x\n",
                           i,
                           p->post.svt, p->post.sq, p->post.sid, p->post.vector,
                           p->post.avail, p->post.fpd,
                           ((u64)p->post.pda_h << 32) | (p->post.pda_l << 6),
                           p->post.urg, p->post.p);

                print_cnt++;
            }
            if ( iremap_entries )
                unmap_vtd_domain_page(iremap_entries);
            /* Cross-check against the software count of live IRTEs. */
            if ( iommu_ir_ctrl(iommu)->iremap_num != print_cnt )
                printk("Warning: Print %d IRTE (actually have %d)!\n",
                       print_cnt, iommu_ir_ctrl(iommu)->iremap_num);

        }
    }

    /* Dump the I/O xAPIC redirection table(s). */
    if ( iommu_enabled )
    {
        int apic;
        union IO_APIC_reg_01 reg_01;
        struct IO_APIC_route_remap_entry *remap;
        struct ir_ctrl *ir_ctrl;

        for ( apic = 0; apic < nr_ioapics; apic++ )
        {
            iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
            ir_ctrl = iommu_ir_ctrl(iommu);
            /* Skip IOAPICs whose IOMMU has no live interrupt remapping. */
            if ( !ir_ctrl || !ir_ctrl->iremap_maddr || !ir_ctrl->iremap_num )
                continue;

            printk( "\nRedirection table of IOAPIC %x:\n", apic);

            /* IO xAPIC Version Register (holds the number of RTEs). */
            reg_01.raw = __io_apic_read(apic, 1);

            printk(" #entry IDX FMT MASK TRIG IRR POL STAT DELI VECTOR\n");
            for ( i = 0; i <= reg_01.bits.entries; i++ )
            {
                struct IO_APIC_route_entry rte =
                    __ioapic_read_entry(apic, i, TRUE);

                /* Reinterpret the RTE in its remapped layout. */
                remap = (struct IO_APIC_route_remap_entry *) &rte;
                if ( !remap->format )
                    continue;

                printk(" %02x: %04x %x %x %x %x %x %x"
                       " %x %02x\n", i,
                       remap->index_0_14 | (remap->index_15 << 15),
                       remap->format, remap->mask, remap->trigger, remap->irr,
                       remap->polarity, remap->delivery_status, remap->delivery_mode,
                       remap->vector);
            }
        }
    }
}
300
301 /*
302 * Local variables:
303 * mode: C
304 * c-file-style: "BSD"
305 * c-basic-offset: 4
306 * indent-tabs-mode: nil
307 * End:
308 */
309