/*
 * Copyright (C) 2011 Advanced Micro Devices, Inc.
 * Author: Leo Duran <leo.duran@amd.com>
 * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/sched.h>
#include <asm/amd-iommu.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
#include "../ats.h"

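/*
 * Copy one command (IOMMU_CMD_BUFFER_U32_PER_ENTRY dwords) into the command
 * ring at the current software tail.  Returns 1 on success, or 0 if
 * advancing the tail would collide with the hardware head pointer, i.e. the
 * command buffer is full.  The hardware tail register is not updated here;
 * see commit_iommu_command_buffer().
 */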
static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
{
    u32 tail, head, *cmd_buffer;
    int i;

    tail = iommu->cmd_buffer.tail;
    if ( ++tail == iommu->cmd_buffer.entries )
        tail = 0;

    head = iommu_get_rb_pointer(readl(iommu->mmio_base +
                                      IOMMU_CMD_BUFFER_HEAD_OFFSET));
    if ( head != tail )
    {
        cmd_buffer = (u32 *)(iommu->cmd_buffer.buffer +
                             (iommu->cmd_buffer.tail *
                              IOMMU_CMD_BUFFER_ENTRY_SIZE));

        for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; i++ )
            cmd_buffer[i] = cmd[i];

        iommu->cmd_buffer.tail = tail;
        return 1;
    }

    return 0;
}

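/*
 * Publish the software tail to the command buffer tail register so that
 * the IOMMU starts fetching any newly queued commands.
 */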
static void commit_iommu_command_buffer(struct amd_iommu *iommu)
{
    u32 tail = 0;

    iommu_set_rb_pointer(&tail, iommu->cmd_buffer.tail);
    writel(tail, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
}

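/*
 * Queue a command and, if it fit, make it visible to the hardware.
 * Returns 0 when the ring is full; callers in this file currently do not
 * check for (or retry on) that condition.
 */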
int send_iommu_command(struct amd_iommu *iommu, u32 cmd[])
{
    if ( queue_iommu_command(iommu, cmd) )
    {
        commit_iommu_command_buffer(iommu);
        return 1;
    }

    return 0;
}

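/*
 * Drain the command buffer: clear the (write-1-to-clear) ComWaitInt status
 * bit, queue a COMPLETION_WAIT command with its interrupt flag set, then
 * poll the status register for ComWaitInt to assert again, bounded by a
 * fixed loop count.  A warning is logged if completion is never observed.
 */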
static void flush_command_buffer(struct amd_iommu *iommu)
{
    u32 cmd[4], status;
    int loop_count, comp_wait;

    /* RW1C 'ComWaitInt' in status register */
    writel(IOMMU_STATUS_COMP_WAIT_INT_MASK,
           iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);

    /* send an empty COMPLETION_WAIT command to flush command buffer */
    cmd[3] = cmd[2] = 0;
    set_field_in_reg_u32(IOMMU_CMD_COMPLETION_WAIT, 0,
                         IOMMU_CMD_OPCODE_MASK,
                         IOMMU_CMD_OPCODE_SHIFT, &cmd[1]);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
                         IOMMU_COMP_WAIT_I_FLAG_MASK,
                         IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]);
    send_iommu_command(iommu, cmd);

    /* Make loop_count long enough for polling completion wait bit */
    loop_count = 1000;
    do {
        status = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
        comp_wait = get_field_from_reg_u32(status,
                                           IOMMU_STATUS_COMP_WAIT_INT_MASK,
                                           IOMMU_STATUS_COMP_WAIT_INT_SHIFT);
        --loop_count;
    } while ( !comp_wait && loop_count );

    if ( comp_wait )
    {
        /* RW1C 'ComWaitInt' in status register */
        writel(IOMMU_STATUS_COMP_WAIT_INT_MASK,
               iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
        return;
    }
    AMD_IOMMU_DEBUG("Warning: ComWaitInt bit did not assert!\n");
}

/* Build low level iommu command messages */
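/*
 * Each command is assembled as four dwords, cmd[0..3], forming one 128-bit
 * command buffer entry; the opcode is placed in the upper bits of cmd[1]
 * via IOMMU_CMD_OPCODE_{MASK,SHIFT}.  The 'order' arguments below are
 * expected to be 0, 9 or 18, i.e. 4K, 2M or 1G invalidation ranges encoded
 * through the S-flag address scheme described in the comments.
 */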
static void invalidate_iommu_pages(struct amd_iommu *iommu,
                                   u64 io_addr, u16 domain_id, u16 order)
{
    u64 addr_lo, addr_hi;
    u32 cmd[4], entry;
    int sflag = 0, pde = 0;

    ASSERT ( order == 0 || order == 9 || order == 18 );

    /* All pages associated with the domainID are invalidated */
    if ( order || (io_addr == INV_IOMMU_ALL_PAGES_ADDRESS) )
    {
        sflag = 1;
        pde = 1;
    }

    /*
     * If sflag == 1, the size of the invalidate command is determined
     * by the first zero bit in the address starting from Address[12].
     */
    if ( order )
    {
        u64 mask = 1ULL << (order - 1 + PAGE_SHIFT);
        io_addr &= ~mask;
        io_addr |= mask - 1;
    }

    addr_lo = io_addr & DMA_32BIT_MASK;
    addr_hi = io_addr >> 32;

    set_field_in_reg_u32(domain_id, 0,
                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry,
                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
                         &entry);
    cmd[1] = entry;

    set_field_in_reg_u32(sflag, 0,
                         IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
                         IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
    set_field_in_reg_u32(pde, entry,
                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK,
                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry);
    cmd[2] = entry;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK,
                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry);
    cmd[3] = entry;

    cmd[0] = 0;
    send_iommu_command(iommu, cmd);
}

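/*
 * INVALIDATE_IOTLB_PAGES targets the IOTLB of a remote, ATS-capable PCI
 * device identified by dev_id; maxpend, pasid and queueid are forwarded
 * verbatim from the caller into the corresponding command fields.
 */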
static void invalidate_iotlb_pages(struct amd_iommu *iommu,
                                   u16 maxpend, u32 pasid, u16 queueid,
                                   u64 io_addr, u16 dev_id, u16 order)
{
    u64 addr_lo, addr_hi;
    u32 cmd[4], entry;
    int sflag = 0;

    ASSERT ( order == 0 || order == 9 || order == 18 );

    if ( order || (io_addr == INV_IOMMU_ALL_PAGES_ADDRESS) )
        sflag = 1;

    /*
     * If sflag == 1, the size of the invalidate command is determined
     * by the first zero bit in the address starting from Address[12].
     */
    if ( order )
    {
        u64 mask = 1ULL << (order - 1 + PAGE_SHIFT);
        io_addr &= ~mask;
        io_addr |= mask - 1;
    }

    addr_lo = io_addr & DMA_32BIT_MASK;
    addr_hi = io_addr >> 32;

    set_field_in_reg_u32(dev_id, 0,
                         IOMMU_INV_IOTLB_PAGES_DEVICE_ID_MASK,
                         IOMMU_INV_IOTLB_PAGES_DEVICE_ID_SHIFT, &entry);

    set_field_in_reg_u32(maxpend, entry,
                         IOMMU_INV_IOTLB_PAGES_MAXPEND_MASK,
                         IOMMU_INV_IOTLB_PAGES_MAXPEND_SHIFT, &entry);

    set_field_in_reg_u32(pasid & 0xff, entry,
                         IOMMU_INV_IOTLB_PAGES_PASID1_MASK,
                         IOMMU_INV_IOTLB_PAGES_PASID1_SHIFT, &entry);
    cmd[0] = entry;

    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOTLB_PAGES, 0,
                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
                         &entry);

    set_field_in_reg_u32(pasid >> 8, entry,
                         IOMMU_INV_IOTLB_PAGES_PASID2_MASK,
                         IOMMU_INV_IOTLB_PAGES_PASID2_SHIFT,
                         &entry);

    set_field_in_reg_u32(queueid, entry,
                         IOMMU_INV_IOTLB_PAGES_QUEUEID_MASK,
                         IOMMU_INV_IOTLB_PAGES_QUEUEID_SHIFT,
                         &entry);
    cmd[1] = entry;

    set_field_in_reg_u32(sflag, 0,
                         IOMMU_INV_IOTLB_PAGES_S_FLAG_MASK,
                         IOMMU_INV_IOTLB_PAGES_S_FLAG_SHIFT, &entry);

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
                         IOMMU_INV_IOTLB_PAGES_ADDR_LOW_MASK,
                         IOMMU_INV_IOTLB_PAGES_ADDR_LOW_SHIFT, &entry);
    cmd[2] = entry;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_MASK,
                         IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_SHIFT, &entry);
    cmd[3] = entry;

    send_iommu_command(iommu, cmd);
}

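/* Invalidate the cached device table entry for a single device ID. */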
static void invalidate_dev_table_entry(struct amd_iommu *iommu,
                                       u16 device_id)
{
    u32 cmd[4], entry;

    cmd[3] = cmd[2] = 0;
    set_field_in_reg_u32(device_id, 0,
                         IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK,
                         IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT, &entry);
    cmd[0] = entry;

    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY, 0,
                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
                         &entry);
    cmd[1] = entry;

    send_iommu_command(iommu, cmd);
}

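/* Invalidate the cached interrupt remapping table for a single device ID. */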
static void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id)
{
    u32 cmd[4], entry;

    cmd[3] = cmd[2] = 0;
    set_field_in_reg_u32(device_id, 0,
                         IOMMU_INV_INT_TABLE_DEVICE_ID_MASK,
                         IOMMU_INV_INT_TABLE_DEVICE_ID_SHIFT, &entry);
    cmd[0] = entry;
    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_INT_TABLE, 0,
                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
                         &entry);
    cmd[1] = entry;
    send_iommu_command(iommu, cmd);
}

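/*
 * INVALIDATE_IOMMU_ALL asks the IOMMU to invalidate all of its internal
 * caches in one go (on hardware that supports this command).
 */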
void invalidate_iommu_all(struct amd_iommu *iommu)
{
    u32 cmd[4], entry;

    cmd[3] = cmd[2] = cmd[0] = 0;

    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_ALL, 0,
                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
                         &entry);
    cmd[1] = entry;

    send_iommu_command(iommu, cmd);
}

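/*
 * Flush the device IOTLB of one (possibly phantom) function.  This is a
 * no-op unless ATS is globally enabled, enabled on the device, and the
 * IOMMU that owns the device reports IOTLB support.
 */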
void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
                           uint64_t gaddr, unsigned int order)
{
    unsigned long flags;
    struct amd_iommu *iommu;
    unsigned int req_id, queueid, maxpend;

    if ( !ats_enabled )
        return;

    if ( !pci_ats_enabled(pdev->seg, pdev->bus, pdev->devfn) )
        return;

    iommu = find_iommu_for_device(pdev->seg, PCI_BDF2(pdev->bus, pdev->devfn));

    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("%s: Can't find iommu for %04x:%02x:%02x.%u\n",
                        __func__, pdev->seg, pdev->bus,
                        PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
        return;
    }

    if ( !iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
        return;

    req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(pdev->bus, devfn));
    queueid = req_id;
    maxpend = pdev->ats.queue_depth & 0xff;

    /* send INVALIDATE_IOTLB_PAGES command */
    spin_lock_irqsave(&iommu->lock, flags);
    invalidate_iotlb_pages(iommu, maxpend, 0, queueid, gaddr, req_id, order);
    flush_command_buffer(iommu);
    spin_unlock_irqrestore(&iommu->lock, flags);
}

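/*
 * Flush the IOTLBs of every device assigned to the domain, including any
 * phantom functions (stepping devfn by the device's phantom stride).
 */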
static void amd_iommu_flush_all_iotlbs(struct domain *d, uint64_t gaddr,
                                       unsigned int order)
{
    struct pci_dev *pdev;

    if ( !ats_enabled )
        return;

    for_each_pdev( d, pdev )
    {
        u8 devfn = pdev->devfn;

        do {
            amd_iommu_flush_iotlb(devfn, pdev, gaddr, order);
            devfn += pdev->phantom_stride;
        } while ( devfn != pdev->devfn &&
                  PCI_SLOT(devfn) == PCI_SLOT(pdev->devfn) );
    }
}

/* Flush iommu cache after p2m changes. */
static void _amd_iommu_flush_pages(struct domain *d,
                                   uint64_t gaddr, unsigned int order)
{
    unsigned long flags;
    struct amd_iommu *iommu;
    unsigned int dom_id = d->domain_id;

    /* send INVALIDATE_IOMMU_PAGES command */
    for_each_amd_iommu ( iommu )
    {
        spin_lock_irqsave(&iommu->lock, flags);
        invalidate_iommu_pages(iommu, gaddr, dom_id, order);
        flush_command_buffer(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
    }

    if ( ats_enabled )
        amd_iommu_flush_all_iotlbs(d, gaddr, order);
}

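/*
 * Public wrappers: flush everything mapped for a domain, or a 2^order
 * range of guest frames starting at gfn.
 */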
void amd_iommu_flush_all_pages(struct domain *d)
{
    _amd_iommu_flush_pages(d, INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}

void amd_iommu_flush_pages(struct domain *d,
                           unsigned long gfn, unsigned int order)
{
    _amd_iommu_flush_pages(d, (uint64_t) gfn << PAGE_SHIFT, order);
}

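/*
 * The helpers below expect the caller to hold iommu->lock; each queues a
 * single invalidation command and then waits for the command buffer to
 * drain.
 */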
void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf)
{
    ASSERT( spin_is_locked(&iommu->lock) );

    invalidate_dev_table_entry(iommu, bdf);
    flush_command_buffer(iommu);
}

void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf)
{
    ASSERT( spin_is_locked(&iommu->lock) );

    invalidate_interrupt_table(iommu, bdf);
    flush_command_buffer(iommu);
}

void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
{
    ASSERT( spin_is_locked(&iommu->lock) );

    invalidate_iommu_all(iommu);
    flush_command_buffer(iommu);
}

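/*
 * Submit a pre-built command (e.g. one forwarded on behalf of a guest's
 * emulated IOMMU) under the IOMMU lock and wait for it to complete.
 */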
void amd_iommu_send_guest_cmd(struct amd_iommu *iommu, u32 cmd[])
{
    unsigned long flags;

    spin_lock_irqsave(&iommu->lock, flags);

    send_iommu_command(iommu, cmd);
    flush_command_buffer(iommu);

    spin_unlock_irqrestore(&iommu->lock, flags);
}