1 /*
2 * Copyright (C) 2011 Advanced Micro Devices, Inc.
3 * Author: Wei Wang <wei.wang2@amd.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; If not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #include <xen/sched.h>
20 #include <asm/p2m.h>
21 #include <asm/amd-iommu.h>
22 #include <asm/hvm/svm/amd-iommu-proto.h>
23
24
/* Size (in bytes) and page count of the emulated IOMMU MMIO window. */
#define IOMMU_MMIO_SIZE                         0x8000
#define IOMMU_MMIO_PAGE_NR                      0x8
/* Ring buffer length field lives in the high word of a base register. */
#define RING_BF_LENGTH_MASK                     0x0F000000
#define RING_BF_LENGTH_SHIFT                    24

/* Values advertised in the guest-visible extended feature register. */
#define PASMAX_9_bit                            0x8
#define GUEST_CR3_1_LEVEL                       0x0
#define GUEST_ADDRESS_SIZE_6_LEVEL              0x2
#define HOST_ADDRESS_SIZE_6_LEVEL               0x2

/* Set/clear a bit in the guest-visible IOMMU status register (low word). */
#define guest_iommu_set_status(iommu, bit) \
        iommu_set_bit(&((iommu)->reg_status.lo), (bit))

#define guest_iommu_clear_status(iommu, bit) \
        iommu_clear_bit(&((iommu)->reg_status.lo), (bit))

/*
 * Combine a {hi,lo} register pair into a 64-bit value, and split one back.
 * All macro arguments are parenthesized to stay expansion-safe.
 */
#define reg_to_u64(reg) (((uint64_t)(reg).hi << 32) | (reg).lo)
#define u64_to_reg(reg, val) \
    do \
    { \
        (reg)->lo = (uint32_t)(val); \
        (reg)->hi = (val) >> 32; \
    } while (0)
48
/*
 * Translate a guest-visible BDF to the machine BDF.  The mapping is
 * currently identity (only one passthrough device is supported; see
 * host_domid() below for the matching simplification).
 */
static unsigned int machine_bdf(struct domain *d, uint16_t guest_bdf)
{
    return guest_bdf;
}
53
/*
 * Translate a machine BDF back to the guest-visible BDF.  Identity
 * mapping, the inverse of machine_bdf() above.
 */
static uint16_t guest_bdf(struct domain *d, uint16_t machine_bdf)
{
    return machine_bdf;
}
58
/* Fetch the domain's guest (virtual) IOMMU state; NULL when not set up. */
static inline struct guest_iommu *domain_iommu(struct domain *d)
{
    return dom_iommu(d)->arch.g_iommu;
}
63
/* Same as domain_iommu(), reached via a vCPU's owning domain. */
static inline struct guest_iommu *vcpu_iommu(struct vcpu *v)
{
    return dom_iommu(v->domain)->arch.g_iommu;
}
68
/* Mark the virtual IOMMU as enabled (guest set the enable control bit). */
static void guest_iommu_enable(struct guest_iommu *iommu)
{
    iommu->enabled = 1;
}
73
/* Mark the virtual IOMMU as disabled (also used on guest protocol errors). */
static void guest_iommu_disable(struct guest_iommu *iommu)
{
    iommu->enabled = 0;
}
78
/*
 * Reassemble the guest CR3 table root pointer that is scattered across
 * device-table-entry words 1..3, returning it as a guest frame number.
 *
 * NOTE(review): the three fragments are placed at bit offsets 12, 15 and
 * 31 before shifting right by PAGE_SHIFT — confirm these offsets against
 * the AMD IOMMU specification's DTE GCR3 TRP field layout.
 */
static uint64_t get_guest_cr3_from_dte(dev_entry_t *dte)
{
    uint64_t gcr3_1, gcr3_2, gcr3_3;

    gcr3_1 = get_field_from_reg_u32(dte->data[1],
                                    IOMMU_DEV_TABLE_GCR3_1_MASK,
                                    IOMMU_DEV_TABLE_GCR3_1_SHIFT);
    gcr3_2 = get_field_from_reg_u32(dte->data[2],
                                    IOMMU_DEV_TABLE_GCR3_2_MASK,
                                    IOMMU_DEV_TABLE_GCR3_2_SHIFT);
    gcr3_3 = get_field_from_reg_u32(dte->data[3],
                                    IOMMU_DEV_TABLE_GCR3_3_MASK,
                                    IOMMU_DEV_TABLE_GCR3_3_SHIFT);

    return ((gcr3_3 << 31) | (gcr3_2 << 15 ) | (gcr3_1 << 12)) >> PAGE_SHIFT;
}
95
/* Extract the guest domain id field from a device table entry. */
static uint16_t get_domid_from_dte(dev_entry_t *dte)
{
    return get_field_from_reg_u32(dte->data[2], IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
                                  IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT);
}
101
/* Extract the guest CR3 table level (GLX) field from a device table entry. */
static uint16_t get_glx_from_dte(dev_entry_t *dte)
{
    return get_field_from_reg_u32(dte->data[1], IOMMU_DEV_TABLE_GLX_MASK,
                                  IOMMU_DEV_TABLE_GLX_SHIFT);
}
107
/* Extract the guest translation valid (GV) bit from a device table entry. */
static uint16_t get_gv_from_dte(dev_entry_t *dte)
{
    return get_field_from_reg_u32(dte->data[1],IOMMU_DEV_TABLE_GV_MASK,
                                  IOMMU_DEV_TABLE_GV_SHIFT);
}
113
/*
 * Map a guest-chosen domain id onto the host domain id used in real
 * device table entries and commands.
 */
static unsigned int host_domid(struct domain *d, uint64_t g_domid)
{
    /* Only support one PPR device in guest for now */
    return d->domain_id;
}
119
/*
 * Convert a raw base register value (guest physical address plus control
 * bits) into the guest frame number of the table it points at.
 */
static unsigned long get_gfn_from_base_reg(uint64_t base_raw)
{
    uint64_t base = base_raw & PADDR_MASK;

    ASSERT ( base != 0 );

    return base >> PAGE_SHIFT;
}
126
guest_iommu_deliver_msi(struct domain * d)127 static void guest_iommu_deliver_msi(struct domain *d)
128 {
129 uint8_t vector, dest, dest_mode, delivery_mode, trig_mode;
130 struct guest_iommu *iommu = domain_iommu(d);
131
132 vector = iommu->msi.vector;
133 dest = iommu->msi.dest;
134 dest_mode = iommu->msi.dest_mode;
135 delivery_mode = iommu->msi.delivery_mode;
136 trig_mode = iommu->msi.trig_mode;
137
138 vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
139 }
140
/*
 * Translate entry number 'pos' of a guest table/ring (guest physical base
 * in base_raw, each entry entry_size bytes) into the MFN of the backing
 * page.  The p2m reference is dropped before returning, so callers must
 * themselves validate the result (they ASSERT mfn_valid()).
 *
 * NOTE(review): the reference is taken on gfn + idx but released via
 * put_gfn(d, gfn) — confirm this is intentional when idx != 0.
 */
static unsigned long guest_iommu_get_table_mfn(struct domain *d,
                                               uint64_t base_raw,
                                               unsigned int entry_size,
                                               unsigned int pos)
{
    unsigned long idx, gfn, mfn;
    p2m_type_t p2mt;

    gfn = get_gfn_from_base_reg(base_raw);
    idx = (pos * entry_size) >> PAGE_SHIFT;

    mfn = mfn_x(get_gfn(d, gfn + idx, &p2mt));
    put_gfn(d, gfn);

    return mfn;
}
157
/*
 * Latch the guest device table size from the size field of its base
 * register: (field value + 1) pages, per the register encoding.
 */
static void guest_iommu_enable_dev_table(struct guest_iommu *iommu)
{
    uint32_t length_raw = get_field_from_reg_u32(iommu->dev_table.reg_base.lo,
                                                 IOMMU_DEV_TABLE_SIZE_MASK,
                                                 IOMMU_DEV_TABLE_SIZE_SHIFT);
    iommu->dev_table.size = (length_raw + 1) * PAGE_SIZE;
}
165
/*
 * Latch the number of entries of a guest ring buffer (command buffer,
 * event log or PPR log) from the length field in the high word of its
 * base register: entries = 2^length.
 *
 * NOTE(review): entry_size is currently unused here; entry size is only
 * applied later when locating individual entries.
 */
static void guest_iommu_enable_ring_buffer(struct guest_iommu *iommu,
                                           struct guest_buffer *buffer,
                                           uint32_t entry_size)
{
    uint32_t length_raw = get_field_from_reg_u32(buffer->reg_base.hi,
                                                 RING_BF_LENGTH_MASK,
                                                 RING_BF_LENGTH_SHIFT);
    buffer->entries = 1 << length_raw;
}
175
/*
 * Forward a hardware PPR log entry into the guest's PPR log ring and
 * raise the virtual IOMMU MSI.  The device id inside the entry is
 * rewritten from machine to guest BDF.  Out-of-range ring pointers
 * (guest misprogramming) disable the virtual IOMMU.
 */
void guest_iommu_add_ppr_log(struct domain *d, u32 entry[])
{
    uint16_t gdev_id;
    unsigned long mfn, tail, head;
    ppr_entry_t *log, *log_base;
    struct guest_iommu *iommu;

    if ( !is_hvm_domain(d) )
        return;

    iommu = domain_iommu(d);
    if ( !iommu )
        return;

    tail = iommu_get_rb_pointer(iommu->ppr_log.reg_tail.lo);
    head = iommu_get_rb_pointer(iommu->ppr_log.reg_head.lo);

    /* Both pointers must lie inside the ring the guest configured. */
    if ( tail >= iommu->ppr_log.entries || head >= iommu->ppr_log.entries )
    {
        AMD_IOMMU_DEBUG("Error: guest iommu ppr log overflows\n");
        guest_iommu_disable(iommu);
        return;
    }

    /* Locate and map the guest page holding the tail entry. */
    mfn = guest_iommu_get_table_mfn(d, reg_to_u64(iommu->ppr_log.reg_base),
                                    sizeof(ppr_entry_t), tail);
    ASSERT(mfn_valid(_mfn(mfn)));

    log_base = map_domain_page(_mfn(mfn));
    log = log_base + tail % (PAGE_SIZE / sizeof(ppr_entry_t));

    /* Convert physical device id back into virtual device id */
    gdev_id = guest_bdf(d, iommu_get_devid_from_cmd(entry[0]));
    iommu_set_devid_to_cmd(&entry[0], gdev_id);

    memcpy(log, entry, sizeof(ppr_entry_t));

    /* Now shift ppr log tail pointer */
    if ( ++tail >= iommu->ppr_log.entries )
    {
        /* Wrapped around: flag overflow to the guest. */
        tail = 0;
        guest_iommu_set_status(iommu, IOMMU_STATUS_PPR_LOG_OVERFLOW_SHIFT);
    }
    iommu_set_rb_pointer(&iommu->ppr_log.reg_tail.lo, tail);
    unmap_domain_page(log_base);

    guest_iommu_deliver_msi(d);
}
224
/*
 * Forward a hardware event log entry into the guest's event log ring and
 * raise the virtual IOMMU MSI.  Mirrors guest_iommu_add_ppr_log() but for
 * event entries; the device id is rewritten to the guest BDF and bad ring
 * pointers disable the virtual IOMMU.
 */
void guest_iommu_add_event_log(struct domain *d, u32 entry[])
{
    uint16_t dev_id;
    unsigned long mfn, tail, head;
    event_entry_t *log, *log_base;
    struct guest_iommu *iommu;

    if ( !is_hvm_domain(d) )
        return;

    iommu = domain_iommu(d);
    if ( !iommu )
        return;

    tail = iommu_get_rb_pointer(iommu->event_log.reg_tail.lo);
    head = iommu_get_rb_pointer(iommu->event_log.reg_head.lo);

    /* Both pointers must lie inside the ring the guest configured. */
    if ( tail >= iommu->event_log.entries || head >= iommu->event_log.entries )
    {
        AMD_IOMMU_DEBUG("Error: guest iommu event overflows\n");
        guest_iommu_disable(iommu);
        return;
    }

    /* Locate and map the guest page holding the tail entry. */
    mfn = guest_iommu_get_table_mfn(d, reg_to_u64(iommu->event_log.reg_base),
                                    sizeof(event_entry_t), tail);
    ASSERT(mfn_valid(_mfn(mfn)));

    log_base = map_domain_page(_mfn(mfn));
    log = log_base + tail % (PAGE_SIZE / sizeof(event_entry_t));

    /* re-write physical device id into virtual device id */
    dev_id = guest_bdf(d, iommu_get_devid_from_cmd(entry[0]));
    iommu_set_devid_to_cmd(&entry[0], dev_id);
    memcpy(log, entry, sizeof(event_entry_t));

    /* Now shift event log tail pointer */
    if ( ++tail >= iommu->event_log.entries )
    {
        /* Wrapped around: flag overflow to the guest. */
        tail = 0;
        guest_iommu_set_status(iommu, IOMMU_STATUS_EVENT_OVERFLOW_SHIFT);
    }

    iommu_set_rb_pointer(&iommu->event_log.reg_tail.lo, tail);
    unmap_domain_page(log_base);

    guest_iommu_deliver_msi(d);
}
273
/*
 * Handle a guest COMPLETE_PPR_REQUEST command: translate the guest BDF to
 * the machine BDF, find the owning hardware IOMMU and forward the command.
 * Returns 0 on success, -ENODEV if no IOMMU serves the device.
 */
static int do_complete_ppr_request(struct domain *d, cmd_entry_t *cmd)
{
    uint16_t dev_id;
    struct amd_iommu *iommu;

    dev_id = machine_bdf(d, iommu_get_devid_from_cmd(cmd->data[0]));
    iommu = find_iommu_for_device(0, dev_id);

    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("%s: Fail to find iommu for bdf %x\n",
                        __func__, dev_id);
        return -ENODEV;
    }

    /* replace virtual device id into physical */
    iommu_set_devid_to_cmd(&cmd->data[0], dev_id);
    amd_iommu_send_guest_cmd(iommu, cmd->data);

    return 0;
}
295
/*
 * Handle a guest INVALIDATE_IOMMU_PAGES command: rewrite the guest domain
 * id to the host domain id in place, then broadcast the command to every
 * hardware IOMMU.  Always returns 0.
 */
static int do_invalidate_pages(struct domain *d, cmd_entry_t *cmd)
{
    uint16_t gdom_id, hdom_id;
    struct amd_iommu *iommu = NULL;

    gdom_id = get_field_from_reg_u32(cmd->data[1],
                                     IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
                                     IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT);

    hdom_id = host_domid(d, gdom_id);
    set_field_in_reg_u32(hdom_id, cmd->data[1],
                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &cmd->data[1]);

    for_each_amd_iommu ( iommu )
        amd_iommu_send_guest_cmd(iommu, cmd->data);

    return 0;
}
315
/*
 * Handle a guest INVALIDATE_IOMMU_ALL command by flushing all of the
 * domain's pages.  Always returns 0.
 *
 * NOTE(review): amd_iommu_flush_all_pages() takes only the domain, yet is
 * invoked once per hardware IOMMU — the loop looks redundant if that
 * helper already flushes on every IOMMU; confirm against its definition.
 */
static int do_invalidate_all(struct domain *d, cmd_entry_t *cmd)
{
    struct amd_iommu *iommu = NULL;

    for_each_amd_iommu ( iommu )
        amd_iommu_flush_all_pages(d);

    return 0;
}
325
/*
 * Handle a guest INVALIDATE_IOTLB_PAGES command: translate the guest BDF
 * to the machine BDF and forward the command to the IOMMU serving that
 * device.  Returns 0 on success, -ENODEV if no IOMMU serves the device.
 */
static int do_invalidate_iotlb_pages(struct domain *d, cmd_entry_t *cmd)
{
    struct amd_iommu *iommu;
    uint16_t dev_id;

    dev_id = machine_bdf(d, iommu_get_devid_from_cmd(cmd->data[0]));

    iommu = find_iommu_for_device(0, dev_id);
    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("%s: Fail to find iommu for bdf %x\n",
                        __func__, dev_id);
        return -ENODEV;
    }

    iommu_set_devid_to_cmd(&cmd->data[0], dev_id);
    amd_iommu_send_guest_cmd(iommu, cmd->data);

    return 0;
}
346
/*
 * Handle a guest COMPLETION_WAIT command.  Depending on the command's
 * flags this sets the completion-wait interrupt status bit (I flag),
 * stores a 64-bit datum at the guest-supplied address (S flag), and
 * delivers the virtual IOMMU MSI if the interrupt is both enabled and
 * pending.  Always returns 0.
 */
static int do_completion_wait(struct domain *d, cmd_entry_t *cmd)
{
    bool_t com_wait_int_en, com_wait_int, i, s;
    struct guest_iommu *iommu;
    unsigned long gfn;
    p2m_type_t p2mt;

    iommu = domain_iommu(d);

    /* I flag: raise the completion-wait interrupt; S flag: store datum. */
    i = iommu_get_bit(cmd->data[0], IOMMU_COMP_WAIT_I_FLAG_SHIFT);
    s = iommu_get_bit(cmd->data[0], IOMMU_COMP_WAIT_S_FLAG_SHIFT);

    if ( i )
        guest_iommu_set_status(iommu, IOMMU_STATUS_COMP_WAIT_INT_SHIFT);

    if ( s )
    {
        uint64_t gaddr_lo, gaddr_hi, gaddr_64, data;
        void *vaddr;

        data = (uint64_t)cmd->data[3] << 32 | cmd->data[2];
        gaddr_lo = get_field_from_reg_u32(cmd->data[0],
                                          IOMMU_COMP_WAIT_ADDR_LOW_MASK,
                                          IOMMU_COMP_WAIT_ADDR_LOW_SHIFT);
        gaddr_hi = get_field_from_reg_u32(cmd->data[1],
                                          IOMMU_COMP_WAIT_ADDR_HIGH_MASK,
                                          IOMMU_COMP_WAIT_ADDR_HIGH_SHIFT);

        /* Low field encodes address bits 31:3, hence the << 3. */
        gaddr_64 = (gaddr_hi << 32) | (gaddr_lo << 3);

        gfn = gaddr_64 >> PAGE_SHIFT;
        /* NOTE(review): the mfn from get_gfn() is mapped without an
         * mfn_valid() check, unlike the other paths in this file. */
        vaddr = map_domain_page(get_gfn(d, gfn ,&p2mt));
        put_gfn(d, gfn);

        write_u64_atomic((uint64_t *)(vaddr + (gaddr_64 & (PAGE_SIZE-1))),
                         data);
        unmap_domain_page(vaddr);
    }

    com_wait_int_en = iommu_get_bit(iommu->reg_ctrl.lo,
                                    IOMMU_CONTROL_COMP_WAIT_INT_SHIFT);
    com_wait_int = iommu_get_bit(iommu->reg_status.lo,
                                 IOMMU_STATUS_COMP_WAIT_INT_SHIFT);

    if ( com_wait_int_en && com_wait_int )
        guest_iommu_deliver_msi(d);

    return 0;
}
396
/*
 * Handle a guest INVALIDATE_DEVTAB_ENTRY command: read the guest's device
 * table entry for the device, extract domain id / gCR3 / GLX / GV, and
 * mirror that guest translation setup into the host device table entry,
 * then flush the device on the hardware IOMMU.
 *
 * Returns 0 (including for benignly-ignored cases: BDF 0, out-of-range
 * DTE index, or gCR3 not yet set) or -ENODEV if no IOMMU serves the
 * device.
 */
static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd)
{
    uint16_t gbdf, mbdf, req_id, gdom_id, hdom_id;
    dev_entry_t *gdte, *mdte, *dte_base;
    struct amd_iommu *iommu = NULL;
    struct guest_iommu *g_iommu;
    uint64_t gcr3_gfn, gcr3_mfn;
    uint8_t glx, gv;
    unsigned long dte_mfn, flags;
    p2m_type_t p2mt;

    g_iommu = domain_iommu(d);
    gbdf = iommu_get_devid_from_cmd(cmd->data[0]);
    mbdf = machine_bdf(d, gbdf);

    /* Guest can only update DTEs for its passthru devices */
    if ( mbdf == 0 || gbdf == 0 )
        return 0;

    /* Sometimes guest invalidates devices from non-exists dtes */
    if ( (gbdf * sizeof(dev_entry_t)) > g_iommu->dev_table.size )
        return 0;

    dte_mfn = guest_iommu_get_table_mfn(d,
                                        reg_to_u64(g_iommu->dev_table.reg_base),
                                        sizeof(dev_entry_t), gbdf);
    ASSERT(mfn_valid(_mfn(dte_mfn)));

    /* Read guest dte information */
    dte_base = map_domain_page(_mfn(dte_mfn));

    gdte = dte_base + gbdf % (PAGE_SIZE / sizeof(dev_entry_t));

    gdom_id = get_domid_from_dte(gdte);
    gcr3_gfn = get_guest_cr3_from_dte(gdte);
    glx = get_glx_from_dte(gdte);
    gv = get_gv_from_dte(gdte);

    unmap_domain_page(dte_base);

    /* Do not update host dte before gcr3 has been set */
    if ( gcr3_gfn == 0 )
        return 0;

    /* Translate the guest's gCR3 frame to a machine frame. */
    gcr3_mfn = mfn_x(get_gfn(d, gcr3_gfn, &p2mt));
    put_gfn(d, gcr3_gfn);

    ASSERT(mfn_valid(_mfn(gcr3_mfn)));

    iommu = find_iommu_for_device(0, mbdf);
    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("%s: Fail to find iommu for bdf %x!\n",
                        __func__, mbdf);
        return -ENODEV;
    }

    /* Setup host device entry */
    hdom_id = host_domid(d, gdom_id);
    req_id = get_dma_requestor_id(iommu->seg, mbdf);
    mdte = iommu->dev_table.buffer + (req_id * sizeof(dev_entry_t));

    /* Update the host DTE and flush under the IOMMU lock. */
    spin_lock_irqsave(&iommu->lock, flags);
    iommu_dte_set_guest_cr3((u32 *)mdte, hdom_id,
                            gcr3_mfn << PAGE_SHIFT, gv, glx);

    amd_iommu_flush_device(iommu, req_id);
    spin_unlock_irqrestore(&iommu->lock, flags);

    return 0;
}
468
/*
 * Tasklet body: drain the guest's command buffer, dispatching each
 * command to its handler.  The tasklet data is the domain pointer cast
 * to unsigned long (see guest_iommu_init()).  Any handler failure, or
 * ring pointers outside the configured buffer, disables the virtual
 * IOMMU; the head pointer is written back when the loop ends.
 */
static void guest_iommu_process_command(unsigned long _d)
{
    unsigned long opcode, tail, head, entries_per_page, cmd_mfn;
    cmd_entry_t *cmd, *cmd_base;
    struct domain *d = (struct domain *)_d;
    struct guest_iommu *iommu;

    iommu = domain_iommu(d);

    if ( !iommu->enabled )
        return;

    head = iommu_get_rb_pointer(iommu->cmd_buffer.reg_head.lo);
    tail = iommu_get_rb_pointer(iommu->cmd_buffer.reg_tail.lo);

    /* Tail pointer is rolled over by guest driver, value outside
     * cmd_buffer_entries cause iommu disabled
     */

    if ( tail >= iommu->cmd_buffer.entries ||
         head >= iommu->cmd_buffer.entries )
    {
        AMD_IOMMU_DEBUG("Error: guest iommu cmd buffer overflows\n");
        guest_iommu_disable(iommu);
        return;
    }

    entries_per_page = PAGE_SIZE / sizeof(cmd_entry_t);

    /* Consume commands until head catches up with the guest's tail. */
    while ( head != tail )
    {
        int ret = 0;

        /* Map the guest page holding the current command. */
        cmd_mfn = guest_iommu_get_table_mfn(d,
                                            reg_to_u64(iommu->cmd_buffer.reg_base),
                                            sizeof(cmd_entry_t), head);
        ASSERT(mfn_valid(_mfn(cmd_mfn)));

        cmd_base = map_domain_page(_mfn(cmd_mfn));
        cmd = cmd_base + head % entries_per_page;

        opcode = get_field_from_reg_u32(cmd->data[1],
                                        IOMMU_CMD_OPCODE_MASK,
                                        IOMMU_CMD_OPCODE_SHIFT);
        switch ( opcode )
        {
        case IOMMU_CMD_COMPLETION_WAIT:
            ret = do_completion_wait(d, cmd);
            break;
        case IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY:
            ret = do_invalidate_dte(d, cmd);
            break;
        case IOMMU_CMD_INVALIDATE_IOMMU_PAGES:
            ret = do_invalidate_pages(d, cmd);
            break;
        case IOMMU_CMD_INVALIDATE_IOTLB_PAGES:
            ret = do_invalidate_iotlb_pages(d, cmd);
            break;
        case IOMMU_CMD_INVALIDATE_INT_TABLE:
            /* Interrupt remapping tables are not virtualized: no-op. */
            break;
        case IOMMU_CMD_COMPLETE_PPR_REQUEST:
            ret = do_complete_ppr_request(d, cmd);
            break;
        case IOMMU_CMD_INVALIDATE_IOMMU_ALL:
            ret = do_invalidate_all(d, cmd);
            break;
        default:
            AMD_IOMMU_DEBUG("CMD: Unknown command cmd_type = %lx "
                            "head = %ld\n", opcode, head);
            break;
        }

        unmap_domain_page(cmd_base);
        if ( ++head >= iommu->cmd_buffer.entries )
            head = 0;
        if ( ret )
            guest_iommu_disable(iommu);
    }

    /* Now shift cmd buffer head pointer */
    iommu_set_rb_pointer(&iommu->cmd_buffer.reg_head.lo, head);
    return;
}
552
/*
 * Emulate a write to the IOMMU control register: compare the new value
 * against the current one, and (de)activate the virtual IOMMU, command
 * buffer processing, event log and PPR log accordingly.  The new value
 * is stored as the current control register at the end.  Always returns 0.
 */
static int guest_iommu_write_ctrl(struct guest_iommu *iommu, uint64_t newctrl)
{
    bool_t cmd_en, event_en, iommu_en, ppr_en, ppr_log_en;
    bool_t cmd_en_old, event_en_old, iommu_en_old;
    bool_t cmd_run;

    /* Extract new vs. current enable bits for each sub-function. */
    iommu_en = iommu_get_bit(newctrl,
                             IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT);
    iommu_en_old = iommu_get_bit(iommu->reg_ctrl.lo,
                                 IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT);

    cmd_en = iommu_get_bit(newctrl,
                           IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT);
    cmd_en_old = iommu_get_bit(iommu->reg_ctrl.lo,
                               IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT);
    cmd_run = iommu_get_bit(iommu->reg_status.lo,
                            IOMMU_STATUS_CMD_BUFFER_RUN_SHIFT);
    event_en = iommu_get_bit(newctrl,
                             IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT);
    event_en_old = iommu_get_bit(iommu->reg_ctrl.lo,
                                 IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT);

    ppr_en = iommu_get_bit(newctrl,
                           IOMMU_CONTROL_PPR_ENABLE_SHIFT);
    ppr_log_en = iommu_get_bit(newctrl,
                               IOMMU_CONTROL_PPR_LOG_ENABLE_SHIFT);

    if ( iommu_en )
    {
        guest_iommu_enable(iommu);
        guest_iommu_enable_dev_table(iommu);
    }

    if ( iommu_en && cmd_en )
    {
        guest_iommu_enable_ring_buffer(iommu, &iommu->cmd_buffer,
                                       sizeof(cmd_entry_t));
        /* Enable iommu command processing */
        tasklet_schedule(&iommu->cmd_buffer_tasklet);
    }

    if ( iommu_en && event_en )
    {
        guest_iommu_enable_ring_buffer(iommu, &iommu->event_log,
                                       sizeof(event_entry_t));
        guest_iommu_set_status(iommu, IOMMU_STATUS_EVENT_LOG_RUN_SHIFT);
        guest_iommu_clear_status(iommu, IOMMU_STATUS_EVENT_OVERFLOW_SHIFT);
    }

    if ( iommu_en && ppr_en && ppr_log_en )
    {
        guest_iommu_enable_ring_buffer(iommu, &iommu->ppr_log,
                                       sizeof(ppr_entry_t));
        guest_iommu_set_status(iommu, IOMMU_STATUS_PPR_LOG_RUN_SHIFT);
        guest_iommu_clear_status(iommu, IOMMU_STATUS_PPR_LOG_OVERFLOW_SHIFT);
    }

    if ( iommu_en && cmd_en_old && !cmd_en )
    {
        /* Disable iommu command processing */
        tasklet_kill(&iommu->cmd_buffer_tasklet);
    }

    if ( event_en_old && !event_en )
        guest_iommu_clear_status(iommu, IOMMU_STATUS_EVENT_LOG_RUN_SHIFT);

    if ( iommu_en_old && !iommu_en )
        guest_iommu_disable(iommu);

    /* Commit the new control value as the current register state. */
    u64_to_reg(&iommu->reg_ctrl, newctrl);
    return 0;
}
625
iommu_mmio_read64(struct guest_iommu * iommu,unsigned long offset)626 static uint64_t iommu_mmio_read64(struct guest_iommu *iommu,
627 unsigned long offset)
628 {
629 uint64_t val;
630
631 switch ( offset )
632 {
633 case IOMMU_DEV_TABLE_BASE_LOW_OFFSET:
634 val = reg_to_u64(iommu->dev_table.reg_base);
635 break;
636 case IOMMU_CMD_BUFFER_BASE_LOW_OFFSET:
637 val = reg_to_u64(iommu->cmd_buffer.reg_base);
638 break;
639 case IOMMU_EVENT_LOG_BASE_LOW_OFFSET:
640 val = reg_to_u64(iommu->event_log.reg_base);
641 break;
642 case IOMMU_PPR_LOG_BASE_LOW_OFFSET:
643 val = reg_to_u64(iommu->ppr_log.reg_base);
644 break;
645 case IOMMU_CMD_BUFFER_HEAD_OFFSET:
646 val = reg_to_u64(iommu->cmd_buffer.reg_head);
647 break;
648 case IOMMU_CMD_BUFFER_TAIL_OFFSET:
649 val = reg_to_u64(iommu->cmd_buffer.reg_tail);
650 break;
651 case IOMMU_EVENT_LOG_HEAD_OFFSET:
652 val = reg_to_u64(iommu->event_log.reg_head);
653 break;
654 case IOMMU_EVENT_LOG_TAIL_OFFSET:
655 val = reg_to_u64(iommu->event_log.reg_tail);
656 break;
657 case IOMMU_PPR_LOG_HEAD_OFFSET:
658 val = reg_to_u64(iommu->ppr_log.reg_head);
659 break;
660 case IOMMU_PPR_LOG_TAIL_OFFSET:
661 val = reg_to_u64(iommu->ppr_log.reg_tail);
662 break;
663 case IOMMU_CONTROL_MMIO_OFFSET:
664 val = reg_to_u64(iommu->reg_ctrl);
665 break;
666 case IOMMU_STATUS_MMIO_OFFSET:
667 val = reg_to_u64(iommu->reg_status);
668 break;
669 case IOMMU_EXT_FEATURE_MMIO_OFFSET:
670 val = reg_to_u64(iommu->reg_ext_feature);
671 break;
672
673 default:
674 AMD_IOMMU_DEBUG("Guest reads unknown mmio offset = %lx\n", offset);
675 val = 0;
676 break;
677 }
678
679 return val;
680 }
681
/*
 * HVM MMIO read handler for the emulated IOMMU window.  Rejects
 * unaligned or >8-byte accesses; otherwise performs an aligned 64-bit
 * register read under the lock and extracts the requested sub-word.
 */
static int guest_iommu_mmio_read(struct vcpu *v, unsigned long addr,
                                 unsigned int len, unsigned long *pval)
{
    struct guest_iommu *iommu = vcpu_iommu(v);
    unsigned long offset;
    uint64_t val;
    uint32_t mmio, shift;
    uint64_t mask = 0;

    offset = addr - iommu->mmio_base;

    /* Access must be naturally aligned and at most 8 bytes wide. */
    if ( unlikely((offset & (len - 1 )) || (len > 8)) )
    {
        AMD_IOMMU_DEBUG("iommu mmio read access is not aligned:"
                        " offset = %lx, len = %x\n", offset, len);
        return X86EMUL_UNHANDLEABLE;
    }

    mask = (len == 8) ? ~0ULL : (1ULL << (len * 8)) - 1;
    shift = (offset & 7u) * 8;

    /* mmio access is always aligned on 8-byte boundary */
    mmio = offset & (~7u);

    spin_lock(&iommu->lock);
    val = iommu_mmio_read64(iommu, mmio);
    spin_unlock(&iommu->lock);

    /* Narrow the 64-bit register value to the bytes actually read. */
    *pval = (val >> shift ) & mask;

    return X86EMUL_OKAY;
}
714
/*
 * Apply a 64-bit write to the emulated register at 'offset'.  Control
 * register writes go through guest_iommu_write_ctrl(); a command buffer
 * tail update kicks the command-processing tasklet; the status register
 * is write-1-to-clear for the maskable interrupt/overflow bits.  Unknown
 * offsets are logged and ignored.
 */
static void guest_iommu_mmio_write64(struct guest_iommu *iommu,
                                     unsigned long offset, uint64_t val)
{
    switch ( offset )
    {
    case IOMMU_DEV_TABLE_BASE_LOW_OFFSET:
        u64_to_reg(&iommu->dev_table.reg_base, val);
        break;
    case IOMMU_CMD_BUFFER_BASE_LOW_OFFSET:
        u64_to_reg(&iommu->cmd_buffer.reg_base, val);
        break;
    case IOMMU_EVENT_LOG_BASE_LOW_OFFSET:
        u64_to_reg(&iommu->event_log.reg_base, val);
        break;
    case IOMMU_PPR_LOG_BASE_LOW_OFFSET:
        u64_to_reg(&iommu->ppr_log.reg_base, val);
        break;
    case IOMMU_CONTROL_MMIO_OFFSET:
        guest_iommu_write_ctrl(iommu, val);
        break;
    case IOMMU_CMD_BUFFER_HEAD_OFFSET:
        u64_to_reg(&iommu->cmd_buffer.reg_head, val);
        break;
    case IOMMU_CMD_BUFFER_TAIL_OFFSET:
        u64_to_reg(&iommu->cmd_buffer.reg_tail, val);
        /* New commands were queued: process them. */
        tasklet_schedule(&iommu->cmd_buffer_tasklet);
        break;
    case IOMMU_EVENT_LOG_HEAD_OFFSET:
        u64_to_reg(&iommu->event_log.reg_head, val);
        break;
    case IOMMU_EVENT_LOG_TAIL_OFFSET:
        u64_to_reg(&iommu->event_log.reg_tail, val);
        break;
    case IOMMU_PPR_LOG_HEAD_OFFSET:
        u64_to_reg(&iommu->ppr_log.reg_head, val);
        break;
    case IOMMU_PPR_LOG_TAIL_OFFSET:
        u64_to_reg(&iommu->ppr_log.reg_tail, val);
        break;
    case IOMMU_STATUS_MMIO_OFFSET:
        /* Only these bits are software-clearable; writing 1 clears them. */
        val &= IOMMU_STATUS_EVENT_OVERFLOW_MASK |
               IOMMU_STATUS_EVENT_LOG_INT_MASK |
               IOMMU_STATUS_COMP_WAIT_INT_MASK |
               IOMMU_STATUS_PPR_LOG_OVERFLOW_MASK |
               IOMMU_STATUS_PPR_LOG_INT_MASK |
               IOMMU_STATUS_GAPIC_LOG_OVERFLOW_MASK |
               IOMMU_STATUS_GAPIC_LOG_INT_MASK;
        u64_to_reg(&iommu->reg_status, reg_to_u64(iommu->reg_status) & ~val);
        break;

    default:
        AMD_IOMMU_DEBUG("guest writes unknown mmio offset = %lx,"
                        " val = %" PRIx64 "\n", offset, val);
        break;
    }
}
771
/*
 * HVM MMIO write handler for the emulated IOMMU window.  Rejects
 * unaligned or >8-byte accesses; sub-64-bit writes are implemented as a
 * read-modify-write of the aligned 64-bit register under the lock.
 */
static int guest_iommu_mmio_write(struct vcpu *v, unsigned long addr,
                                  unsigned int len, unsigned long val)
{
    struct guest_iommu *iommu = vcpu_iommu(v);
    unsigned long offset;
    uint64_t reg_old, mmio;
    uint32_t shift;
    uint64_t mask = 0;

    offset = addr - iommu->mmio_base;

    /* Access must be naturally aligned and at most 8 bytes wide. */
    if ( unlikely((offset & (len - 1)) || (len > 8)) )
    {
        AMD_IOMMU_DEBUG("iommu mmio write access is not aligned:"
                        " offset = %lx, len = %x\n", offset, len);
        return X86EMUL_UNHANDLEABLE;
    }

    mask = (len == 8) ? ~0ULL : (1ULL << (len * 8)) - 1;
    shift = (offset & 7) * 8;

    /* mmio access is always aligned on 8-byte boundary */
    mmio = offset & ~7;

    spin_lock(&iommu->lock);

    /* Merge the written bytes into the current 64-bit register value. */
    reg_old = iommu_mmio_read64(iommu, mmio);
    reg_old &= ~(mask << shift);
    val = reg_old | ((val & mask) << shift);
    guest_iommu_mmio_write64(iommu, mmio, val);

    spin_unlock(&iommu->lock);

    return X86EMUL_OKAY;
}
807
/*
 * Set the guest physical base of the emulated IOMMU MMIO window and
 * convert each of its pages to p2m_mmio_dm so accesses trap to the
 * handlers above.  Returns 0 on success, -EACCES if the domain has no
 * virtual IOMMU.
 */
int guest_iommu_set_base(struct domain *d, uint64_t base)
{
    p2m_type_t t;
    struct guest_iommu *iommu = domain_iommu(d);

    if ( !iommu )
        return -EACCES;

    iommu->mmio_base = base;
    base >>= PAGE_SHIFT;

    for ( int i = 0; i < IOMMU_MMIO_PAGE_NR; i++ )
    {
        unsigned long gfn = base + i;

        get_gfn_query(d, gfn, &t);
        p2m_change_type_one(d, gfn, t, p2m_mmio_dm);
        put_gfn(d, gfn);
    }

    return 0;
}
830
831 /* Initialize mmio read only bits */
/*
 * Populate the read-only extended feature register with the capabilities
 * this virtual IOMMU advertises to the guest: prefetch, PPR log, guest
 * translation, invalidate-all, 6-level host/guest address sizes,
 * single-level gCR3 and a 9-bit PASID.
 */
static void guest_iommu_reg_init(struct guest_iommu *iommu)
{
    uint32_t lower, upper;

    lower = upper = 0;
    /* Support prefetch */
    iommu_set_bit(&lower,IOMMU_EXT_FEATURE_PREFSUP_SHIFT);
    /* Support PPR log */
    iommu_set_bit(&lower,IOMMU_EXT_FEATURE_PPRSUP_SHIFT);
    /* Support guest translation */
    iommu_set_bit(&lower,IOMMU_EXT_FEATURE_GTSUP_SHIFT);
    /* Support invalidate all command */
    iommu_set_bit(&lower,IOMMU_EXT_FEATURE_IASUP_SHIFT);

    /* Host translation size has 6 levels */
    set_field_in_reg_u32(HOST_ADDRESS_SIZE_6_LEVEL, lower,
                         IOMMU_EXT_FEATURE_HATS_MASK,
                         IOMMU_EXT_FEATURE_HATS_SHIFT,
                         &lower);
    /* Guest translation size has 6 levels */
    set_field_in_reg_u32(GUEST_ADDRESS_SIZE_6_LEVEL, lower,
                         IOMMU_EXT_FEATURE_GATS_MASK,
                         IOMMU_EXT_FEATURE_GATS_SHIFT,
                         &lower);
    /* Single level gCR3 */
    set_field_in_reg_u32(GUEST_CR3_1_LEVEL, lower,
                         IOMMU_EXT_FEATURE_GLXSUP_MASK,
                         IOMMU_EXT_FEATURE_GLXSUP_SHIFT, &lower);
    /* 9 bit PASID */
    set_field_in_reg_u32(PASMAX_9_bit, upper,
                         IOMMU_EXT_FEATURE_PASMAX_MASK,
                         IOMMU_EXT_FEATURE_PASMAX_SHIFT, &upper);

    iommu->reg_ext_feature.lo = lower;
    iommu->reg_ext_feature.hi = upper;
}
868
guest_iommu_mmio_range(struct vcpu * v,unsigned long addr)869 static int guest_iommu_mmio_range(struct vcpu *v, unsigned long addr)
870 {
871 struct guest_iommu *iommu = vcpu_iommu(v);
872
873 return iommu && addr >= iommu->mmio_base &&
874 addr < iommu->mmio_base + IOMMU_MMIO_SIZE;
875 }
876
/* Intercept ops wiring the emulated IOMMU MMIO window into the HVM layer. */
static const struct hvm_mmio_ops iommu_mmio_ops = {
    .check = guest_iommu_mmio_range,
    .read = guest_iommu_mmio_read,
    .write = guest_iommu_mmio_write
};
882
883 /* Domain specific initialization */
/* Domain specific initialization */
/*
 * Create the per-domain virtual IOMMU: allocate and initialize the state,
 * set up the command-processing tasklet and register the MMIO handlers.
 * A no-op (returning 0) for domains that don't qualify.
 *
 * NOTE(review): returns 1 on allocation failure while the rest of this
 * file uses negative errno values — callers presumably only test for
 * non-zero; confirm before changing.
 */
int guest_iommu_init(struct domain* d)
{
    struct guest_iommu *iommu;
    struct domain_iommu *hd = dom_iommu(d);

    if ( !is_hvm_domain(d) || !iommu_enabled || !iommuv2_enabled ||
         !has_viommu(d) )
        return 0;

    iommu = xzalloc(struct guest_iommu);
    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("Error allocating guest iommu structure.\n");
        return 1;
    }

    guest_iommu_reg_init(iommu);
    /* MMIO base is invalid until guest_iommu_set_base() is called. */
    iommu->mmio_base = ~0ULL;
    iommu->domain = d;
    hd->arch.g_iommu = iommu;

    tasklet_init(&iommu->cmd_buffer_tasklet,
                 guest_iommu_process_command, (unsigned long)d);

    spin_lock_init(&iommu->lock);

    register_mmio_handler(d, &iommu_mmio_ops);

    return 0;
}
914
guest_iommu_destroy(struct domain * d)915 void guest_iommu_destroy(struct domain *d)
916 {
917 struct guest_iommu *iommu;
918
919 iommu = domain_iommu(d);
920 if ( !iommu )
921 return;
922
923 tasklet_kill(&iommu->cmd_buffer_tasklet);
924 xfree(iommu);
925
926 dom_iommu(d)->arch.g_iommu = NULL;
927 }
928