// Copyright 2018 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include "iommu_impl.h"

#include <dev/interrupt.h>
#include <err.h>
#include <fbl/algorithm.h>
#include <fbl/auto_lock.h>
#include <fbl/limits.h>
#include <fbl/ref_ptr.h>
#include <ktl/move.h>
#include <ktl/unique_ptr.h>
#include <new>
#include <platform.h>
#include <trace.h>
#include <vm/vm_aspace.h>
#include <vm/vm_object_paged.h>
#include <vm/vm_object_physical.h>
#include <zircon/time.h>

#include "context_table_state.h"
#include "device_context.h"
#include "hw.h"

#define LOCAL_TRACE 0

namespace intel_iommu {

IommuImpl::IommuImpl(volatile void* register_base,
                     ktl::unique_ptr<const uint8_t[]> desc, size_t desc_len)
    : desc_(ktl::move(desc)), desc_len_(desc_len), mmio_(register_base) {
    memset(&irq_block_, 0, sizeof(irq_block_));
    // desc_len_ is currently unused, but we stash it in case we need the
    // descriptor length later.  Assigning it here silences a Clang warning.
    desc_len_ = desc_len;
}

zx_status_t IommuImpl::Create(ktl::unique_ptr<const uint8_t[]> desc_bytes, size_t desc_len,
                              fbl::RefPtr<Iommu>* out) {
    zx_status_t status = ValidateIommuDesc(desc_bytes, desc_len);
    if (status != ZX_OK) {
        return status;
    }

    auto desc = reinterpret_cast<const zx_iommu_desc_intel_t*>(desc_bytes.get());
    const uint64_t register_base = desc->register_base;

    auto kernel_aspace = VmAspace::kernel_aspace();
    void* vaddr;
    status = kernel_aspace->AllocPhysical(
        "iommu",
        PAGE_SIZE,
        &vaddr,
        PAGE_SIZE_SHIFT,
        register_base,
        0,
        ARCH_MMU_FLAG_PERM_READ | ARCH_MMU_FLAG_PERM_WRITE | ARCH_MMU_FLAG_UNCACHED);
    if (status != ZX_OK) {
        return status;
    }

    fbl::AllocChecker ac;
    auto instance = fbl::AdoptRef<IommuImpl>(new (&ac) IommuImpl(vaddr, ktl::move(desc_bytes),
                                                                 desc_len));
    if (!ac.check()) {
        kernel_aspace->FreeRegion(reinterpret_cast<vaddr_t>(vaddr));
        return ZX_ERR_NO_MEMORY;
    }

    status = instance->Initialize();
    if (status != ZX_OK) {
        return status;
    }

    *out = ktl::move(instance);
    return ZX_OK;
}

IommuImpl::~IommuImpl() {
    fbl::AutoLock guard(&lock_);

    // We cannot unpin memory until translation is disabled
    zx_status_t status = SetTranslationEnableLocked(false, ZX_TIME_INFINITE);
    ASSERT(status == ZX_OK);

    DisableFaultsLocked();
    msi_free_block(&irq_block_);

    VmAspace::kernel_aspace()->FreeRegion(mmio_.base());
}

// Validate the IOMMU descriptor from userspace.
//
// The IOMMU descriptor identifies either a whitelist (if whole_segment is false)
// or a blacklist (if whole_segment is true) of devices that are decoded by this
// IOMMU. An entry in the list is described by a "scope" below. A scope
// identifies a single PCIe device. If the device is behind a bridge, it will be
// described using multiple "hops", one for each bridge in the way and one for
// the device itself. A hop identifies the address of a bridge on the path to
// the device, or (in the final entry) the address of the device itself.
//
// The descriptor also contains a list of "Reserved Memory Regions", which
// describes regions of physical address space that must be identity-mapped for
// specific devices to function correctly. There is typically one region for
// the i915 gpu (initial framebuffer) and one for the XHCI controller
// (scratch space for the BIOS before the OS takes ownership of the controller).
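//
// A sketch of the expected layout (entry counts and sizes are illustrative):
//
//   zx_iommu_desc_intel_t                    header: register_base, whole_segment,
//                                            scope_bytes, reserved_memory_bytes
//   zx_iommu_desc_intel_scope_t[N]           scope_bytes worth of scopes
//   zx_iommu_desc_intel_reserved_memory_t    first reserved region (base_addr, len)
//     zx_iommu_desc_intel_scope_t[M]           its scope_bytes worth of scopes
//   ...                                      repeats until reserved_memory_bytes
//                                            are consumed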
zx_status_t IommuImpl::ValidateIommuDesc(const ktl::unique_ptr<const uint8_t[]>& desc_bytes,
                                         size_t desc_len) {
    auto desc = reinterpret_cast<const zx_iommu_desc_intel_t*>(desc_bytes.get());

    // Validate the size
    if (desc_len < sizeof(*desc)) {
        LTRACEF("desc too short: %zu < %zu\n", desc_len, sizeof(*desc));
        return ZX_ERR_INVALID_ARGS;
    }
    static_assert(sizeof(desc->scope_bytes) < sizeof(size_t),
                  "if this changes, need to check for overflow");
    size_t actual_size = sizeof(*desc);
    if (add_overflow(actual_size, desc->scope_bytes, &actual_size) ||
        add_overflow(actual_size, desc->reserved_memory_bytes, &actual_size) ||
        actual_size != desc_len) {

        LTRACEF("desc size mismatch: %zu != %zu\n", desc_len, actual_size);
        return ZX_ERR_INVALID_ARGS;
    }

    // Validate scopes
    if (desc->scope_bytes == 0 && !desc->whole_segment) {
        LTRACEF("desc has no scopes\n");
        return ZX_ERR_INVALID_ARGS;
    }
    const size_t num_scopes = desc->scope_bytes / sizeof(zx_iommu_desc_intel_scope_t);
    size_t scope_bytes = num_scopes;
    if (mul_overflow(scope_bytes, sizeof(zx_iommu_desc_intel_scope_t), &scope_bytes) ||
        scope_bytes != desc->scope_bytes) {

        LTRACEF("desc has invalid scope_bytes field\n");
        return ZX_ERR_INVALID_ARGS;
    }

    auto scopes = reinterpret_cast<zx_iommu_desc_intel_scope_t*>(
            reinterpret_cast<uintptr_t>(desc) + sizeof(*desc));
    for (size_t i = 0; i < num_scopes; ++i) {
        if (scopes[i].num_hops == 0) {
            LTRACEF("desc scope %zu has no hops\n", i);
            return ZX_ERR_INVALID_ARGS;
        }
        if (scopes[i].num_hops > fbl::count_of(scopes[0].dev_func)) {
            LTRACEF("desc scope %zu has too many hops\n", i);
            return ZX_ERR_INVALID_ARGS;
        }
    }

    // Validate reserved memory regions
    size_t cursor_bytes = sizeof(*desc) + desc->scope_bytes;
    while (cursor_bytes + sizeof(zx_iommu_desc_intel_reserved_memory_t) < desc_len) {
        auto mem = reinterpret_cast<zx_iommu_desc_intel_reserved_memory_t*>(
                reinterpret_cast<uintptr_t>(desc) + cursor_bytes);

        size_t next_entry = cursor_bytes;
        if (add_overflow(next_entry, sizeof(zx_iommu_desc_intel_reserved_memory_t), &next_entry) ||
            add_overflow(next_entry, mem->scope_bytes, &next_entry) ||
            next_entry > desc_len) {

            LTRACEF("desc reserved memory entry has invalid scope_bytes\n");
            return ZX_ERR_INVALID_ARGS;
        }

        // TODO(teisenbe): Make sure that the reserved memory regions are not in our
        // allocatable RAM pools

        // Validate scopes
        if (mem->scope_bytes == 0) {
            LTRACEF("desc reserved memory entry has no scopes\n");
            return ZX_ERR_INVALID_ARGS;
        }
        const size_t num_scopes = mem->scope_bytes / sizeof(zx_iommu_desc_intel_scope_t);
        size_t scope_bytes = num_scopes;
        if (mul_overflow(scope_bytes, sizeof(zx_iommu_desc_intel_scope_t), &scope_bytes) ||
            scope_bytes != mem->scope_bytes) {

            LTRACEF("desc reserved memory entry has invalid scope_bytes field\n");
            return ZX_ERR_INVALID_ARGS;
        }

        auto scopes = reinterpret_cast<zx_iommu_desc_intel_scope_t*>(
                reinterpret_cast<uintptr_t>(mem) + sizeof(*mem));
        for (size_t i = 0; i < num_scopes; ++i) {
            if (scopes[i].num_hops == 0) {
                LTRACEF("desc reserved memory entry scope %zu has no hops\n", i);
                return ZX_ERR_INVALID_ARGS;
            }
            if (scopes[i].num_hops > fbl::count_of(scopes[0].dev_func)) {
                LTRACEF("desc reserved memory entry scope %zu has too many hops\n", i);
                return ZX_ERR_INVALID_ARGS;
            }
        }

        cursor_bytes = next_entry;
    }
    if (cursor_bytes != desc_len) {
        LTRACEF("desc has invalid reserved_memory_bytes field\n");
        return ZX_ERR_INVALID_ARGS;
    }

    LTRACEF("validated desc\n");
    return ZX_OK;
}

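// Returns true if this IOMMU decodes transactions from the given bus
// transaction ID.  bus_txn_id is expected to be a PCI BDF packed into 16 bits
// (by convention bus << 8 | dev << 3 | func); anything wider is rejected.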
bool IommuImpl::IsValidBusTxnId(uint64_t bus_txn_id) const {
    if (bus_txn_id > UINT16_MAX) {
        return false;
    }

    ds::Bdf bdf = decode_bus_txn_id(bus_txn_id);

    auto desc = reinterpret_cast<const zx_iommu_desc_intel_t*>(desc_.get());
    const size_t num_scopes = desc->scope_bytes / sizeof(zx_iommu_desc_intel_scope_t);
    auto scopes = reinterpret_cast<zx_iommu_desc_intel_scope_t*>(
            reinterpret_cast<uintptr_t>(desc) + sizeof(*desc));

    // Search for this BDF in the scopes we have
    for (size_t i = 0; i < num_scopes; ++i) {
        if (scopes[i].num_hops != 1) {
            // TODO(teisenbe): Implement
            continue;
        }

        if (scopes[i].start_bus == bdf.bus() &&
            scopes[i].dev_func[0] == bdf.packed_dev_and_func()) {
            return !desc->whole_segment;
        }
    }

    if (desc->whole_segment) {
        // Since we only support a single segment currently, just return true
        // here. To support more segments, we need to make sure the segment
        // matches, too.
        return true;
    }

    return false;
}

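// Map |size| bytes of |vmo| starting at |offset| into the device address space
// of |bus_txn_id|.  The mapping is not required to be contiguous; |mapped_len|
// returns the length of the mapping established at |vaddr|.
//
// A hypothetical call, mapping the first 16 KiB of a VMO read/write for a
// device whose packed BDF is bus_txn_id:
//
//   dev_vaddr_t vaddr;
//   size_t mapped_len;
//   zx_status_t st = iommu->Map(bus_txn_id, vmo, /*offset=*/0, /*size=*/16 * 1024,
//                               IOMMU_FLAG_PERM_READ | IOMMU_FLAG_PERM_WRITE,
//                               &vaddr, &mapped_len);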
zx_status_t IommuImpl::Map(uint64_t bus_txn_id, const fbl::RefPtr<VmObject>& vmo,
                           uint64_t offset, size_t size, uint32_t perms,
                           dev_vaddr_t* vaddr, size_t* mapped_len) {
    DEBUG_ASSERT(vaddr);
    if (!IS_PAGE_ALIGNED(offset) || size == 0) {
        return ZX_ERR_INVALID_ARGS;
    }
    if (perms & ~(IOMMU_FLAG_PERM_READ | IOMMU_FLAG_PERM_WRITE | IOMMU_FLAG_PERM_EXECUTE)) {
        return ZX_ERR_INVALID_ARGS;
    }
    if (perms == 0) {
        return ZX_ERR_INVALID_ARGS;
    }
    if (!IsValidBusTxnId(bus_txn_id)) {
        return ZX_ERR_NOT_FOUND;
    }

    ds::Bdf bdf = decode_bus_txn_id(bus_txn_id);

    fbl::AutoLock guard(&lock_);
    DeviceContext* dev;
    zx_status_t status = GetOrCreateDeviceContextLocked(bdf, &dev);
    if (status != ZX_OK) {
        return status;
    }
    return dev->SecondLevelMap(vmo, offset, size, perms, false /* map_contiguous */,
                               vaddr, mapped_len);
}

zx_status_t IommuImpl::MapContiguous(uint64_t bus_txn_id, const fbl::RefPtr<VmObject>& vmo,
                                     uint64_t offset, size_t size, uint32_t perms,
                                     dev_vaddr_t* vaddr, size_t* mapped_len) {
    DEBUG_ASSERT(vaddr);
    if (!IS_PAGE_ALIGNED(offset) || size == 0) {
        return ZX_ERR_INVALID_ARGS;
    }
    if (perms & ~(IOMMU_FLAG_PERM_READ | IOMMU_FLAG_PERM_WRITE | IOMMU_FLAG_PERM_EXECUTE)) {
        return ZX_ERR_INVALID_ARGS;
    }
    if (perms == 0) {
        return ZX_ERR_INVALID_ARGS;
    }
    if (!IsValidBusTxnId(bus_txn_id)) {
        return ZX_ERR_NOT_FOUND;
    }

    ds::Bdf bdf = decode_bus_txn_id(bus_txn_id);

    fbl::AutoLock guard(&lock_);
    DeviceContext* dev;
    zx_status_t status = GetOrCreateDeviceContextLocked(bdf, &dev);
    if (status != ZX_OK) {
        return status;
    }
    return dev->SecondLevelMap(vmo, offset, size, perms, true /* map_contiguous */,
                               vaddr, mapped_len);
}

zx_status_t IommuImpl::Unmap(uint64_t bus_txn_id, dev_vaddr_t vaddr, size_t size) {
    if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(size)) {
        return ZX_ERR_INVALID_ARGS;
    }
    if (!IsValidBusTxnId(bus_txn_id)) {
        return ZX_ERR_NOT_FOUND;
    }

    ds::Bdf bdf = decode_bus_txn_id(bus_txn_id);

    fbl::AutoLock guard(&lock_);
    DeviceContext* dev;
    zx_status_t status = GetOrCreateDeviceContextLocked(bdf, &dev);
    if (status != ZX_OK) {
        return status;
    }
    status = dev->SecondLevelUnmap(vaddr, size);
    if (status != ZX_OK) {
        return status;
    }

    return ZX_OK;
}

zx_status_t IommuImpl::ClearMappingsForBusTxnId(uint64_t bus_txn_id) {
    PANIC_UNIMPLEMENTED;
    return ZX_ERR_NOT_SUPPORTED;
}

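// One-time hardware bring-up: verify the reported version, cache capability
// fields, confirm translation and interrupt remapping are still disabled,
// install a fresh root table, wire up the fault interrupt, identity-map the
// BIOS reserved regions, and finally enable DMA translation.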
zx_status_t IommuImpl::Initialize() {
    fbl::AutoLock guard(&lock_);

    // Ensure we support this device version
    auto version = reg::Version::Get().ReadFrom(&mmio_);
    if (version.major() != 1 && version.minor() != 0) {
        LTRACEF("Unsupported IOMMU version: %u.%u\n", version.major(), version.minor());
        return ZX_ERR_NOT_SUPPORTED;
    }

    // Cache useful capability info
    caps_ = reg::Capability::Get().ReadFrom(&mmio_);
    extended_caps_ = reg::ExtendedCapability::Get().ReadFrom(&mmio_);

    max_guest_addr_mask_ = (1ULL << (caps_.max_guest_addr_width() + 1)) - 1;
    fault_recording_reg_offset_ = static_cast<uint32_t>(
            caps_.fault_recording_register_offset() * 16);
    num_fault_recording_reg_ = static_cast<uint32_t>(caps_.num_fault_recording_reg() + 1);
    iotlb_reg_offset_ = static_cast<uint32_t>(extended_caps_.iotlb_register_offset() * 16);

    constexpr size_t kIoTlbRegisterBankSize = 16;
    if (iotlb_reg_offset_ > PAGE_SIZE - kIoTlbRegisterBankSize) {
        LTRACEF("Unsupported IOMMU: IOTLB offset runs past the register page\n");
        return ZX_ERR_NOT_SUPPORTED;
    }
    supports_extended_context_ = extended_caps_.supports_extended_context();
    if (extended_caps_.supports_pasid()) {
        valid_pasid_mask_ = static_cast<uint32_t>((1ULL << (extended_caps_.pasid_size() + 1)) - 1);
    }

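    // The number-of-domains capability is encoded as an exponent: a raw value
    // of n means the hardware supports 2^(4 + 2n) domain IDs.  Raw values
    // above 6 are reserved, so reject them.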
    const uint64_t num_domains_raw = caps_.num_domains();
    if (num_domains_raw > 0x6) {
        LTRACEF("Unknown num_domains value\n");
        return ZX_ERR_NOT_SUPPORTED;
    }
    const uint32_t num_supported_domains = static_cast<uint32_t>(1ul << (4 + 2 * num_domains_raw));
    domain_allocator_.set_num_domains(num_supported_domains);

    // Sanity check initial configuration
    auto global_ctl = reg::GlobalControl::Get().ReadFrom(&mmio_);
    if (global_ctl.translation_enable()) {
        LTRACEF("DMA remapping already enabled?!\n");
        return ZX_ERR_BAD_STATE;
    }
    if (global_ctl.interrupt_remap_enable()) {
        LTRACEF("IRQ remapping already enabled?!\n");
        return ZX_ERR_BAD_STATE;
    }

    // Allocate and setup the root table
    zx_status_t status = IommuPage::AllocatePage(&root_table_page_);
    if (status != ZX_OK) {
        LTRACEF("alloc root table failed\n");
        return status;
    }
    status = SetRootTablePointerLocked(root_table_page_.paddr());
    if (status != ZX_OK) {
        LTRACEF("set root table failed\n");
        return status;
    }

    // Enable interrupts before we enable translation
    status = ConfigureFaultEventInterruptLocked();
    if (status != ZX_OK) {
        LTRACEF("configuring fault event irq failed\n");
        return status;
    }

    status = EnableBiosReservedMappingsLocked();
    if (status != ZX_OK) {
        LTRACEF("enable bios reserved mappings failed\n");
        return status;
    }

    status = SetTranslationEnableLocked(true, zx_time_add_duration(current_time(), ZX_SEC(1)));
    if (status != ZX_OK) {
        LTRACEF("set translation enable failed\n");
        return status;
    }

    return ZX_OK;
}

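// Identity-map each BIOS reserved memory region for every device in its scope
// list, so those devices keep functioning once translation is switched on.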
zx_status_t IommuImpl::EnableBiosReservedMappingsLocked() {
    auto desc = reinterpret_cast<const zx_iommu_desc_intel_t*>(desc_.get());

    size_t cursor_bytes = 0;
    while (cursor_bytes + sizeof(zx_iommu_desc_intel_reserved_memory_t) < desc->reserved_memory_bytes) {
        // The descriptor has already been validated, so no need to check again.
        auto mem = reinterpret_cast<zx_iommu_desc_intel_reserved_memory_t*>(
                reinterpret_cast<uintptr_t>(desc) + sizeof(*desc) + desc->scope_bytes +
                cursor_bytes);

        const size_t num_scopes = mem->scope_bytes / sizeof(zx_iommu_desc_intel_scope_t);
        auto scopes = reinterpret_cast<zx_iommu_desc_intel_scope_t*>(
                reinterpret_cast<uintptr_t>(mem) + sizeof(*mem));
        for (size_t i = 0; i < num_scopes; ++i) {
            if (scopes[i].num_hops != 1) {
                // TODO(teisenbe): Implement
                return ZX_ERR_NOT_SUPPORTED;
            }

            ds::Bdf bdf;
            bdf.set_bus(scopes[i].start_bus);
            bdf.set_dev(static_cast<uint8_t>(scopes[i].dev_func[0] >> 3));
            bdf.set_func(static_cast<uint8_t>(scopes[i].dev_func[0] & 0x7));

            DeviceContext* dev;
            zx_status_t status = GetOrCreateDeviceContextLocked(bdf, &dev);
            if (status != ZX_OK) {
                return status;
            }

            LTRACEF("Enabling region [%lx, %lx) for %02x:%02x.%02x\n", mem->base_addr,
                    mem->base_addr + mem->len, bdf.bus(), bdf.dev(), bdf.func());
            size_t size = ROUNDUP(mem->len, PAGE_SIZE);
            const uint32_t perms = IOMMU_FLAG_PERM_READ | IOMMU_FLAG_PERM_WRITE;
            status = dev->SecondLevelMapIdentity(mem->base_addr, size, perms);
            if (status != ZX_OK) {
                return status;
            }
        }

        cursor_bytes += sizeof(*mem) + mem->scope_bytes;
    }

    return ZX_OK;
}

// Sets the root table pointer and invalidates the context-cache and IOTLB.
zx_status_t IommuImpl::SetRootTablePointerLocked(paddr_t pa) {
    DEBUG_ASSERT(IS_PAGE_ALIGNED(pa));

    auto root_table_addr = reg::RootTableAddress::Get().FromValue(0);
    // If we support extended contexts, use it.
    root_table_addr.set_root_table_type(supports_extended_context_);
    root_table_addr.set_root_table_address(pa >> PAGE_SIZE_SHIFT);
    root_table_addr.WriteTo(&mmio_);

    auto global_ctl = reg::GlobalControl::Get().ReadFrom(&mmio_);
    DEBUG_ASSERT(!global_ctl.translation_enable());
    global_ctl.set_root_table_ptr(1);
    global_ctl.WriteTo(&mmio_);
    zx_status_t status = WaitForValueLocked(&global_ctl, &decltype(global_ctl)::root_table_ptr,
                                            1, zx_time_add_duration(current_time(), ZX_SEC(1)));
    if (status != ZX_OK) {
        LTRACEF("Timed out waiting for root_table_ptr bit to take\n");
        return status;
    }

    InvalidateContextCacheGlobalLocked();
    InvalidateIotlbGlobalLocked();

    return ZX_OK;
}

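// Set or clear the global translation-enable bit and wait (until |deadline|)
// for the hardware to report the new state.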
zx_status_t IommuImpl::SetTranslationEnableLocked(bool enabled, zx_time_t deadline) {
    auto global_ctl = reg::GlobalControl::Get().ReadFrom(&mmio_);
    global_ctl.set_translation_enable(enabled);
    global_ctl.WriteTo(&mmio_);

    return WaitForValueLocked(&global_ctl, &decltype(global_ctl)::translation_enable,
                              enabled, deadline);
}

void IommuImpl::InvalidateContextCacheGlobalLocked() {
    DEBUG_ASSERT(lock_.IsHeld());

    auto context_cmd = reg::ContextCommand::Get().FromValue(0);
    context_cmd.set_invld_context_cache(1);
    context_cmd.set_invld_request_granularity(reg::ContextCommand::kGlobalInvld);
    context_cmd.WriteTo(&mmio_);

    WaitForValueLocked(&context_cmd, &decltype(context_cmd)::invld_context_cache, 0,
                       ZX_TIME_INFINITE);
}

void IommuImpl::InvalidateContextCacheDomainLocked(uint32_t domain_id) {
    DEBUG_ASSERT(lock_.IsHeld());

    auto context_cmd = reg::ContextCommand::Get().FromValue(0);
    context_cmd.set_invld_context_cache(1);
    context_cmd.set_invld_request_granularity(reg::ContextCommand::kDomainInvld);
    context_cmd.set_domain_id(domain_id);
    context_cmd.WriteTo(&mmio_);

    WaitForValueLocked(&context_cmd, &decltype(context_cmd)::invld_context_cache, 0,
                       ZX_TIME_INFINITE);
}

void IommuImpl::InvalidateContextCacheGlobal() {
    fbl::AutoLock guard(&lock_);
    InvalidateContextCacheGlobalLocked();
}

void IommuImpl::InvalidateContextCacheDomain(uint32_t domain_id) {
    fbl::AutoLock guard(&lock_);
    InvalidateContextCacheDomainLocked(domain_id);
}

void IommuImpl::InvalidateIotlbGlobalLocked() {
    DEBUG_ASSERT(lock_.IsHeld());
    ASSERT(!caps_.required_write_buf_flushing());

    // TODO(teisenbe): Read/write draining?
    auto iotlb_invld = reg::IotlbInvalidate::Get(iotlb_reg_offset_).ReadFrom(&mmio_);
    iotlb_invld.set_invld_iotlb(1);
    iotlb_invld.set_invld_request_granularity(reg::IotlbInvalidate::kGlobalInvld);
    iotlb_invld.WriteTo(&mmio_);

    WaitForValueLocked(&iotlb_invld, &decltype(iotlb_invld)::invld_iotlb, 0,
                       ZX_TIME_INFINITE);
}

void IommuImpl::InvalidateIotlbDomainAllLocked(uint32_t domain_id) {
    DEBUG_ASSERT(lock_.IsHeld());
    ASSERT(!caps_.required_write_buf_flushing());

    // TODO(teisenbe): Read/write draining?
    auto iotlb_invld = reg::IotlbInvalidate::Get(iotlb_reg_offset_).ReadFrom(&mmio_);
    iotlb_invld.set_invld_iotlb(1);
    iotlb_invld.set_invld_request_granularity(reg::IotlbInvalidate::kDomainAllInvld);
    iotlb_invld.set_domain_id(domain_id);
    iotlb_invld.WriteTo(&mmio_);

    WaitForValueLocked(&iotlb_invld, &decltype(iotlb_invld)::invld_iotlb, 0,
                       ZX_TIME_INFINITE);
}

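// Invalidate the IOTLB entries covering 2^|pages_pow2| pages starting at
// |vaddr| within |domain_id|.  |pages_pow2| must not exceed the maximum
// address mask reported in the capability register.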
void IommuImpl::InvalidateIotlbPageLocked(uint32_t domain_id, dev_vaddr_t vaddr, uint pages_pow2) {
    DEBUG_ASSERT(lock_.IsHeld());
    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
    DEBUG_ASSERT(pages_pow2 < 64);
    DEBUG_ASSERT(pages_pow2 <= caps_.max_addr_mask_value());
    ASSERT(!caps_.required_write_buf_flushing());

    auto invld_addr = reg::InvalidateAddress::Get(iotlb_reg_offset_).FromValue(0);
    invld_addr.set_address(vaddr >> 12);
    invld_addr.set_invld_hint(0);
    invld_addr.set_address_mask(pages_pow2);
    invld_addr.WriteTo(&mmio_);

    // TODO(teisenbe): Read/write draining?
    auto iotlb_invld = reg::IotlbInvalidate::Get(iotlb_reg_offset_).ReadFrom(&mmio_);
    iotlb_invld.set_invld_iotlb(1);
    iotlb_invld.set_invld_request_granularity(reg::IotlbInvalidate::kDomainPageInvld);
    iotlb_invld.set_domain_id(domain_id);
    iotlb_invld.WriteTo(&mmio_);

    WaitForValueLocked(&iotlb_invld, &decltype(iotlb_invld)::invld_iotlb, 0,
                       ZX_TIME_INFINITE);
}

void IommuImpl::InvalidateIotlbGlobal() {
    fbl::AutoLock guard(&lock_);
    InvalidateIotlbGlobalLocked();
}

void IommuImpl::InvalidateIotlbDomainAll(uint32_t domain_id) {
    fbl::AutoLock guard(&lock_);
    InvalidateIotlbDomainAllLocked(domain_id);
}

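// Poll |reg| until the field read by |getter| equals |value| or |deadline|
// passes, sleeping up to 10us between reads.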
template <class RegType>
zx_status_t IommuImpl::WaitForValueLocked(RegType* reg,
                                          typename RegType::ValueType (RegType::*getter)() const,
                                          typename RegType::ValueType value,
                                          zx_time_t deadline) {
    DEBUG_ASSERT(lock_.IsHeld());

    const zx_time_t kMaxSleepDuration = ZX_USEC(10);

    while (true) {
        // Read the register and check if it matches the expected value. If
        // not, sleep for a bit and try again.
        reg->ReadFrom(&mmio_);
        if ((reg->*getter)() == value) {
            return ZX_OK;
        }

        const zx_time_t now = current_time();
        if (now > deadline) {
            break;
        }

        zx_time_t sleep_deadline = fbl::min(zx_time_add_duration(now, kMaxSleepDuration), deadline);
        thread_sleep(sleep_deadline);
    }
    return ZX_ERR_TIMED_OUT;
}

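// MSI handler for fault events: starting at the index reported in the fault
// status register, log and clear (write-1-to-clear) each pending primary fault
// record, then clear the overflow condition.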
interrupt_eoi IommuImpl::FaultHandler(void* ctx) {
    auto self = static_cast<IommuImpl*>(ctx);
    auto status = reg::FaultStatus::Get().ReadFrom(&self->mmio_);

    if (!status.primary_pending_fault()) {
        TRACEF("Non primary fault\n");
        return IRQ_EOI_DEACTIVATE;
    }

    auto caps = reg::Capability::Get().ReadFrom(&self->mmio_);
    const uint32_t num_regs = static_cast<uint32_t>(caps.num_fault_recording_reg() + 1);
    const uint32_t reg_offset = static_cast<uint32_t>(caps.fault_recording_register_offset() * 16);

    uint32_t index = status.fault_record_index();
    while (1) {
        auto rec_high = reg::FaultRecordHigh::Get(reg_offset, index).ReadFrom(&self->mmio_);
        if (!rec_high.fault()) {
            break;
        }
        auto rec_low = reg::FaultRecordLow::Get(reg_offset, index).ReadFrom(&self->mmio_);
        uint64_t source = rec_high.source_id();
        TRACEF("IOMMU Fault: access %c, PASID (%c) %#04lx, reason %#02lx, source %02lx:%02lx.%lx, info: %lx\n",
               rec_high.request_type() ? 'R' : 'W',
               rec_high.pasid_present() ? 'V' : '-',
               rec_high.pasid_value(),
               rec_high.fault_reason(),
               source >> 8, (source >> 3) & 0x1f, source & 0x7,
               rec_low.fault_info() << 12);

        // Clear this fault (RW1CS)
        rec_high.WriteTo(&self->mmio_);

        ++index;
        if (index >= num_regs) {
            index -= num_regs;
        }
    }

    status.set_reg_value(0);
    // Clear the primary fault overflow condition (RW1CS)
    // TODO(teisenbe): How do we guarantee we get an interrupt on the next fault/if we left a fault unprocessed?
    status.set_primary_fault_overflow(1);
    status.WriteTo(&self->mmio_);
    return IRQ_EOI_DEACTIVATE;
}

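// Route fault events to an MSI: allocate an interrupt block, program the fault
// event address/data registers, clear any stale fault records and status bits,
// register FaultHandler, and unmask the fault interrupt.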
zx_status_t IommuImpl::ConfigureFaultEventInterruptLocked() {
    DEBUG_ASSERT(lock_.IsHeld());

    if (!msi_is_supported()) {
        return ZX_ERR_NOT_SUPPORTED;
    }
    zx_status_t status = msi_alloc_block(1, false /* can_target_64bit */,
                                         false /* msi x */, &irq_block_);
    if (status != ZX_OK) {
        return status;
    }

    auto event_data = reg::FaultEventData::Get().FromValue(irq_block_.tgt_data);
    auto event_addr = reg::FaultEventAddress::Get().FromValue(
            static_cast<uint32_t>(irq_block_.tgt_addr));
    auto event_upper_addr = reg::FaultEventUpperAddress::Get().FromValue(
            static_cast<uint32_t>(irq_block_.tgt_addr >> 32));

    event_data.WriteTo(&mmio_);
    event_addr.WriteTo(&mmio_);
    event_upper_addr.WriteTo(&mmio_);

    // Clear all primary fault records
    for (uint32_t i = 0; i < num_fault_recording_reg_; ++i) {
        const uint32_t offset = fault_recording_reg_offset_;
        auto record_high = reg::FaultRecordHigh::Get(offset, i).ReadFrom(&mmio_);
        record_high.WriteTo(&mmio_);
    }

    // Clear all pending faults
    auto fault_status_ctl = reg::FaultStatus::Get().ReadFrom(&mmio_);
    fault_status_ctl.WriteTo(&mmio_);

    msi_register_handler(&irq_block_, 0, FaultHandler, this);

    // Unmask interrupts
    auto fault_event_ctl = reg::FaultEventControl::Get().ReadFrom(&mmio_);
    fault_event_ctl.set_interrupt_mask(0);
    fault_event_ctl.WriteTo(&mmio_);

    return ZX_OK;
}

void IommuImpl::DisableFaultsLocked() {
    auto fault_event_ctl = reg::FaultEventControl::Get().ReadFrom(&mmio_);
    fault_event_ctl.set_interrupt_mask(1);
    fault_event_ctl.WriteTo(&mmio_);
}

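// Find the ContextTableState backing the root-table entry for |bdf|, creating
// it if the entry is not yet present.  In extended-context mode each bus has
// two entries: the lower one covers devices 0-15 and the upper one 16-31.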
zx_status_t IommuImpl::GetOrCreateContextTableLocked(ds::Bdf bdf, ContextTableState** tbl) {
    DEBUG_ASSERT(lock_.IsHeld());

    volatile ds::RootTable* root_table = this->root_table();
    DEBUG_ASSERT(root_table);

    volatile ds::RootEntrySubentry* target_entry = &root_table->entry[bdf.bus()].lower;
    if (supports_extended_context_ && bdf.dev() >= 16) {
        // If this is an extended root table and the device is in the upper half
        // of the bus address space, use the upper pointer.
        target_entry = &root_table->entry[bdf.bus()].upper;
    }

    ds::RootEntrySubentry entry;
    entry.ReadFrom(target_entry);
    if (entry.present()) {
        // We know the entry exists, so search our list of tables for it.
        for (ContextTableState& context_table : context_tables_) {
            if (context_table.includes_bdf(bdf)) {
                *tbl = &context_table;
                return ZX_OK;
            }
        }
    }

    // Couldn't find the ContextTable, so create it.
    ktl::unique_ptr<ContextTableState> table;
    zx_status_t status = ContextTableState::Create(static_cast<uint8_t>(bdf.bus()),
                                                   supports_extended_context_,
                                                   bdf.dev() >= 16 /* upper */,
                                                   this, target_entry, &table);
    if (status != ZX_OK) {
        return status;
    }

    *tbl = table.get();
    context_tables_.push_back(ktl::move(table));

    return ZX_OK;
}

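// Find the DeviceContext for |bdf|, creating it (and allocating a fresh domain
// ID) if this is the first time the device has been seen.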
zx_status_t IommuImpl::GetOrCreateDeviceContextLocked(ds::Bdf bdf, DeviceContext** context) {
    DEBUG_ASSERT(lock_.IsHeld());

    ContextTableState* ctx_table_state;
    zx_status_t status = GetOrCreateContextTableLocked(bdf, &ctx_table_state);
    if (status != ZX_OK) {
        return status;
    }

    status = ctx_table_state->GetDeviceContext(bdf, context);
    if (status != ZX_ERR_NOT_FOUND) {
        // Either status was ZX_OK and we're done, or some error occurred.
        return status;
    }

    uint32_t domain_id;
    status = domain_allocator_.Allocate(&domain_id);
    if (status != ZX_OK) {
        return status;
    }
    return ctx_table_state->CreateDeviceContext(bdf, domain_id, context);
}

uint64_t IommuImpl::minimum_contiguity(uint64_t bus_txn_id) {
    if (!IsValidBusTxnId(bus_txn_id)) {
        return 0;
    }

    ds::Bdf bdf = decode_bus_txn_id(bus_txn_id);

    fbl::AutoLock guard(&lock_);
    DeviceContext* dev;
    zx_status_t status = GetOrCreateDeviceContextLocked(bdf, &dev);
    if (status != ZX_OK) {
        return status;
    }

    return dev->minimum_contiguity();
}

uint64_t IommuImpl::aspace_size(uint64_t bus_txn_id) {
    if (!IsValidBusTxnId(bus_txn_id)) {
        return 0;
    }

    ds::Bdf bdf = decode_bus_txn_id(bus_txn_id);

    fbl::AutoLock guard(&lock_);
    DeviceContext* dev;
    zx_status_t status = GetOrCreateDeviceContextLocked(bdf, &dev);
    if (status != ZX_OK) {
        return status;
    }

    return dev->aspace_size();
}

} // namespace intel_iommu