// Copyright 2018 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#pragma once

#include <arch/ops.h>
#include <err.h>
#include <hwreg/bitfields.h>
#include <kernel/atomic.h>
#include <stdint.h>
#include <zircon/compiler.h>

namespace intel_iommu {

// MMIO register definitions for an Intel VT-d DMA-remapping hardware unit.
// Each class wraps one register via hwreg; kAddr is the register's byte
// offset within the unit's register block.  Field/bit layouts mirror the
// Intel VT-d architecture specification.
namespace reg {

// Version register: architecture version implemented by this unit.
class Version : public hwreg::RegisterBase<Version, uint32_t> {
public:
    static constexpr uint32_t kAddr = 0x0;
    static auto Get() { return hwreg::RegisterAddr<Version>(kAddr); }

    DEF_FIELD(3, 0, minor);
    DEF_FIELD(7, 4, major);
    DEF_RSVDZ_FIELD(31, 8);
};

// Capability register: read-only report of the unit's base translation
// capabilities (domain count, supported page sizes, address widths, etc.).
class Capability : public hwreg::RegisterBase<Capability, uint64_t> {
public:
    static constexpr uint32_t kAddr = 0x8;
    static auto Get() { return hwreg::RegisterAddr<Capability>(kAddr); }

    DEF_FIELD(2, 0, num_domains);
    DEF_BIT(3, adv_fault_logging);
    DEF_BIT(4, required_write_buf_flushing);
    DEF_BIT(5, supports_protected_low_mem);
    DEF_BIT(6, supports_protected_high_mem);
    DEF_BIT(7, caching_mode);
    DEF_RSVDZ_BIT(8);
    DEF_BIT(9, supports_39_bit_agaw);
    DEF_BIT(10, supports_48_bit_agaw);
    DEF_RSVDZ_BIT(11);
    DEF_RSVDZ_BIT(12);
    DEF_RSVDZ_FIELD(15, 13);
    DEF_FIELD(21, 16, max_guest_addr_width);
    DEF_BIT(22, supports_zero_length_read);
    DEF_RSVDZ_BIT(23);
    // Location of the fault recording registers; consumed by
    // FaultRecordLow/High::Get() as a byte base supplied by the caller.
    DEF_FIELD(33, 24, fault_recording_register_offset);
    DEF_BIT(34, supports_second_level_2mb_page);
    DEF_BIT(35, supports_second_level_1gb_page);
    DEF_RSVDZ_FIELD(37, 36);
    DEF_RSVDZ_BIT(38);
    DEF_BIT(39, supports_page_selective_invld);
    DEF_FIELD(47, 40, num_fault_recording_reg);
    DEF_FIELD(53, 48, max_addr_mask_value);
    DEF_BIT(54, supports_write_draining);
    DEF_BIT(55, supports_read_draining);
    DEF_BIT(56, supports_first_level_1gb_page);
    DEF_RSVDZ_FIELD(58, 57);
    DEF_BIT(59, supports_posted_interrupts);
    DEF_RSVDZ_FIELD(63, 60);
};

// Extended Capability register: read-only report of optional features
// (queued invalidation, interrupt remapping, PASID, etc.).
// Note: bit 5 is deliberately left undefined here.
class ExtendedCapability : public hwreg::RegisterBase<ExtendedCapability, uint64_t> {
public:
    static constexpr uint32_t kAddr = 0x10;
    static auto Get() { return hwreg::RegisterAddr<ExtendedCapability>(kAddr); }

    DEF_BIT(0, page_walk_coherency);
    DEF_BIT(1, supports_queued_invld);
    DEF_BIT(2, supports_device_tlb);
    DEF_BIT(3, supports_interrupt_remapping);
    DEF_BIT(4, supports_extended_interrupt_mode);
    DEF_BIT(6, supports_pass_through);
    DEF_BIT(7, supports_snoop_control);
    // Location of the IOTLB register group; consumed by
    // InvalidateAddress/IotlbInvalidate::Get() as a byte base supplied by
    // the caller.
    DEF_FIELD(17, 8, iotlb_register_offset);
    DEF_RSVDZ_FIELD(19, 18);
    DEF_FIELD(23, 20, max_handle_mask_value);
    DEF_BIT(24, supports_extended_context);
    DEF_BIT(25, supports_memory_type);
    DEF_BIT(26, supports_nested_translation);
    DEF_BIT(27, supports_deferred_invld);
    DEF_BIT(28, supports_pasid);
    DEF_BIT(29, supports_page_requests);
    DEF_BIT(30, supports_execute_requests);
    DEF_BIT(31, supports_supervisor_requests);
    DEF_RSVDZ_BIT(32);
    DEF_BIT(33, supports_no_write_flag);
    DEF_BIT(34, supports_extended_accessed_flag);
    DEF_FIELD(39, 35, pasid_size);
    DEF_RSVDZ_FIELD(63, 40);
};

// This is a merger of the Global Command and Global Status registers.
// Commands are written at kWriteAddr; status is read back at kReadAddr.
// The two registers share the same bit layout, so a single type models both.
class GlobalControl : public hwreg::RegisterBase<GlobalControl, uint32_t> {
public:
    static constexpr uint32_t kWriteAddr = 0x18;
    static constexpr uint32_t kReadAddr = 0x1c;
    static auto Get() { return hwreg::RegisterAddr<GlobalControl>(kReadAddr); }

    DEF_RSVDZ_FIELD(22, 0);
    DEF_BIT(23, compat_format_interrupt);
    DEF_BIT(24, interrupt_remap_table_ptr);
    DEF_BIT(25, interrupt_remap_enable);
    DEF_BIT(26, queued_invld_enable);
    DEF_BIT(27, write_buffer_flush);
    DEF_BIT(28, adv_fault_logging_enable);
    DEF_BIT(29, fault_log);
    DEF_BIT(30, root_table_ptr);
    DEF_BIT(31, translation_enable);

    // This redefines functions from RegisterBase which are not virtual.
    // This is safe, since no callers operate on this type as its base class.
    // Each override retargets the register address so reads hit the status
    // register and writes hit the command register.
    GlobalControl& ReadFrom(hwreg::RegisterIo* reg_io) {
        hwreg::RegisterBase<GlobalControl, uint32_t>::set_reg_addr(kReadAddr);
        return hwreg::RegisterBase<GlobalControl, uint32_t>::ReadFrom(reg_io);
    }
    GlobalControl& WriteTo(hwreg::RegisterIo* reg_io) {
        hwreg::RegisterBase<GlobalControl, uint32_t>::set_reg_addr(kWriteAddr);
        return hwreg::RegisterBase<GlobalControl, uint32_t>::WriteTo(reg_io);
    }
};

// Root Table Address register: physical base (4KiB-aligned) and format of
// the root table used for DMA-remapping translation.
class RootTableAddress : public hwreg::RegisterBase<RootTableAddress, uint64_t> {
public:
    static constexpr uint32_t kAddr = 0x20;
    static auto Get() { return hwreg::RegisterAddr<RootTableAddress>(kAddr); }

    DEF_RSVDZ_FIELD(10, 0);
    DEF_BIT(11, root_table_type);
    DEF_FIELD(63, 12, root_table_address);
};

// Context Command register: issues context-cache invalidation requests.
// Software sets invld_context_cache with a requested granularity; hardware
// reports the granularity it actually performed in actual_invld_granularity.
class ContextCommand : public hwreg::RegisterBase<ContextCommand, uint64_t> {
public:
    static constexpr uint32_t kAddr = 0x28;
    static auto Get() { return hwreg::RegisterAddr<ContextCommand>(kAddr); }

    DEF_FIELD(15, 0, domain_id);
    DEF_FIELD(31, 16, source_id);
    DEF_FIELD(33, 32, function_mask);
    DEF_RSVDZ_FIELD(58, 34);
    DEF_FIELD(60, 59, actual_invld_granularity);
    DEF_FIELD(62, 61, invld_request_granularity);
    DEF_BIT(63, invld_context_cache);

    // Encodings for the granularity fields above.
    enum Granularity {
        kGlobalInvld = 0b01,
        kDomainInvld = 0b10,
        kDeviceInvld = 0b11,
    };
};

// Invalidate Address register: first register of the IOTLB register group;
// supplies the target address/mask for page-selective IOTLB invalidations.
// |iotlb_base| is the byte offset of the IOTLB register group (see
// ExtendedCapability::iotlb_register_offset).
class InvalidateAddress : public hwreg::RegisterBase<InvalidateAddress, uint64_t> {
public:
    static constexpr uint32_t kInstanceOffset = 0x0;
    static auto Get(uint32_t iotlb_base) {
        return hwreg::RegisterAddr<InvalidateAddress>(iotlb_base + kInstanceOffset);
    }

    DEF_FIELD(5, 0, address_mask);
    DEF_BIT(6, invld_hint);
    DEF_RSVDZ_FIELD(11, 7);
    DEF_FIELD(63, 12, address);
};

// IOTLB Invalidate register: second register of the IOTLB register group;
// issues IOTLB invalidation requests.  As with ContextCommand, software
// requests a granularity and hardware reports the one actually used.
// |iotlb_base| is the byte offset of the IOTLB register group (see
// ExtendedCapability::iotlb_register_offset).
class IotlbInvalidate : public hwreg::RegisterBase<IotlbInvalidate, uint64_t> {
public:
    static constexpr uint32_t kInstanceOffset = 0x08;
    static auto Get(uint32_t iotlb_base) {
        return hwreg::RegisterAddr<IotlbInvalidate>(iotlb_base + kInstanceOffset);
    }

    DEF_FIELD(47, 32, domain_id);
    DEF_BIT(48, drain_writes);
    DEF_BIT(49, drain_reads);
    DEF_RSVDZ_FIELD(56, 50);
    DEF_FIELD(58, 57, actual_invld_granularity);
    DEF_RSVDZ_BIT(59);
    DEF_FIELD(61, 60, invld_request_granularity);
    DEF_RSVDZ_BIT(62);
    DEF_BIT(63, invld_iotlb);

    // Encodings for the granularity fields above.
    enum Granularity {
        kGlobalInvld = 0b01,
        kDomainAllInvld = 0b10,
        kDomainPageInvld = 0b11,
    };
};

// Fault Status register: summary of pending faults and invalidation errors.
class FaultStatus : public hwreg::RegisterBase<FaultStatus, uint32_t> {
public:
    static constexpr uint32_t kAddr = 0x34;
    static auto Get() { return hwreg::RegisterAddr<FaultStatus>(kAddr); }

    DEF_BIT(0, primary_fault_overflow);
    DEF_BIT(1, primary_pending_fault);
    DEF_BIT(2, adv_fault_overflow);
    DEF_BIT(3, adv_pending_fault);
    DEF_BIT(4, invld_queue_error);
    DEF_BIT(5, invld_completion_error);
    DEF_BIT(6, invld_timeout_error);
    DEF_BIT(7, page_request_overflow);
    DEF_FIELD(15, 8, fault_record_index);
    DEF_RSVDZ_FIELD(31, 16);
};

// Fault Event Control register: masks/acknowledges the fault event interrupt.
class FaultEventControl : public hwreg::RegisterBase<FaultEventControl, uint32_t> {
public:
    static constexpr uint32_t kAddr = 0x38;
    static auto Get() { return hwreg::RegisterAddr<FaultEventControl>(kAddr); }

    DEF_BIT(30, interrupt_pending);
    DEF_BIT(31, interrupt_mask);
};

// Fault Event Data register: MSI data payload for the fault event interrupt.
class FaultEventData : public hwreg::RegisterBase<FaultEventData, uint32_t> {
public:
    static constexpr uint32_t kAddr = 0x3c;
    static auto Get() { return hwreg::RegisterAddr<FaultEventData>(kAddr); }

    DEF_FIELD(15, 0, interrupt_message_data);
    DEF_FIELD(31, 16, extended_interrupt_message_data);
};

// Fault Event Address register: low 32 bits of the MSI address for the
// fault event interrupt (the low two bits are reserved).
class FaultEventAddress : public hwreg::RegisterBase<FaultEventAddress, uint32_t> {
public:
    static constexpr uint32_t kAddr = 0x40;
    static auto Get() { return hwreg::RegisterAddr<FaultEventAddress>(kAddr); }

    DEF_RSVDZ_FIELD(1, 0);
    DEF_FIELD(31, 2, message_address);
};

// Fault Event Upper Address register: high 32 bits of the MSI address.
class FaultEventUpperAddress : public hwreg::RegisterBase<FaultEventUpperAddress, uint32_t> {
public:
    static constexpr uint32_t kAddr = 0x44;
    static auto Get() { return hwreg::RegisterAddr<FaultEventUpperAddress>(kAddr); }

    DEF_FIELD(31, 0, message_upper_address);
};

// Low half of a 16-byte fault recording register.  |fault_record_base| is
// the byte offset of the fault recording register array (derived from
// Capability::fault_recording_register_offset); |index| selects the entry.
class FaultRecordLow : public hwreg::RegisterBase<FaultRecordLow, uint64_t> {
public:
    static constexpr uint32_t kInstanceOffset = 0x0;
    static auto Get(uint32_t fault_record_base, uint32_t index) {
        return hwreg::RegisterAddr<FaultRecordLow>(fault_record_base + 16 * index +
                                                   kInstanceOffset);
    }

    DEF_RSVDZ_FIELD(11, 0);
    DEF_FIELD(63, 12, fault_info);
};

// High half of a 16-byte fault recording register; same addressing as
// FaultRecordLow.  The |fault| bit indicates the entry holds a valid record.
class FaultRecordHigh : public hwreg::RegisterBase<FaultRecordHigh, uint64_t> {
public:
    static constexpr uint32_t kInstanceOffset = 0x8;
    static auto Get(uint32_t fault_record_base, uint32_t index) {
        return hwreg::RegisterAddr<FaultRecordHigh>(fault_record_base + 16 * index +
                                                    kInstanceOffset);
    }

    DEF_FIELD(15, 0, source_id);
    DEF_RSVDZ_FIELD(28, 16);
    DEF_BIT(29, supervisor_mode_requested);
    DEF_BIT(30, execute_permission_requested);
    DEF_BIT(31, pasid_present);
    DEF_FIELD(39, 32, fault_reason);
    DEF_FIELD(59, 40, pasid_value);
    DEF_FIELD(61, 60, address_type);
    DEF_BIT(62, request_type);
    DEF_BIT(63, fault);
};

} // namespace reg

// In-memory data structures walked by the remapping hardware (root/context/
// PASID tables).  These live in ordinary RAM; hardware access to them may
// not be cache-coherent, so writers flush the cache after updating entries.
namespace ds {

// A PCI bus/device/function address packed in its standard 16-bit encoding.
struct Bdf {
    uint16_t raw = 0;

    DEF_SUBFIELD(raw, 15, 8, bus);
    DEF_SUBFIELD(raw, 7, 3, dev);
    DEF_SUBFIELD(raw, 2, 0, func);

    bool operator==(const Bdf& other) const {
        return raw == other.raw;
    }

    // The low byte: device and function packed together, as used to index
    // a context table.
    uint8_t packed_dev_and_func() const {
        return static_cast<uint8_t>(raw);
    }
};

// One 64-bit half of a root table entry.  Points at a (4KiB-aligned)
// context table when |present| is set.
struct RootEntrySubentry {
    uint64_t raw;

    DEF_SUBBIT(raw, 0, present);
    DEF_SUBFIELD(raw, 63, 12, context_table);

    void ReadFrom(volatile RootEntrySubentry* dst) {
        raw = dst->raw;
    }
    void WriteTo(volatile RootEntrySubentry* dst) {
        dst->raw = raw;

        // Hardware access to root entries may not be coherent, so flush just in case.
        arch_clean_cache_range(reinterpret_cast<addr_t>(dst), sizeof(*dst));
    }
};

// A 16-byte root table entry; in extended-context mode the two subentries
// cover different halves of the device space.
struct RootEntry {
    RootEntrySubentry lower;
    RootEntrySubentry upper;
};
static_assert(fbl::is_pod<RootEntry>::value, "not POD");
static_assert(sizeof(RootEntry) == 16, "wrong size");

// The 4KiB root table: one entry per PCI bus number.
struct RootTable {
    static constexpr size_t kNumEntries = 256;
    RootEntry entry[kNumEntries];
};
static_assert(fbl::is_pod<RootTable>::value, "not POD");
static_assert(sizeof(RootTable) == 4096, "wrong size");

// A 16-byte context table entry mapping one device/function to a
// second-level page table and domain.
struct ContextEntry {
    uint64_t raw[2];

    DEF_SUBBIT(raw[0], 0, present);
    DEF_SUBBIT(raw[0], 1, fault_processing_disable);
    DEF_SUBFIELD(raw[0], 3, 2, translation_type);
    DEF_SUBFIELD(raw[0], 63, 12, second_level_pt_ptr);
    DEF_SUBFIELD(raw[1], 2, 0, address_width);
    DEF_SUBFIELD(raw[1], 6, 3, hw_ignored);
    DEF_SUBFIELD(raw[1], 23, 8, domain_id);

    void ReadFrom(volatile ContextEntry* dst) {
        raw[0] = dst->raw[0];
        raw[1] = dst->raw[1];
    }

    void WriteTo(volatile ContextEntry* dst) {
        // Write word with present bit last, so hardware cannot observe a
        // present entry whose other word is still stale.
        dst->raw[1] = raw[1];
        dst->raw[0] = raw[0];

        // Hardware access to context entries may not be coherent, so flush just in case.
        arch_clean_cache_range(reinterpret_cast<addr_t>(dst), sizeof(*dst));
    }

    // Encodings for the translation_type field.
    // clang-format off
    enum TranslationType {
        kDeviceTlbDisabled = 0b00,
        kDeviceTlbEnabled  = 0b01,
        kPassThrough       = 0b10,
    };
    // clang-format on

    // Encodings for the address_width field.
    enum AddressWidth {
        k30Bit = 0b000,
        k39Bit = 0b001,
        k48Bit = 0b010,
        k57Bit = 0b011,
        k64Bit = 0b100,
    };
};
static_assert(fbl::is_pod<ContextEntry>::value, "not POD");
static_assert(sizeof(ContextEntry) == 16, "wrong size");

// The 4KiB context table: one entry per packed device/function value.
struct ContextTable {
    static constexpr size_t kNumEntries = 256;
    ContextEntry entry[kNumEntries];
};
static_assert(fbl::is_pod<ContextTable>::value, "not POD");
static_assert(sizeof(ContextTable) == 4096, "wrong size");

// A 32-byte extended-context table entry; adds PASID, nesting, and
// memory-type controls over the base ContextEntry format.
struct ExtendedContextEntry {
    uint64_t raw[4];

    DEF_SUBBIT(raw[0], 0, present);
    DEF_SUBBIT(raw[0], 1, fault_processing_disable);
    DEF_SUBFIELD(raw[0], 4, 2, translation_type);
    DEF_SUBFIELD(raw[0], 7, 5, extended_mem_type);
    DEF_SUBBIT(raw[0], 8, deferred_invld_enable);
    DEF_SUBBIT(raw[0], 9, page_request_enable);
    DEF_SUBBIT(raw[0], 10, nested_translation_enable);
    DEF_SUBBIT(raw[0], 11, pasid_enable);
    DEF_SUBFIELD(raw[0], 63, 12, second_level_pt_ptr);

    DEF_SUBFIELD(raw[1], 2, 0, address_width);
    DEF_SUBBIT(raw[1], 3, global_page_enable);
    DEF_SUBBIT(raw[1], 4, no_exec_enable);
    DEF_SUBBIT(raw[1], 5, write_protect_enable);
    DEF_SUBBIT(raw[1], 6, cache_disable);
    DEF_SUBBIT(raw[1], 7, extended_mem_type_enable);
    DEF_SUBFIELD(raw[1], 23, 8, domain_id);
    DEF_SUBBIT(raw[1], 24, smep_enable);
    DEF_SUBBIT(raw[1], 25, extended_accessed_flag_enable);
    DEF_SUBBIT(raw[1], 26, execute_requests_enable);
    DEF_SUBBIT(raw[1], 27, second_level_execute_bit_enable);
    DEF_SUBFIELD(raw[1], 63, 32, page_attribute_table);

    DEF_SUBFIELD(raw[2], 3, 0, pasid_table_size);
    DEF_SUBFIELD(raw[2], 63, 12, pasid_table_ptr);

    DEF_SUBFIELD(raw[3], 63, 12, pasid_state_table_ptr);

    void ReadFrom(volatile ExtendedContextEntry* dst) {
        raw[0] = dst->raw[0];
        raw[1] = dst->raw[1];
        raw[2] = dst->raw[2];
        raw[3] = dst->raw[3];
    }

    void WriteTo(volatile ExtendedContextEntry* dst) {
        dst->raw[1] = raw[1];
        dst->raw[2] = raw[2];
        dst->raw[3] = raw[3];
        // Write word with present bit last, so hardware cannot observe a
        // present entry whose other words are still stale.
        dst->raw[0] = raw[0];

        // Hardware access to extended-context entries may not be coherent, so flush just in case.
        arch_clean_cache_range(reinterpret_cast<addr_t>(dst), sizeof(*dst));
    }

    // Encodings for the translation_type field.
    // clang-format off
    enum TranslationType {
        kHostModeWithDeviceTlbDisabled  = 0b000,
        kHostModeWithDeviceTlbEnabled   = 0b001,
        kPassThrough                    = 0b010,
        kGuestModeWithDeviceTlbDisabled = 0b100,
        kGuestModeWithDeviceTlbEnabled  = 0b101,
    };
    // clang-format on

    // Encodings for the address_width field.
    enum AddressWidth {
        k30Bit = 0b000,
        k39Bit = 0b001,
        k48Bit = 0b010,
        k57Bit = 0b011,
        k64Bit = 0b100,
    };
};
static_assert(fbl::is_pod<ExtendedContextEntry>::value, "not POD");
static_assert(sizeof(ExtendedContextEntry) == 32, "wrong size");

// The 4KiB extended-context table: half as many entries as ContextTable,
// since each entry is twice the size.
struct ExtendedContextTable {
    static constexpr size_t kNumEntries = 128;
    ExtendedContextEntry entry[kNumEntries];
};
static_assert(fbl::is_pod<ExtendedContextTable>::value, "not POD");
static_assert(sizeof(ExtendedContextTable) == 4096, "wrong size");

// An 8-byte PASID table entry pointing at a first-level page table.
struct PasidEntry {
    uint64_t raw;

    DEF_SUBBIT(raw, 0, present);
    DEF_SUBBIT(raw, 3, page_level_write_through);
    DEF_SUBBIT(raw, 4, page_level_cache_disable);
    DEF_SUBBIT(raw, 11, supervisor_requests_enable);
    DEF_SUBFIELD(raw, 63, 12, first_level_pt_ptr);

    void WriteTo(volatile PasidEntry* dst) {
        dst->raw = raw;
    }
};
static_assert(fbl::is_pod<PasidEntry>::value, "not POD");
static_assert(sizeof(PasidEntry) == 8, "wrong size");

// An 8-byte PASID state table entry.  Hardware updates active_ref_count
// concurrently with software, so the accessors below use explicit atomics
// rather than the DEF_SUB* macros (kept commented out to document the
// layout):
//   DEF_SUBFIELD(raw, 47, 32, active_ref_count);
//   DEF_SUBBIT(raw, 63, deferred_invld);
struct PasidState {
    volatile uint64_t raw;

    // Bits 47:32 — reference count maintained by hardware.
    uint64_t active_ref_count() {
        return (atomic_load_u64(&raw) >> 32) & 0xffff;
    }

    // Bit 63 — deferred-invalidation flag.
    uint64_t deferred_invld() {
        return atomic_load_u64(&raw) >> 63;
    }
    void set_deferred_invld() {
        // The specification is unclear as to how to update this field. This is
        // an in-memory data structure, and the active_ref_count field is
        // specified as being updated atomically by hardware. Taking that
        // "atomically" to mean an atomic memory access, this atomic_or should
        // be the right thing.
        atomic_or_u64(&raw, 1ull << 63);
    }
};
static_assert(fbl::is_pod<PasidState>::value, "not POD");
static_assert(sizeof(PasidState) == 8, "wrong size");

} // namespace ds

} // namespace intel_iommu