// Copyright 2018 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include "second_level_pt.h"

#include <arch/x86/mmu.h>

#include "device_context.h"
#include "iommu_impl.h"

#define LOCAL_TRACE 0

namespace {

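// Bit positions of the read/write/execute permissions in a terminal entry;
// these correspond to the R/W/X bits of VT-d second-level paging entries.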
constexpr X86PageTableBase::PtFlags kSlptRead = 1u << 0;
constexpr X86PageTableBase::PtFlags kSlptWrite = 1u << 1;
constexpr X86PageTableBase::PtFlags kSlptExecute = 1u << 2;

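// Computes the mask of valid device virtual addresses for a page table rooted
// at |top_level|: a 2-, 3-, or 4-level table covers a 30-, 39-, or 48-bit
// address width respectively.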
vaddr_t compute_vaddr_mask(PageTableLevel top_level) {
    uint width;
    switch (top_level) {
        case PD_L: width = 30; break;
        case PDP_L: width = 39; break;
        case PML4_L: width = 48; break;
        default: panic("Unsupported iommu width\n");
    }

    // Valid vaddrs for mapping should be page-aligned and not larger than the
    // width of the top level.
    return ((1ull << width) - 1) & ~(PAGE_SIZE - 1);
}

} // namespace

namespace intel_iommu {

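// The constructor caches the hardware capabilities that affect how the table
// is maintained: whether the IOMMU's page walks are cache-coherent (if not,
// modified entries need explicit cache flushes) and whether 2MB/1GB
// second-level pages are supported.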
SecondLevelPageTable::SecondLevelPageTable(IommuImpl* iommu, DeviceContext* parent)
    : iommu_(iommu),
      parent_(parent),
      needs_flushes_(!iommu->extended_caps()->page_walk_coherency()),
      supports_2mb_(iommu->caps()->supports_second_level_2mb_page()),
      supports_1gb_(iommu->caps()->supports_second_level_1gb_page()),
      initialized_(false) {
}

SecondLevelPageTable::~SecondLevelPageTable() {
    DEBUG_ASSERT(!initialized_);
}

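// Rough usage sketch (hypothetical caller; the real owner is presumably a
// DeviceContext setting up translation for a device):
//
//   SecondLevelPageTable slpt(iommu, parent);
//   zx_status_t status = slpt.Init(PML4_L);  // 4-level table, 48-bit width
//   // ... map and unmap ranges via the X86PageTableBase interface ...
//   slpt.Destroy();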
zx_status_t SecondLevelPageTable::Init(PageTableLevel top_level) {
    DEBUG_ASSERT(!initialized_);

    top_level_ = top_level;
    valid_vaddr_mask_ = compute_vaddr_mask(top_level);
    zx_status_t status = X86PageTableBase::Init(nullptr);
    if (status != ZX_OK) {
        return status;
    }
    initialized_ = true;
    return ZX_OK;
}

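// Tears down the paging structures for the entire range this table can
// address (valid_vaddr_mask_ + PAGE_SIZE bytes starting at 0).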
void SecondLevelPageTable::Destroy() {
    if (!initialized_) {
        return;
    }

    size_t size = valid_vaddr_mask_ + PAGE_SIZE;
    initialized_ = false;
    X86PageTableBase::Destroy(0, size);
}

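// Second-level entries can only express read/write/execute permissions, so
// reject requests for anything else (e.g. cache-policy flags).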
bool SecondLevelPageTable::allowed_flags(uint flags) {
    constexpr uint kSupportedFlags =
        ARCH_MMU_FLAG_PERM_READ | ARCH_MMU_FLAG_PERM_WRITE | ARCH_MMU_FLAG_PERM_EXECUTE;
    if (flags & ~kSupportedFlags) {
        return false;
    }
    return true;
}

// Validation for host physical addresses
bool SecondLevelPageTable::check_paddr(paddr_t paddr) {
    return x86_mmu_check_paddr(paddr);
}

// Validation for device-visible virtual addresses
bool SecondLevelPageTable::check_vaddr(vaddr_t vaddr) {
    return !(vaddr & ~valid_vaddr_mask_);
}

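// 4KB pages are always supported; 2MB and 1GB second-level pages are only
// used if the hardware reports support for them.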
bool SecondLevelPageTable::supports_page_size(PageTableLevel level) {
    switch (level) {
        case PT_L: return true;
        case PD_L: return supports_2mb_;
        case PDP_L: return supports_1gb_;
        default: return false;
    }
}

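// Intermediate (non-terminal) entries are left fully permissive; the actual
// permissions are enforced by the terminal entries.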
X86PageTableBase::IntermediatePtFlags SecondLevelPageTable::intermediate_flags() {
    return kSlptRead | kSlptWrite | kSlptExecute;
}

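// Translates generic ARCH_MMU_FLAG_PERM_* permissions into the R/W/X bits
// used in terminal second-level entries.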
X86PageTableBase::PtFlags SecondLevelPageTable::terminal_flags(PageTableLevel level,
                                                               uint flags) {
    X86PageTableBase::PtFlags terminal_flags = 0;

    if (flags & ARCH_MMU_FLAG_PERM_READ) {
        terminal_flags |= kSlptRead;
    }
    if (flags & ARCH_MMU_FLAG_PERM_WRITE) {
        terminal_flags |= kSlptWrite;
    }
    if (flags & ARCH_MMU_FLAG_PERM_EXECUTE) {
        terminal_flags |= kSlptExecute;
    }

    return terminal_flags;
}

X86PageTableBase::PtFlags SecondLevelPageTable::split_flags(PageTableLevel level,
                                                            PtFlags flags) {
    // We don't need to relocate any flags on split
    return flags;
}

// We disable thread safety analysis here: the lock is held across the MMU
// operations, but the call path goes through code that is not aware of the
// lock.
void SecondLevelPageTable::TlbInvalidate(PendingTlbInvalidation* pending)
        TA_NO_THREAD_SAFETY_ANALYSIS {

    DEBUG_ASSERT(!pending->contains_global);

    if (pending->full_shootdown) {
        iommu_->InvalidateIotlbDomainAllLocked(parent_->domain_id());
        pending->clear();
        return;
    }

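    // Otherwise invalidate each pending entry individually. The address mask
    // gives the number of low-order address bits the invalidation ignores;
    // since each paging level spans 9 bits of address, a mapping at level N
    // (counting up from the 4KB leaf) covers 9*N extra bits' worth of pages.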
    constexpr uint kBitsPerLevel = 9;
    for (uint i = 0; i < pending->count; ++i) {
        const auto& item = pending->item[i];
        uint address_mask = kBitsPerLevel * static_cast<uint>(item.page_level());

        if (!item.is_terminal()) {
            // If this is non-terminal, still force the paging-structure cache
            // to be cleared for this address, even though no terminal mapping
            // has changed.
            // TODO(teisenbe): Not completely sure this is necessary. Including
            // for now out of caution.
            address_mask = 0;
        }
        iommu_->InvalidateIotlbPageLocked(parent_->domain_id(), item.addr(), address_mask);
    }
    pending->clear();
}

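// Inverse of terminal_flags(): recovers the generic ARCH_MMU_FLAG_PERM_* bits
// from a second-level entry's R/W/X bits.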
uint SecondLevelPageTable::pt_flags_to_mmu_flags(PtFlags flags, PageTableLevel level) {
    uint mmu_flags = 0;

    if (flags & kSlptRead) {
        mmu_flags |= ARCH_MMU_FLAG_PERM_READ;
    }
    if (flags & kSlptWrite) {
        mmu_flags |= ARCH_MMU_FLAG_PERM_WRITE;
    }
    if (flags & kSlptExecute) {
        mmu_flags |= ARCH_MMU_FLAG_PERM_EXECUTE;
    }

    return mmu_flags;
}

} // namespace intel_iommu