// Copyright 2017 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <object/pinned_memory_token_dispatcher.h>

#include <assert.h>
#include <err.h>
#include <vm/pinned_vm_object.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <fbl/algorithm.h>
#include <fbl/auto_call.h>
#include <fbl/auto_lock.h>
#include <new>
#include <object/bus_transaction_initiator_dispatcher.h>
#include <trace.h>

#define LOCAL_TRACE 0
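
// Create() is handed a PinnedVmObject whose pages have already been pinned.
// It allocates the per-chunk address array, maps the pinned range into the
// BTI's IOMMU, and registers the new PMT with its BTI before returning the
// dispatcher.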
zx_status_t PinnedMemoryTokenDispatcher::Create(fbl::RefPtr<BusTransactionInitiatorDispatcher> bti,
                                                PinnedVmObject pinned_vmo, uint32_t perms,
                                                fbl::RefPtr<Dispatcher>* dispatcher,
                                                zx_rights_t* rights) {
    LTRACE_ENTRY;
    DEBUG_ASSERT(IS_PAGE_ALIGNED(pinned_vmo.offset()) && IS_PAGE_ALIGNED(pinned_vmo.size()));

    const size_t min_contig = bti->minimum_contiguity();
    DEBUG_ASSERT(fbl::is_pow2(min_contig));

    fbl::AllocChecker ac;
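    // One device address is recorded per |min_contig|-sized chunk of the pinned range.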
    const size_t num_addrs = ROUNDUP(pinned_vmo.size(), min_contig) / min_contig;
    fbl::Array<dev_vaddr_t> addr_array(new (&ac) dev_vaddr_t[num_addrs], num_addrs);
    if (!ac.check()) {
        return ZX_ERR_NO_MEMORY;
    }

    auto pmo = fbl::AdoptRef(new (&ac) PinnedMemoryTokenDispatcher(ktl::move(bti),
                                                                   ktl::move(pinned_vmo),
                                                                   ktl::move(addr_array)));
    if (!ac.check()) {
        return ZX_ERR_NO_MEMORY;
    }

    zx_status_t status = pmo->MapIntoIommu(perms);
    if (status != ZX_OK) {
        LTRACEF("MapIntoIommu failed: %d\n", status);
        return status;
    }

    // Create must be called with the BTI's lock held, so this is safe to
    // invoke.
    [&]() TA_NO_THREAD_SAFETY_ANALYSIS {
        pmo->bti_->AddPmoLocked(pmo.get());
    }();

    *dispatcher = ktl::move(pmo);
    *rights = default_rights();
    return ZX_OK;
}

// Used during initialization to set up the IOMMU state for this PMT.
//
// We disable thread-safety analysis here, because this is part of the
// initialization routine before other threads have access to this dispatcher.
zx_status_t PinnedMemoryTokenDispatcher::MapIntoIommu(uint32_t perms) TA_NO_THREAD_SAFETY_ANALYSIS {
    const uint64_t bti_id = bti_->bti_id();
    const size_t min_contig = bti_->minimum_contiguity();
    if (pinned_vmo_.vmo()->is_contiguous()) {
        dev_vaddr_t vaddr;
        size_t mapped_len;

        // Usermode drivers assume that if they requested a contiguous buffer in
        // memory, then the physical addresses will be contiguous. Return an
        // error if we can't actually map the address contiguously.
        zx_status_t status = bti_->iommu()->MapContiguous(bti_id, pinned_vmo_.vmo(),
                                                          pinned_vmo_.offset(), pinned_vmo_.size(),
                                                          perms, &vaddr, &mapped_len);
        if (status != ZX_OK) {
            return status;
        }

        DEBUG_ASSERT(vaddr % min_contig == 0);
        mapped_addrs_[0] = vaddr;
        for (size_t i = 1; i < mapped_addrs_.size(); ++i) {
            mapped_addrs_[i] = mapped_addrs_[i - 1] + min_contig;
        }
        return ZX_OK;
    }
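
    // Non-contiguous VMO: map the pinned range in as many pieces as the IOMMU
    // returns, recording one device address per |min_contig|-sized chunk.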
    size_t remaining = pinned_vmo_.size();
    uint64_t curr_offset = pinned_vmo_.offset();
    size_t next_addr_idx = 0;
    while (remaining > 0) {
        dev_vaddr_t vaddr;
        size_t mapped_len;
        zx_status_t status = bti_->iommu()->Map(bti_id, pinned_vmo_.vmo(), curr_offset, remaining,
                                                perms, &vaddr, &mapped_len);
        if (status != ZX_OK) {
            zx_status_t err = UnmapFromIommuLocked();
            ASSERT(err == ZX_OK);
            return status;
        }

        // Ensure we don't end up with any non-terminal chunks that are not |min_contig| in
        // length.
        DEBUG_ASSERT(mapped_len % min_contig == 0 || remaining == mapped_len);

        // Break the range up into chunks of length |min_contig|.
        size_t mapped_remaining = mapped_len;
        while (mapped_remaining > 0) {
            size_t addr_pages = fbl::min<size_t>(mapped_remaining, min_contig);
            mapped_addrs_[next_addr_idx] = vaddr;
            next_addr_idx++;
            vaddr += addr_pages;
            mapped_remaining -= addr_pages;
        }

        curr_offset += mapped_len;
        remaining -= fbl::min(mapped_len, remaining);
    }
    DEBUG_ASSERT(next_addr_idx == mapped_addrs_.size());

    return ZX_OK;
}
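
// Unmaps every chunk that is currently mapped into the IOMMU and resets
// |mapped_addrs_| to the invalid sentinel, so a second call (e.g. from the
// destructor after on_zero_handles()) is a no-op.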
zx_status_t PinnedMemoryTokenDispatcher::UnmapFromIommuLocked() {
    auto iommu = bti_->iommu();
    const uint64_t bus_txn_id = bti_->bti_id();

    if (mapped_addrs_[0] == UINT64_MAX) {
        // No work to do, nothing is mapped.
        return ZX_OK;
    }

    zx_status_t status = ZX_OK;
    if (pinned_vmo_.vmo()->is_contiguous()) {
        status = iommu->Unmap(bus_txn_id, mapped_addrs_[0], pinned_vmo_.size());
    } else {
        const size_t min_contig = bti_->minimum_contiguity();
        size_t remaining = pinned_vmo_.size();
        for (size_t i = 0; i < mapped_addrs_.size(); ++i) {
            dev_vaddr_t addr = mapped_addrs_[i];
            if (addr == UINT64_MAX) {
                break;
            }

            size_t size = fbl::min(remaining, min_contig);
            DEBUG_ASSERT(size == min_contig || i == mapped_addrs_.size() - 1);
            // Try to unmap all pages even if we get an error, and return the
            // first error encountered.
            zx_status_t err = iommu->Unmap(bus_txn_id, addr, size);
            DEBUG_ASSERT(err == ZX_OK);
            if (err != ZX_OK && status == ZX_OK) {
                status = err;
            }
            remaining -= size;
        }
    }

    // Clear this so we won't try again if this gets called again in the
    // destructor.
    InvalidateMappedAddrsLocked();
    return status;
}
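
// Called when usermode explicitly unpins the PMT (e.g. via zx_pmt_unpin());
// on_zero_handles() uses this flag to decide whether the PMT needs to be
// quarantined.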
void PinnedMemoryTokenDispatcher::MarkUnpinned() {
    Guard<fbl::Mutex> guard{get_lock()};
    explicitly_unpinned_ = true;
}
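
// Resets |mapped_addrs_| to the UINT64_MAX sentinel that UnmapFromIommuLocked()
// treats as "not mapped".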
void PinnedMemoryTokenDispatcher::InvalidateMappedAddrsLocked() {
    // Fill with a known invalid address to simplify cleanup of errors during
    // mapping.
    for (size_t i = 0; i < mapped_addrs_.size(); ++i) {
        mapped_addrs_[i] = UINT64_MAX;
    }
}

void PinnedMemoryTokenDispatcher::on_zero_handles() {
    Guard<fbl::Mutex> guard{get_lock()};

    // Once usermode has dropped the handle, either through zx_handle_close(),
    // zx_pmt_unpin(), or process crash, prevent access to the pinned memory.
    //
    // We do not unpin the VMO until this object is destroyed, to allow usermode
    // to protect against stray DMA via the quarantining mechanism.
    zx_status_t status = UnmapFromIommuLocked();
    ASSERT(status == ZX_OK);

    if (explicitly_unpinned_) {
        // The cleanup will happen when the reference that on_zero_handles()
        // was called on goes away.
    } else {
        // Add to the quarantine list to prevent the underlying VMO from being
        // unpinned.
        bti_->Quarantine(fbl::WrapRefPtr(this));
    }
}

PinnedMemoryTokenDispatcher::~PinnedMemoryTokenDispatcher() {
    // In most cases the Unmap will already have run via on_zero_handles(), but
    // it is possible for that to never run if an error occurs between the
    // creation of the PinnedMemoryTokenDispatcher and the completion of the
    // zx_bti_pin() syscall.
    zx_status_t status = UnmapFromIommuLocked();
    ASSERT(status == ZX_OK);

    // RemovePmo is the only method that will remove dll_pmt_ from a list, and
    // it's only called here. dll_pmt_ is only added to a list at the end of
    // Create, before any reference to the pmt has been given out.
    // Because of this, it's safe to check InContainer without holding a lock.
    if (dll_pmt_.InContainer()) {
        bti_->RemovePmo(this);
    }
}
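
// The constructor only records its arguments; the IOMMU mapping is established
// later by Create() via MapIntoIommu().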
PinnedMemoryTokenDispatcher::PinnedMemoryTokenDispatcher(
        fbl::RefPtr<BusTransactionInitiatorDispatcher> bti,
        PinnedVmObject pinned_vmo,
        fbl::Array<dev_vaddr_t> mapped_addrs)
    : pinned_vmo_(ktl::move(pinned_vmo)),
      bti_(ktl::move(bti)), mapped_addrs_(ktl::move(mapped_addrs)) {
    DEBUG_ASSERT(pinned_vmo_.vmo() != nullptr);
    InvalidateMappedAddrsLocked();
}
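
// Copies the device addresses for the pinned range out to |mapped_addrs| in
// one of three formats: one address per |min_contig| chunk (compress_results),
// a single base address for a contiguous VMO (contiguous), or one address per
// page otherwise.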
zx_status_t PinnedMemoryTokenDispatcher::EncodeAddrs(bool compress_results,
                                                      bool contiguous,
                                                      dev_vaddr_t* mapped_addrs,
                                                      size_t mapped_addrs_count) {
    Guard<fbl::Mutex> guard{get_lock()};

    const fbl::Array<dev_vaddr_t>& pmo_addrs = mapped_addrs_;
    const size_t found_addrs = pmo_addrs.size();
    if (compress_results) {
        if (found_addrs != mapped_addrs_count) {
            return ZX_ERR_INVALID_ARGS;
        }
        memcpy(mapped_addrs, pmo_addrs.get(), found_addrs * sizeof(dev_vaddr_t));
    } else if (contiguous) {
        if (mapped_addrs_count != 1 || !pinned_vmo_.vmo()->is_contiguous()) {
            return ZX_ERR_INVALID_ARGS;
        }
        *mapped_addrs = pmo_addrs.get()[0];
    } else {
        const size_t num_pages = pinned_vmo_.size() / PAGE_SIZE;
        if (num_pages != mapped_addrs_count) {
            return ZX_ERR_INVALID_ARGS;
        }
        const size_t min_contig = bti_->minimum_contiguity();
        size_t next_idx = 0;
        for (size_t i = 0; i < found_addrs; ++i) {
            dev_vaddr_t extent_base = pmo_addrs[i];
            for (dev_vaddr_t addr = extent_base;
                 addr < extent_base + min_contig && next_idx < num_pages;
                 addr += PAGE_SIZE, ++next_idx) {
                mapped_addrs[next_idx] = addr;
            }
        }
    }
    return ZX_OK;
}