// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <vm/vm_page_list.h>

#include <err.h>
#include <fbl/alloc_checker.h>
#include <inttypes.h>
#include <ktl/move.h>
#include <trace.h>
#include <vm/pmm.h>
#include <vm/vm.h>
#include <zircon/types.h>

#include "vm_priv.h"

#define LOCAL_TRACE MAX(VM_GLOBAL_TRACE, 0)

namespace {

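// The page list is stored as a tree of VmPageListNodes, each of which holds
// up to kPageFanOut pages. offset_to_node_offset and offset_to_node_index map
// a byte offset to the base offset of the node that covers it and to the page
// index within that node.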
inline uint64_t offset_to_node_offset(uint64_t offset) {
    return ROUNDDOWN(offset, PAGE_SIZE * VmPageListNode::kPageFanOut);
}

inline uint64_t offset_to_node_index(uint64_t offset) {
    return (offset >> PAGE_SIZE_SHIFT) % VmPageListNode::kPageFanOut;
}

inline void move_vm_page_list_node(VmPageListNode* dest, VmPageListNode* src) {
    // Called by move ctor/assignment. Move assignment clears the dest node first.
    ASSERT(dest->IsEmpty());

    for (unsigned i = 0; i < VmPageListNode::kPageFanOut; i++) {
        vm_page* page = src->RemovePage(i);
        if (page) {
            dest->AddPage(page, i);
        }
    }
}

} // namespace

VmPageListNode::VmPageListNode(uint64_t offset)
    : obj_offset_(offset) {
    LTRACEF("%p offset %#" PRIx64 "\n", this, obj_offset_);
}

VmPageListNode::~VmPageListNode() {
    LTRACEF("%p offset %#" PRIx64 "\n", this, obj_offset_);
    canary_.Assert();

    for (__UNUSED auto p : pages_) {
        DEBUG_ASSERT(p == nullptr);
    }
}

vm_page* VmPageListNode::GetPage(size_t index) {
    canary_.Assert();
    DEBUG_ASSERT(index < kPageFanOut);
    return pages_[index];
}

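// Removes and returns the page at |index|, or nullptr if the slot is empty.
// Ownership of the page passes to the caller.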
vm_page* VmPageListNode::RemovePage(size_t index) {
    canary_.Assert();
    DEBUG_ASSERT(index < kPageFanOut);

    auto p = pages_[index];
    if (!p) {
        return nullptr;
    }

    pages_[index] = nullptr;

    return p;
}

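// Stores |p| at |index|, taking ownership of it. Fails with
// ZX_ERR_ALREADY_EXISTS if the slot is already occupied.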
zx_status_t VmPageListNode::AddPage(vm_page* p, size_t index) {
    canary_.Assert();
    DEBUG_ASSERT(index < kPageFanOut);
    if (pages_[index]) {
        return ZX_ERR_ALREADY_EXISTS;
    }
    pages_[index] = p;
    return ZX_OK;
}

VmPageList::VmPageList() {
    LTRACEF("%p\n", this);
}

VmPageList::~VmPageList() {
    LTRACEF("%p\n", this);
    DEBUG_ASSERT(list_.is_empty());
}

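// Adds |p| to the list at |offset|, allocating a new list node if none covers
// that offset yet. Returns ZX_ERR_ALREADY_EXISTS if a page is already present
// at |offset|, or ZX_ERR_NO_MEMORY if a new node could not be allocated.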
zx_status_t VmPageList::AddPage(vm_page* p, uint64_t offset) {
    uint64_t node_offset = offset_to_node_offset(offset);
    size_t index = offset_to_node_index(offset);

    LTRACEF_LEVEL(2, "%p page %p, offset %#" PRIx64 " node_offset %#" PRIx64 " index %zu\n", this, p, offset,
                  node_offset, index);

    // lookup the tree node that holds this page
    auto pln = list_.find(node_offset);
    if (!pln.IsValid()) {
        fbl::AllocChecker ac;
        ktl::unique_ptr<VmPageListNode> pl =
            ktl::unique_ptr<VmPageListNode>(new (&ac) VmPageListNode(node_offset));
        if (!ac.check()) {
            return ZX_ERR_NO_MEMORY;
        }

        LTRACEF("allocating new inner node %p\n", pl.get());
        __UNUSED auto status = pl->AddPage(p, index);
        DEBUG_ASSERT(status == ZX_OK);

        list_.insert(ktl::move(pl));
        return ZX_OK;
    } else {
        return pln->AddPage(p, index);
    }
}

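// Returns the page at |offset|, or nullptr if no page is present there.
// The page remains owned by the list.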
vm_page* VmPageList::GetPage(uint64_t offset) {
    uint64_t node_offset = offset_to_node_offset(offset);
    size_t index = offset_to_node_index(offset);

    LTRACEF_LEVEL(2, "%p offset %#" PRIx64 " node_offset %#" PRIx64 " index %zu\n", this, offset, node_offset,
                  index);

    // lookup the tree node that holds this page
    auto pln = list_.find(node_offset);
    if (!pln.IsValid()) {
        return nullptr;
    }

    return pln->GetPage(index);
}

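// Removes the page at |offset| from the list and returns it through
// |page_out|; the page itself is not freed. Returns false if there was no
// page at |offset|.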
bool VmPageList::RemovePage(uint64_t offset, vm_page_t** page_out) {
    DEBUG_ASSERT(page_out);

    uint64_t node_offset = offset_to_node_offset(offset);
    size_t index = offset_to_node_index(offset);

    LTRACEF_LEVEL(2, "%p offset %#" PRIx64 " node_offset %#" PRIx64 " index %zu\n", this, offset, node_offset,
                  index);

    // lookup the tree node that holds this page
    auto pln = list_.find(node_offset);
    if (!pln.IsValid()) {
        return false;
    }

    // remove this page from the node
    auto page = pln->RemovePage(index);
    if (page) {
        // if it was the last page in the node, remove the node from the tree
        if (pln->IsEmpty()) {
            LTRACEF_LEVEL(2, "%p freeing the list node\n", this);
            list_.erase(*pln);
        }

        *page_out = page;
        return true;
    } else {
        return false;
    }
}

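// Removes all pages in the range [start_offset, end_offset) and returns them
// to the pmm in a single batch. Nodes that become empty are erased from the
// tree.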
void VmPageList::FreePages(uint64_t start_offset, uint64_t end_offset) {
    // Find the first node with a start after start_offset; if start_offset
    // is in a node, it'll be in the one before that one.
    auto start = --list_.upper_bound(start_offset);
    if (!start.IsValid()) {
        start = list_.begin();
    }
    // Find the first node which is completely after the end of the region. If
    // end_offset falls in the middle of a node, this finds the next node.
    const auto end = list_.lower_bound(end_offset);

    list_node list;
    list_initialize(&list);

    // Visitor function which moves the pages from the VmPageListNode
    // to the accumulation list.
    auto per_page_func = [&list](vm_page*& p, uint64_t offset) {
        list_add_tail(&list, &p->queue_node);
        p = nullptr;
        return ZX_ERR_NEXT;
    };

    // Iterate through all nodes which have at least some overlap with the
    // region, freeing the pages and erasing nodes which become empty.
    while (start != end) {
        auto cur = start++;
        cur->ForEveryPage(per_page_func, start_offset, end_offset);
        if (cur->IsEmpty()) {
            list_.erase(cur);
        }
    }

    pmm_free(&list);
}

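// Removes every page from the list, returns them all to the pmm in one call,
// and clears the tree. Returns the number of pages freed.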
size_t VmPageList::FreeAllPages() {
    LTRACEF("%p\n", this);

    list_node list;
    list_initialize(&list);

    size_t count = 0;

    // per page get a reference to the page pointer inside the page list node
    auto per_page_func = [&](vm_page*& p, uint64_t offset) {
        // add the page to our list and null out the inner node
        list_add_tail(&list, &p->queue_node);
        p = nullptr;
        count++;
        return ZX_ERR_NEXT;
    };

    // walk the tree in order, freeing all the pages on every node
    ForEveryPage(per_page_func);

    // return all the pages to the pmm at once
    pmm_free(&list);

    // empty the tree
    list_.clear();

    return count;
}

bool VmPageList::IsEmpty() {
    return list_.is_empty();
}

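// Removes the pages in [offset, offset + length) from this list and returns
// them in a VmPageSpliceList, which holds partially-covered boundary nodes in
// its head_/tail_ slots and whole nodes in its middle_ tree.
//
// A minimal usage sketch (hypothetical caller; |src| and |handle_page| are
// assumptions, not part of this file):
//
//   VmPageSpliceList splice = src.TakePages(offset, length);
//   while (!splice.IsDone()) {
//       vm_page* p = splice.Pop();
//       if (p) {
//           handle_page(p);  // the splice list no longer owns |p|
//       }
//   }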
VmPageSpliceList VmPageList::TakePages(uint64_t offset, uint64_t length) {
    VmPageSpliceList res(offset, length);
    const uint64_t end = offset + length;

    // If we can't take the whole node at the start of the range,
    // then shove the pages into the splice list head_ node.
    while (offset_to_node_index(offset) != 0 && offset < end) {
        vm_page_t* page;
        if (RemovePage(offset, &page)) {
            res.head_.AddPage(page, offset_to_node_index(offset));
        }
        offset += PAGE_SIZE;
    }

    // As long as the current and end node offsets are different, we
    // can just move the whole node into the splice list.
    while (offset_to_node_offset(offset) != offset_to_node_offset(end)) {
        ktl::unique_ptr<VmPageListNode> node = list_.erase(offset_to_node_offset(offset));
        if (node) {
            res.middle_.insert(ktl::move(node));
        }
        offset += (PAGE_SIZE * VmPageListNode::kPageFanOut);
    }

    // Move any remaining pages into the splice list tail_ node.
    while (offset < end) {
        vm_page_t* page;
        if (RemovePage(offset, &page)) {
            res.tail_.AddPage(page, offset_to_node_index(offset));
        }
        offset += PAGE_SIZE;
    }

    return res;
}

VmPageSpliceList::VmPageSpliceList() : VmPageSpliceList(0, 0) {}

VmPageSpliceList::VmPageSpliceList(uint64_t offset, uint64_t length)
    : offset_(offset), length_(length) {
}

VmPageSpliceList::VmPageSpliceList(VmPageSpliceList&& other)
    : offset_(other.offset_), length_(other.length_),
      pos_(other.pos_), middle_(ktl::move(other.middle_)) {
    move_vm_page_list_node(&head_, &other.head_);
    move_vm_page_list_node(&tail_, &other.tail_);

    other.offset_ = other.length_ = other.pos_ = 0;
}

VmPageSpliceList& VmPageSpliceList::operator=(VmPageSpliceList&& other) {
    FreeAllPages();

    offset_ = other.offset_;
    length_ = other.length_;
    pos_ = other.pos_;
    move_vm_page_list_node(&head_, &other.head_);
    move_vm_page_list_node(&tail_, &other.tail_);
    middle_ = ktl::move(other.middle_);

    other.offset_ = other.length_ = other.pos_ = 0;

    return *this;
}

VmPageSpliceList::~VmPageSpliceList() {
    FreeAllPages();
}

void VmPageSpliceList::FreeAllPages() {
    // Free any pages owned by the splice list.
    while (!IsDone()) {
        auto page = Pop();
        if (page) {
            pmm_free_page(page);
        }
    }
}

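// Returns the page at the splice list's current position and advances the
// position by one page. Returns nullptr if there is no page at that position;
// asserts in debug builds if called after the list has been exhausted.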
vm_page* VmPageSpliceList::Pop() {
    if (IsDone()) {
        DEBUG_ASSERT_MSG(false, "Popped from empty splice list");
        return nullptr;
    }

    const uint64_t cur_offset = offset_ + pos_;
    const auto cur_node_idx = offset_to_node_index(cur_offset);
    const auto cur_node_offset = offset_to_node_offset(cur_offset);

    vm_page* res;
    if (offset_to_node_index(offset_) != 0
            && offset_to_node_offset(offset_) == cur_node_offset) {
        // If the original offset means that pages were placed in head_
        // and the current offset points to the same node, look there.
        res = head_.RemovePage(cur_node_idx);
    } else if (cur_node_offset != offset_to_node_offset(offset_ + length_)) {
        // If the current offset isn't pointing to the tail node,
        // look in the middle tree.
        auto middle_node = middle_.find(cur_node_offset);
        if (middle_node.IsValid()) {
            res = middle_node->RemovePage(cur_node_idx);
        } else {
            res = nullptr;
        }
    } else {
        // If none of the other cases, we're in the tail_.
        res = tail_.RemovePage(cur_node_idx);
    }

    pos_ += PAGE_SIZE;
    return res;
}