// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <climits>
#include <fbl/alloc_checker.h>
#include <fbl/auto_call.h>
#include <fbl/unique_ptr.h>
#include <lib/fzl/pinned-vmo.h>
#include <limits>

namespace fzl {

zx_status_t PinnedVmo::Pin(const zx::vmo& vmo, const zx::bti& bti, uint32_t rights) {
    // If we are holding a pinned memory token, then we are already holding a
    // pinned VMO. It is an error to try to pin a new VMO without first
    // explicitly unpinning the old one.
    if (pmt_.is_valid()) {
        ZX_DEBUG_ASSERT(regions_ != nullptr);
        ZX_DEBUG_ASSERT(region_count_ > 0);
        return ZX_ERR_BAD_STATE;
    }

    // Check our args; read and write are the only rights users may request.
    constexpr uint32_t kAllowedRights = ZX_BTI_PERM_READ | ZX_BTI_PERM_WRITE;
    if (((rights & kAllowedRights) != rights) || !vmo.is_valid() || !bti.is_valid()) {
        return ZX_ERR_INVALID_ARGS;
    }

    // Before proceeding, we need to know how big the VMO we are pinning is.
    zx_status_t res;
    uint64_t vmo_size;
    res = vmo.get_size(&vmo_size);
    if (res != ZX_OK) {
        return res;
    }

    // Allocate storage for the results.
    ZX_DEBUG_ASSERT((vmo_size > 0) && !(vmo_size & (PAGE_SIZE - 1)));
    ZX_DEBUG_ASSERT((vmo_size / PAGE_SIZE) < std::numeric_limits<uint32_t>::max());
    fbl::AllocChecker ac;
    uint32_t page_count = static_cast<uint32_t>(vmo_size / PAGE_SIZE);
    fbl::unique_ptr<zx_paddr_t[]> addrs(new (&ac) zx_paddr_t[page_count]);
    if (!ac.check()) {
        return ZX_ERR_NO_MEMORY;
    }

    // Now actually pin the region.
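    // bti.pin() fills addrs[] with one device-physical address per page of the
    // VMO, which is why the array was sized at page_count entries above.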
    res = bti.pin(rights, vmo, 0, vmo_size, addrs.get(), page_count, &pmt_);
    if (res != ZX_OK) {
        return res;
    }

    // From here on out, if anything goes wrong, we need to make sure to clean
    // up. Set up an AutoCall to take care of this for us.
    auto cleanup = fbl::MakeAutoCall([&]() { UnpinInternal(); });

    // Do a quick pass over the pages to figure out how many adjacent pages we
    // can merge. This tells us how many entries we need to allocate for our
    // regions array.
    zx_paddr_t last = addrs[0];
    region_count_ = 1;
    for (uint32_t i = 1; i < page_count; ++i) {
        if (addrs[i] != (last + PAGE_SIZE)) {
            ++region_count_;
        }
        last = addrs[i];
    }

    // Allocate storage for our regions.
    regions_.reset(new (&ac) Region[region_count_]);
    if (!ac.check()) {
        return ZX_ERR_NO_MEMORY;
    }

    // Finally, merge any adjacent pages to compute our set of regions, and we
    // should be good to go.
    regions_[0].phys_addr = addrs[0];
    regions_[0].size = PAGE_SIZE;
    for (uint32_t i = 1, j = 0; i < page_count; ++i) {
        ZX_DEBUG_ASSERT(j < region_count_);

        if ((regions_[j].phys_addr + regions_[j].size) == addrs[i]) {
            // Merge!
            regions_[j].size += PAGE_SIZE;
        } else {
            // New Region!
            ++j;
            ZX_DEBUG_ASSERT(j < region_count_);
            regions_[j].phys_addr = addrs[i];
            regions_[j].size = PAGE_SIZE;
        }
    }

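    // Success.  Defuse the cleanup handler so the VMO stays pinned and the
    // region bookkeeping we just built is preserved.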
    cleanup.cancel();
    return ZX_OK;
}
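
// A minimal usage sketch (illustrative only, not part of this library). It
// assumes the caller already holds a zx::bti handle ("bti" below, typically
// obtained from a driver's platform or PCI protocol), and it uses
// region_count()/region() accessors assumed to be declared in
// lib/fzl/pinned-vmo.h:
//
//   zx::vmo vmo;
//   zx_status_t status = zx::vmo::create(4 * PAGE_SIZE, 0, &vmo);
//   if (status != ZX_OK) { /* handle error */ }
//
//   fzl::PinnedVmo pinned;
//   status = pinned.Pin(vmo, bti, ZX_BTI_PERM_READ | ZX_BTI_PERM_WRITE);
//   if (status != ZX_OK) { /* handle error */ }
//
//   for (uint32_t i = 0; i < pinned.region_count(); ++i) {
//       // Each region describes a physically contiguous run of pages.
//       zx_paddr_t pa = pinned.region(i).phys_addr;
//       uint64_t len = pinned.region(i).size;
//       // ... program DMA hardware with pa/len ...
//   }
//
//   pinned.Unpin();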

void PinnedVmo::Unpin() {
    if (!pmt_.is_valid()) {
        ZX_DEBUG_ASSERT(regions_ == nullptr);
        ZX_DEBUG_ASSERT(region_count_ == 0);
        return;
    }

    ZX_DEBUG_ASSERT(regions_ != nullptr);
    ZX_DEBUG_ASSERT(region_count_ > 0);

    UnpinInternal();
}

void PinnedVmo::UnpinInternal() {
    ZX_DEBUG_ASSERT(pmt_.is_valid());

    // Given the level of sanity checking we have done so far, it should be
    // completely impossible for us to fail to unpin this memory.
    __UNUSED zx_status_t res;
    res = pmt_.unpin();
    ZX_DEBUG_ASSERT(res == ZX_OK);

    regions_.reset();
    region_count_ = 0;
}

} // namespace fzl