// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
5 #include <libzbi/zbi-zx.h>
6
7 #include <lib/zx/vmar.h>
8 #include <lib/zx/vmo.h>
9 #include <limits.h>
10 #include <string.h>
11 #include <zircon/assert.h>
12
13 #include <utility>
14
15 namespace zbi {
16
namespace {

// Round |size| up to the next page boundary.  Sizes that are already
// page-aligned are returned unchanged (including 0).
//
// BUG FIX: the previous code computed (size + PAGE_SIZE) & -PAGE_SIZE,
// which rounded an already-aligned size up by a whole extra page and
// mapped 0 to PAGE_SIZE, wasting a page on every exact fit.
size_t PageRound(size_t size) {
    return (size + PAGE_SIZE - 1) & -(size_t)PAGE_SIZE;
}

}  // namespace
24
Init(zx::vmo vmo)25 zx_status_t ZbiVMO::Init(zx::vmo vmo) {
26 vmo_ = std::move(vmo);
27 auto status = vmo_.get_size(&capacity_);
28 if (status == ZX_OK && capacity_ > 0) {
29 status = Map();
30 }
31 return status;
32 }
33
Release()34 zx::vmo ZbiVMO::Release() {
35 Unmap();
36 capacity_= 0;
37 return std::move(vmo_);
38 }
39
~ZbiVMO()40 ZbiVMO::~ZbiVMO() {
41 Unmap();
42 }
43
Map()44 zx_status_t ZbiVMO::Map() {
45 uintptr_t mapping;
46 auto status = zx::vmar::root_self()->map(
47 0, vmo_, 0, capacity_, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
48 &mapping);
49 if (status == ZX_OK) {
50 base_ = reinterpret_cast<uint8_t*>(mapping);
51 }
52 return status;
53 }
54
Unmap()55 void ZbiVMO::Unmap() {
56 if (base_) {
57 [[maybe_unused]] auto status =
58 zx::vmar::root_self()->unmap(reinterpret_cast<uintptr_t>(base_),
59 capacity_);
60 ZX_DEBUG_ASSERT(status == ZX_OK);
61 base_ = nullptr;
62 }
63 }
64
AppendSection(uint32_t length,uint32_t type,uint32_t extra,uint32_t flags,const void * payload)65 zbi_result_t ZbiVMO::AppendSection(uint32_t length, uint32_t type,
66 uint32_t extra, uint32_t flags,
67 const void* payload) {
68 void* dest;
69 auto result = CreateSection(length, type, extra, flags, &dest);
70 if (result == ZBI_RESULT_OK) {
71 memcpy(dest, payload, length);
72 }
73 return result;
74 }
75
CreateSection(uint32_t length,uint32_t type,uint32_t extra,uint32_t flags,void ** payload)76 zbi_result_t ZbiVMO::CreateSection(uint32_t length, uint32_t type,
77 uint32_t extra, uint32_t flags,
78 void** payload) {
79 auto result = Zbi::CreateSection(length, type, extra, flags, payload);
80 if (result == ZBI_RESULT_TOO_BIG) {
81 const size_t new_capacity =
82 PageRound(Length() + sizeof(zbi_header_t) + length);
83 ZX_DEBUG_ASSERT(new_capacity > capacity_);
84 auto status = vmo_.set_size(new_capacity);
85 if (status == ZX_OK) {
86 Unmap();
87 capacity_ = new_capacity;
88 status = Map();
89 }
90 if (status == ZX_OK) {
91 result = Zbi::CreateSection(length, type, extra, flags, payload);
92 }
93 }
94 return result;
95 }
96
SplitComplete(ZbiVMO * kernel,ZbiVMO * data) const97 zbi_result_t ZbiVMO::SplitComplete(ZbiVMO* kernel, ZbiVMO* data) const {
98 // First check that it's a proper complete ZBI. After this it should be
99 // safe to trust the headers (modulo racing modification of the original
100 // VMO, which we can't help).
101 auto result = CheckComplete();
102 if (result != ZBI_RESULT_OK) {
103 return result;
104 }
105
106 // First clone a VMO covering just the leading kernel portion of the ZBI.
107 auto kernel_hdr = Header() + 1;
108 const uint32_t kernel_size =
109 static_cast<uint32_t>(sizeof(zbi_header_t) * 2) + kernel_hdr->length;
110 const size_t kernel_vmo_size = PageRound(kernel_size);
111 auto status = vmo_.clone(ZX_VMO_CLONE_COPY_ON_WRITE, 0, kernel_vmo_size,
112 &kernel->vmo_);
113 if (status != ZX_OK) {
114 return ZBI_RESULT_TOO_BIG;
115 }
116
117 // Map it in.
118 kernel->Unmap(); // Just in case.
119 kernel->capacity_ = kernel_vmo_size;
120 status = kernel->Map();
121 if (status != ZX_OK) {
122 return ZBI_RESULT_TOO_BIG;
123 }
124 // Update the size in the copied container header.
125 kernel->Header()->length =
126 kernel_size - static_cast<uint32_t>(sizeof(zbi_header_t));
127
128 // Now create (or clone if possible) a VMO for the remainder.
129 const uint32_t data_payload_size = Length() - kernel_size;
130 const size_t data_vmo_size = PageRound(
131 data_payload_size + static_cast<uint32_t>(sizeof(zbi_header_t)));
132
133 // If by some miracle the remainder is aligned exactly right, then
134 // we can clone the trailing portion as well.
135 bool clone = (kernel_size - sizeof(zbi_header_t)) % PAGE_SIZE == 0;
136 status = clone ?
137 vmo_.clone(ZX_VMO_CLONE_COPY_ON_WRITE,
138 kernel_size - sizeof(zbi_header_t),
139 data_vmo_size, &data->vmo_) :
140 vmo_.create(data_vmo_size, 0, &data->vmo_);
141 if (status != ZX_OK) {
142 return ZBI_RESULT_TOO_BIG;
143 }
144
145 // Map it in.
146 data->Unmap(); // Just in case.
147 data->capacity_ = data_vmo_size;
148 status = data->Map();
149 if (status != ZX_OK) {
150 return ZBI_RESULT_TOO_BIG;
151 }
152
153 // Fill in the header and data (if not already virtually copied).
154 *data->Header() = (zbi_header_t)ZBI_CONTAINER_HEADER(data_payload_size);
155 if (!clone) {
156 memcpy(data->Payload(), Base() + kernel_size, data_payload_size);
157 }
158
159 return ZBI_RESULT_OK;
160 }
161
162 // C API wrapper.
SplitCompleteWrapper(zx_handle_t zbi_vmo,zx_handle_t * kernel_vmo,zx_handle_t * data_vmo)163 zbi_result_t SplitCompleteWrapper(zx_handle_t zbi_vmo,
164 zx_handle_t* kernel_vmo,
165 zx_handle_t* data_vmo) {
166 ZbiVMO zbi, kernel, data;
167 auto status = zbi.Init(zx::vmo(zbi_vmo));
168 if (status != ZX_OK) {
169 return ZBI_RESULT_TOO_BIG;
170 }
171 auto result = zbi.SplitComplete(&kernel, &data);
172 if (result == ZBI_RESULT_OK) {
173 *kernel_vmo = kernel.vmo_.release();
174 *data_vmo = data.vmo_.release();
175 }
176 return result;
177 }
178
zbi_split_complete(zx_handle_t zbi_vmo,zx_handle_t * kernel_vmo,zx_handle_t * data_vmo)179 zbi_result_t zbi_split_complete(zx_handle_t zbi_vmo,
180 zx_handle_t* kernel_vmo,
181 zx_handle_t* data_vmo) {
182 return SplitCompleteWrapper(zbi_vmo, kernel_vmo, data_vmo);
183 }
184
185 } // namespace zbi
186