// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <utility>

#include <fbl/algorithm.h>
#include <fbl/alloc_checker.h>
#include <fbl/auto_lock.h>
#include <fbl/unique_ptr.h>
#include <fbl/vector.h>
#include <zircon/assert.h>

#include "fvm-private.h"
#include "vpartition.h"

namespace fvm {

VPartition::VPartition(VPartitionManager* vpm, size_t entry_index, size_t block_op_size)
    : PartitionDeviceType(vpm->zxdev()), mgr_(vpm), entry_index_(entry_index) {
    memcpy(&info_, &mgr_->Info(), sizeof(block_info_t));
    info_.block_count = 0;
}

VPartition::~VPartition() = default;

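// Creates a VPartition for the allocated partition table entry at
// |entry_index|. Entry index 0 is reserved by FVM, so a valid partition
// always has a nonzero index.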
zx_status_t VPartition::Create(VPartitionManager* vpm, size_t entry_index,
                               fbl::unique_ptr<VPartition>* out) {
    ZX_DEBUG_ASSERT(entry_index != 0);

    fbl::AllocChecker ac;
    auto vp = fbl::make_unique_checked<VPartition>(&ac, vpm, entry_index, vpm->BlockOpSize());
    if (!ac.check()) {
        return ZX_ERR_NO_MEMORY;
    }

    *out = std::move(vp);
    return ZX_OK;
}

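// Returns the pslice backing |vslice|, or PSLICE_UNALLOCATED if no extent
// covers it. The extent map is keyed by each extent's starting vslice, so the
// candidate extent is the one immediately preceding upper_bound(vslice).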
uint32_t VPartition::SliceGetLocked(size_t vslice) const {
    ZX_DEBUG_ASSERT(vslice < mgr_->VSliceMax());
    auto extent = --slice_map_.upper_bound(vslice);
    if (!extent.IsValid()) {
        return PSLICE_UNALLOCATED;
    }
    ZX_DEBUG_ASSERT(extent->start() <= vslice);
    return extent->get(vslice);
}

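// Reports the allocation state of the vslice run beginning at |vslice_start|:
// |*allocated| is set if that vslice is backed, and |*count| is the length of
// the contiguous run (allocated or unallocated) starting there.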
zx_status_t VPartition::CheckSlices(size_t vslice_start, size_t* count, bool* allocated) {
    fbl::AutoLock lock(&lock_);

    if (vslice_start >= mgr_->VSliceMax()) {
        return ZX_ERR_OUT_OF_RANGE;
    }

    if (IsKilledLocked()) {
        return ZX_ERR_BAD_STATE;
    }

    *count = 0;
    *allocated = false;

    auto extent = --slice_map_.upper_bound(vslice_start);
    if (extent.IsValid()) {
        ZX_DEBUG_ASSERT(extent->start() <= vslice_start);
        if (extent->start() + extent->size() > vslice_start) {
            *count = extent->size() - (vslice_start - extent->start());
            *allocated = true;
        }
    }

    if (!(*allocated)) {
        auto extent = slice_map_.upper_bound(vslice_start);
        if (extent.IsValid()) {
            ZX_DEBUG_ASSERT(extent->start() > vslice_start);
            *count = extent->start() - vslice_start;
        } else {
            *count = mgr_->VSliceMax() - vslice_start;
        }
    }

    return ZX_OK;
}

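// Maps |vslice| to |pslice|, appending to an adjacent extent when possible
// and merging with the following extent if the two become contiguous.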
zx_status_t VPartition::SliceSetLocked(size_t vslice, uint32_t pslice) {
    ZX_DEBUG_ASSERT(vslice < mgr_->VSliceMax());
    auto extent = --slice_map_.upper_bound(vslice);
    ZX_DEBUG_ASSERT(!extent.IsValid() || extent->get(vslice) == PSLICE_UNALLOCATED);
    if (extent.IsValid() && (vslice == extent->end())) {
        // Easy case: append to existing extent.
        if (!extent->push_back(pslice)) {
            return ZX_ERR_NO_MEMORY;
        }
    } else {
        // Longer case: there is no extent for this vslice, so we should make
        // one.
        fbl::AllocChecker ac;
        fbl::unique_ptr<SliceExtent> new_extent(new (&ac) SliceExtent(vslice));
        if (!ac.check()) {
            return ZX_ERR_NO_MEMORY;
        } else if (!new_extent->push_back(pslice)) {
            return ZX_ERR_NO_MEMORY;
        }
        ZX_DEBUG_ASSERT(new_extent->GetKey() == vslice);
        ZX_DEBUG_ASSERT(new_extent->get(vslice) == pslice);
        slice_map_.insert(std::move(new_extent));
        extent = --slice_map_.upper_bound(vslice);
    }

    ZX_DEBUG_ASSERT(SliceGetLocked(vslice) == pslice);
    AddBlocksLocked((mgr_->SliceSize() / info_.block_size));

    // Merge with the next contiguous extent (if any)
    auto next_extent = slice_map_.upper_bound(vslice);
    if (next_extent.IsValid() && (vslice + 1 == next_extent->start())) {
        if (extent->Merge(*next_extent)) {
            slice_map_.erase(*next_extent);
        }
    }

    return ZX_OK;
}

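// Unmaps |vslice|. Freeing from the middle of an extent splits it in two;
// returns false if allocating the split fails.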
bool VPartition::SliceFreeLocked(size_t vslice) {
    ZX_DEBUG_ASSERT(vslice < mgr_->VSliceMax());
    ZX_DEBUG_ASSERT(SliceCanFree(vslice));
    auto extent = --slice_map_.upper_bound(vslice);
    if (vslice != extent->end() - 1) {
        // Removing from the middle of an extent; this splits the extent in
        // two.
        auto new_extent = extent->Split(vslice);
        if (new_extent == nullptr) {
            return false;
        }
        slice_map_.insert(std::move(new_extent));
    }
    // After any split, |vslice| is the last entry of |extent|; remove it.
    extent->pop_back();
    if (extent->is_empty()) {
        slice_map_.erase(*extent);
    }

    AddBlocksLocked(-(mgr_->SliceSize() / info_.block_size));
    return true;
}

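// Removes the entire extent containing |vslice| and subtracts its blocks from
// the partition's published size.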
void VPartition::ExtentDestroyLocked(size_t vslice) TA_REQ(lock_) {
    ZX_DEBUG_ASSERT(vslice < mgr_->VSliceMax());
    ZX_DEBUG_ASSERT(SliceCanFree(vslice));
    auto extent = --slice_map_.upper_bound(vslice);
    size_t length = extent->size();
    slice_map_.erase(*extent);
    AddBlocksLocked(-((length * mgr_->SliceSize()) / info_.block_size));
}

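// Validates an extend/shrink request: offset 0 is reserved, and both the
// offset and offset + length (checked for overflow) must stay within
// |vslice_max|.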
static zx_status_t RequestBoundCheck(const extend_request_t* request, size_t vslice_max) {
    if (request->offset == 0 || request->offset > vslice_max) {
        return ZX_ERR_OUT_OF_RANGE;
    } else if (request->length > vslice_max) {
        return ZX_ERR_OUT_OF_RANGE;
    } else if (request->offset + request->length < request->offset ||
               request->offset + request->length > vslice_max) {
        return ZX_ERR_OUT_OF_RANGE;
    }
    return ZX_OK;
}

// Device protocol (VPartition)

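// Handles block device and FVM ioctls for this partition. Operations that
// read partition state fail with ZX_ERR_BAD_STATE once the partition has been
// killed.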
zx_status_t VPartition::DdkIoctl(uint32_t op, const void* cmd, size_t cmdlen, void* reply,
                                 size_t max, size_t* out_actual) {
    switch (op) {
    case IOCTL_BLOCK_GET_INFO: {
        block_info_t* info = static_cast<block_info_t*>(reply);
        if (max < sizeof(*info))
            return ZX_ERR_BUFFER_TOO_SMALL;
        fbl::AutoLock lock(&lock_);
        if (IsKilledLocked())
            return ZX_ERR_BAD_STATE;
        memcpy(info, &info_, sizeof(*info));
        *out_actual = sizeof(*info);
        return ZX_OK;
    }
    case IOCTL_BLOCK_FVM_VSLICE_QUERY: {
        if (cmdlen < sizeof(query_request_t)) {
            return ZX_ERR_BUFFER_TOO_SMALL;
        }

        if (max < sizeof(query_response_t)) {
            return ZX_ERR_BUFFER_TOO_SMALL;
        }

        const query_request_t* request = static_cast<const query_request_t*>(cmd);

        if (request->count > MAX_FVM_VSLICE_REQUESTS) {
            return ZX_ERR_BUFFER_TOO_SMALL;
        }

        query_response_t* response = static_cast<query_response_t*>(reply);
        response->count = 0;
        for (size_t i = 0; i < request->count; i++) {
            zx_status_t status;
            if ((status = CheckSlices(request->vslice_start[i], &response->vslice_range[i].count,
                                      &response->vslice_range[i].allocated)) != ZX_OK) {
                return status;
            }
            response->count++;
        }

        *out_actual = sizeof(query_response_t);
        return ZX_OK;
    }
    case IOCTL_BLOCK_FVM_QUERY: {
        if (max < sizeof(fvm_info_t)) {
            return ZX_ERR_BUFFER_TOO_SMALL;
        }
        fvm_info_t* info = static_cast<fvm_info_t*>(reply);
        mgr_->Query(info);
        *out_actual = sizeof(fvm_info_t);
        return ZX_OK;
    }
    case IOCTL_BLOCK_GET_TYPE_GUID: {
        char* guid = static_cast<char*>(reply);
        if (max < FVM_GUID_LEN)
            return ZX_ERR_BUFFER_TOO_SMALL;
        fbl::AutoLock lock(&lock_);
        if (IsKilledLocked())
            return ZX_ERR_BAD_STATE;
        memcpy(guid, mgr_->GetAllocatedVPartEntry(entry_index_)->type, FVM_GUID_LEN);
        *out_actual = FVM_GUID_LEN;
        return ZX_OK;
    }
    case IOCTL_BLOCK_GET_PARTITION_GUID: {
        char* guid = static_cast<char*>(reply);
        if (max < FVM_GUID_LEN)
            return ZX_ERR_BUFFER_TOO_SMALL;
        fbl::AutoLock lock(&lock_);
        if (IsKilledLocked())
            return ZX_ERR_BAD_STATE;
        memcpy(guid, mgr_->GetAllocatedVPartEntry(entry_index_)->guid, FVM_GUID_LEN);
        *out_actual = FVM_GUID_LEN;
        return ZX_OK;
    }
    case IOCTL_BLOCK_GET_NAME: {
        char* name = static_cast<char*>(reply);
        if (max < FVM_NAME_LEN + 1)
            return ZX_ERR_BUFFER_TOO_SMALL;
        fbl::AutoLock lock(&lock_);
        if (IsKilledLocked())
            return ZX_ERR_BAD_STATE;
        memcpy(name, mgr_->GetAllocatedVPartEntry(entry_index_)->name, FVM_NAME_LEN);
        name[FVM_NAME_LEN] = 0;
        *out_actual = strlen(name);
        return ZX_OK;
    }
    case IOCTL_BLOCK_FVM_EXTEND: {
        if (cmdlen < sizeof(extend_request_t))
            return ZX_ERR_BUFFER_TOO_SMALL;
        const extend_request_t* request = static_cast<const extend_request_t*>(cmd);
        zx_status_t status;
        if ((status = RequestBoundCheck(request, mgr_->VSliceMax())) != ZX_OK) {
            return status;
        } else if (request->length == 0) {
            return ZX_OK;
        }
        return mgr_->AllocateSlices(this, request->offset, request->length);
    }
    case IOCTL_BLOCK_FVM_SHRINK: {
        if (cmdlen < sizeof(extend_request_t))
            return ZX_ERR_BUFFER_TOO_SMALL;
        const extend_request_t* request = static_cast<const extend_request_t*>(cmd);
        zx_status_t status;
        if ((status = RequestBoundCheck(request, mgr_->VSliceMax())) != ZX_OK) {
            return status;
        } else if (request->length == 0) {
            return ZX_OK;
        }
        return mgr_->FreeSlices(this, request->offset, request->length);
    }
    case IOCTL_BLOCK_FVM_DESTROY_PARTITION: {
        return mgr_->FreeSlices(this, 0, mgr_->VSliceMax());
    }
    default:
        return ZX_ERR_NOT_SUPPORTED;
    }
}

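// Shared bookkeeping for a txn that was split across noncontiguous slices:
// counts completed sub-txns and records the first error, so the original txn
// is completed exactly once.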
typedef struct multi_txn_state {
    multi_txn_state(size_t total, block_op_t* txn, block_impl_queue_callback cb, void* cookie)
        : txns_completed(0), txns_total(total), status(ZX_OK), original(txn), completion_cb(cb),
          cookie(cookie) {}

    fbl::Mutex lock;
    size_t txns_completed TA_GUARDED(lock);
    size_t txns_total TA_GUARDED(lock);
    zx_status_t status TA_GUARDED(lock);
    block_op_t* original TA_GUARDED(lock);
    block_impl_queue_callback completion_cb TA_GUARDED(lock);
    void* cookie TA_GUARDED(lock);
} multi_txn_state_t;

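// Completion callback for each sub-txn of a split operation. The last sub-txn
// to complete fires the original completion callback and frees the shared
// state; every sub-txn frees its own block_op buffer.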
static void multi_txn_completion(void* cookie, zx_status_t status, block_op_t* txn) {
    multi_txn_state_t* state = static_cast<multi_txn_state_t*>(cookie);
    bool last_txn = false;
    {
        fbl::AutoLock lock(&state->lock);
        state->txns_completed++;
        if (state->status == ZX_OK && status != ZX_OK) {
            state->status = status;
        }
        if (state->txns_completed == state->txns_total) {
            last_txn = true;
            state->completion_cb(state->cookie, state->status, state->original);
        }
    }

    if (last_txn) {
        delete state;
    }
    // The sub-txn was allocated as a uint8_t[] of BlockOpSize() bytes, so it
    // must be deleted with the matching array type.
    delete[] reinterpret_cast<uint8_t*>(txn);
}

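// Queues a block operation, translating the virtual device offset into
// physical offsets. Reads and writes spanning noncontiguous slices are split
// into one sub-txn per vslice.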
void VPartition::BlockImplQueue(block_op_t* txn, block_impl_queue_callback completion_cb,
                                void* cookie) {
    ZX_DEBUG_ASSERT(mgr_->BlockOpSize() > 0);
    switch (txn->command & BLOCK_OP_MASK) {
    case BLOCK_OP_READ:
    case BLOCK_OP_WRITE:
        break;
    // Pass-through operations
    case BLOCK_OP_FLUSH:
        mgr_->Queue(txn, completion_cb, cookie);
        return;
    default:
        fprintf(stderr, "[FVM BlockQueue] Unsupported Command: %x\n", txn->command);
        completion_cb(cookie, ZX_ERR_NOT_SUPPORTED, txn);
        return;
    }

    const uint64_t device_capacity = DdkGetSize() / BlockSize();
    if (txn->rw.length == 0) {
        completion_cb(cookie, ZX_ERR_INVALID_ARGS, txn);
        return;
    } else if ((txn->rw.offset_dev >= device_capacity) ||
               (device_capacity - txn->rw.offset_dev < txn->rw.length)) {
        completion_cb(cookie, ZX_ERR_OUT_OF_RANGE, txn);
        return;
    }

    const size_t disk_size = mgr_->DiskSize();
    const size_t slice_size = mgr_->SliceSize();
    const uint64_t blocks_per_slice = slice_size / BlockSize();
    // Start, end both inclusive
    size_t vslice_start = txn->rw.offset_dev / blocks_per_slice;
    size_t vslice_end = (txn->rw.offset_dev + txn->rw.length - 1) / blocks_per_slice;

    fbl::AutoLock lock(&lock_);
    if (vslice_start == vslice_end) {
        // Common case: txn occurs within one slice
        uint32_t pslice = SliceGetLocked(vslice_start);
        if (pslice == PSLICE_UNALLOCATED) {
            completion_cb(cookie, ZX_ERR_OUT_OF_RANGE, txn);
            return;
        }
        txn->rw.offset_dev = SliceStart(disk_size, slice_size, pslice) / BlockSize() +
                             (txn->rw.offset_dev % blocks_per_slice);
        mgr_->Queue(txn, completion_cb, cookie);
        return;
    }

    // Less common case: txn spans multiple slices

    // First, check that all slices are allocated.
    // If any are missing, then this txn will fail.
    bool contiguous = true;
    for (size_t vslice = vslice_start; vslice <= vslice_end; vslice++) {
        if (SliceGetLocked(vslice) == PSLICE_UNALLOCATED) {
            completion_cb(cookie, ZX_ERR_OUT_OF_RANGE, txn);
            return;
        }
        if (vslice != vslice_start && SliceGetLocked(vslice - 1) + 1 != SliceGetLocked(vslice)) {
            contiguous = false;
        }
    }

    // Ideal case: slices are contiguous
    if (contiguous) {
        uint32_t pslice = SliceGetLocked(vslice_start);
        txn->rw.offset_dev = SliceStart(disk_size, slice_size, pslice) / BlockSize() +
                             (txn->rw.offset_dev % blocks_per_slice);
        mgr_->Queue(txn, completion_cb, cookie);
        return;
    }

    // Harder case: noncontiguous slices
    const size_t txn_count = vslice_end - vslice_start + 1;
    fbl::Vector<block_op_t*> txns;
    txns.reserve(txn_count);

    fbl::AllocChecker ac;
    fbl::unique_ptr<multi_txn_state_t> state(
        new (&ac) multi_txn_state_t(txn_count, txn, completion_cb, cookie));
    if (!ac.check()) {
        completion_cb(cookie, ZX_ERR_NO_MEMORY, txn);
        return;
    }

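    // Split the original txn into one sub-txn per vslice: the first covers
    // from offset_dev to the end of its slice, interior sub-txns cover whole
    // slices, and the last covers whatever remains.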
    uint32_t length_remaining = txn->rw.length;
    for (size_t i = 0; i < txn_count; i++) {
        size_t vslice = vslice_start + i;
        uint32_t pslice = SliceGetLocked(vslice);

        uint64_t offset_vmo = txn->rw.offset_vmo;
        uint64_t length;
        if (vslice == vslice_start) {
            length = fbl::round_up(txn->rw.offset_dev + 1, blocks_per_slice) - txn->rw.offset_dev;
        } else if (vslice == vslice_end) {
            length = length_remaining;
            offset_vmo += txn->rw.length - length_remaining;
        } else {
            length = blocks_per_slice;
            offset_vmo += txns[0]->rw.length + blocks_per_slice * (i - 1);
        }
        ZX_DEBUG_ASSERT(length <= blocks_per_slice);
        ZX_DEBUG_ASSERT(length <= length_remaining);

        txns.push_back(reinterpret_cast<block_op_t*>(new uint8_t[mgr_->BlockOpSize()]));
        if (txns[i] == nullptr) {
            while (i-- > 0) {
                delete[] reinterpret_cast<uint8_t*>(txns[i]);
            }
            completion_cb(cookie, ZX_ERR_NO_MEMORY, txn);
            return;
        }
        memcpy(txns[i], txn, sizeof(*txn));
        txns[i]->rw.offset_vmo = offset_vmo;
        txns[i]->rw.length = static_cast<uint32_t>(length);
        txns[i]->rw.offset_dev = SliceStart(disk_size, slice_size, pslice) / BlockSize();
        if (vslice == vslice_start) {
            txns[i]->rw.offset_dev += (txn->rw.offset_dev % blocks_per_slice);
        }
        length_remaining -= txns[i]->rw.length;
    }
    ZX_DEBUG_ASSERT(length_remaining == 0);

    for (size_t i = 0; i < txn_count; i++) {
        mgr_->Queue(txns[i], multi_txn_completion, state.get());
    }
    // Ownership of |state| passes to multi_txn_completion, which deletes it
    // when the last sub-txn completes; release it so it outlives this scope.
    __UNUSED auto ptr = state.release();
}

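// The partition's apparent size spans every vslice, allocated or not.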
zx_off_t VPartition::DdkGetSize() {
    const zx_off_t sz = mgr_->VSliceMax() * mgr_->SliceSize();
    // Check for overflow; enforced when loading driver
    ZX_DEBUG_ASSERT(sz / mgr_->VSliceMax() == mgr_->SliceSize());
    return sz;
}

void VPartition::DdkUnbind() {
    DdkRemove();
}

void VPartition::DdkRelease() {
    delete this;
}

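// Reports the cached block info along with the parent's block_op size, which
// callers must use when allocating block_op_t buffers.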
void VPartition::BlockImplQuery(block_info_t* info_out, size_t* block_op_size_out) {
    static_assert(fbl::is_same<decltype(info_out), decltype(&info_)>::value, "Info type mismatch");
    memcpy(info_out, &info_, sizeof(info_));
    *block_op_size_out = mgr_->BlockOpSize();
}

zx_device_t* VPartition::GetParent() const {
    return mgr_->parent();
}

} // namespace fvm