1 // Copyright 2018 The Fuchsia Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 #include "nandpart.h"
5
6 #include <assert.h>
7 #include <inttypes.h>
8 #include <stdio.h>
9 #include <string.h>
10
11 #include <ddk/binding.h>
12 #include <ddk/debug.h>
13 #include <ddk/driver.h>
14 #include <ddk/metadata.h>
15 #include <ddk/metadata/nand.h>
16 #include <ddk/protocol/badblock.h>
17
18 #include <fbl/algorithm.h>
19 #include <fbl/alloc_checker.h>
20 #include <fbl/unique_ptr.h>
21 #include <lib/sync/completion.h>
22 #include <zircon/boot/image.h>
23 #include <zircon/hw/gpt.h>
24 #include <zircon/types.h>
25
26 #include "nandpart-utils.h"
27
28 namespace nand {
29 namespace {
30
// GUIDs used to select the NAND class for each partition (see Create()).
constexpr uint8_t fvm_guid[] = GUID_FVM_VALUE;
constexpr uint8_t test_guid[] = GUID_TEST_VALUE;

// Per-operation context appended after the parent's portion of a
// nand_operation_t (see NandQuery/NandQueue). Records the offset translation
// applied on the way down so CompletionCallback can undo it, plus the
// sub-partition caller's original callback and cookie.
struct NandPartOpContext {
    // Offset added before forwarding to the parent: pages for read/write,
    // erase blocks for erase.
    uint32_t offset;
    // Caller's completion callback, invoked after offsets are re-translated.
    nand_queue_callback completion_cb;
    // Caller's cookie, passed back to completion_cb.
    void* cookie;
};
39
40 // Shim for calling sub-partition's callback.
CompletionCallback(void * cookie,zx_status_t status,nand_operation_t * op)41 void CompletionCallback(void* cookie, zx_status_t status, nand_operation_t* op) {
42 auto* ctx = reinterpret_cast<NandPartOpContext*>(cookie);
43 // Re-translate the offsets.
44 switch (op->command) {
45 case NAND_OP_READ:
46 case NAND_OP_WRITE:
47 op->rw.offset_nand -= ctx->offset;
48 break;
49 case NAND_OP_ERASE:
50 op->erase.first_block -= ctx->offset;
51 break;
52 default:
53 ZX_ASSERT(false);
54 }
55 ctx->completion_cb(ctx->cookie, status, op);
56 }
57
58 } // namespace
59
Create(zx_device_t * parent)60 zx_status_t NandPartDevice::Create(zx_device_t* parent) {
61 zxlogf(INFO, "NandPartDevice::Create: Starting...!\n");
62
63 nand_protocol_t nand_proto;
64 if (device_get_protocol(parent, ZX_PROTOCOL_NAND, &nand_proto) != ZX_OK) {
65 zxlogf(ERROR, "nandpart: parent device '%s': does not support nand protocol\n",
66 device_get_name(parent));
67 return ZX_ERR_NOT_SUPPORTED;
68 }
69
70 // Query parent to get its fuchsia_hardware_nand_Info and size for nand_operation_t.
71 fuchsia_hardware_nand_Info nand_info;
72 size_t parent_op_size;
73 nand_proto.ops->query(nand_proto.ctx, &nand_info, &parent_op_size);
74 // Make sure parent_op_size is aligned, so we can safely add our data at the end.
75 parent_op_size = fbl::round_up(parent_op_size, 8u);
76
77 // Query parent for nand configuration info.
78 size_t actual;
79 nand_config_t nand_config;
80 zx_status_t status = device_get_metadata(parent, DEVICE_METADATA_PRIVATE, &nand_config,
81 sizeof(nand_config), &actual);
82 if (status != ZX_OK) {
83 zxlogf(ERROR, "nandpart: parent device '%s' has no device metadata\n",
84 device_get_name(parent));
85 return status;
86 }
87 if (actual < sizeof(nand_config_t)) {
88 zxlogf(ERROR, "nandpart: Expected metadata is of size %zu, needs to at least be %zu\n",
89 actual, sizeof(nand_config_t));
90 return ZX_ERR_INTERNAL;
91 }
92 // Create a bad block instance.
93 BadBlock::Config config = {
94 .bad_block_config = nand_config.bad_block_config,
95 .nand_proto = nand_proto,
96 };
97 fbl::RefPtr<BadBlock> bad_block;
98 status = BadBlock::Create(config, &bad_block);
99 if (status != ZX_OK) {
100 zxlogf(ERROR, "nandpart: Failed to create BadBlock object\n");
101 return status;
102 }
103
104 // Query parent for partition map.
105 uint8_t buffer[METADATA_PARTITION_MAP_MAX];
106 status = device_get_metadata(parent, DEVICE_METADATA_PARTITION_MAP, buffer, sizeof(buffer),
107 &actual);
108 if (status != ZX_OK) {
109 zxlogf(ERROR, "nandpart: parent device '%s' has no partition map\n",
110 device_get_name(parent));
111 return status;
112 }
113 if (actual < sizeof(zbi_partition_map_t)) {
114 zxlogf(ERROR, "nandpart: Partition map is of size %zu, needs to at least be %zu\n", actual,
115 sizeof(zbi_partition_t));
116 return ZX_ERR_INTERNAL;
117 }
118
119 auto* pmap = reinterpret_cast<zbi_partition_map_t*>(buffer);
120
121 const size_t minimum_size =
122 sizeof(zbi_partition_map_t) + (sizeof(zbi_partition_t) * pmap->partition_count);
123 if (actual < minimum_size) {
124 zxlogf(ERROR, "nandpart: Partition map is of size %zu, needs to at least be %zu\n", actual,
125 minimum_size);
126 return ZX_ERR_INTERNAL;
127 }
128
129 // Sanity check partition map and transform into expected form.
130 status = SanitizePartitionMap(pmap, nand_info);
131 if (status != ZX_OK) {
132 return status;
133 }
134
135 // Create a device for each partition.
136 for (unsigned i = 0; i < pmap->partition_count; i++) {
137 const auto* part = &pmap->partitions[i];
138
139 nand_info.num_blocks = static_cast<uint32_t>(part->last_block - part->first_block + 1);
140 memcpy(&nand_info.partition_guid, &part->type_guid, sizeof(nand_info.partition_guid));
141 // We only use FTL for the FVM partition.
142 if (memcmp(part->type_guid, fvm_guid, sizeof(fvm_guid)) == 0) {
143 nand_info.nand_class = fuchsia_hardware_nand_Class_FTL;
144 } else if (memcmp(part->type_guid, test_guid, sizeof(test_guid)) == 0) {
145 nand_info.nand_class = fuchsia_hardware_nand_Class_TEST;
146 } else {
147 nand_info.nand_class = fuchsia_hardware_nand_Class_BBS;
148 }
149
150 fbl::AllocChecker ac;
151 fbl::unique_ptr<NandPartDevice> device(new (&ac) NandPartDevice(
152 parent, nand_proto, bad_block, parent_op_size, nand_info,
153 static_cast<uint32_t>(part->first_block)));
154 if (!ac.check()) {
155 continue;
156 }
157 // Find optional partition_config information.
158 uint32_t copy_count = 1;
159 for (uint32_t i = 0; i < nand_config.extra_partition_config_count; i++) {
160 if (memcmp(nand_config.extra_partition_config[i].type_guid, part->type_guid,
161 sizeof(part->type_guid)) == 0 &&
162 nand_config.extra_partition_config[i].copy_count > 0) {
163 copy_count = nand_config.extra_partition_config[i].copy_count;
164 break;
165 }
166 }
167 status = device->Bind(part->name, copy_count);
168 if (status != ZX_OK) {
169 zxlogf(ERROR, "Failed to bind %s with error %d\n", part->name, status);
170
171 continue;
172 }
173 // devmgr is now in charge of the device.
174 __UNUSED auto* dummy = device.release();
175 }
176
177 return ZX_OK;
178 }
179
Bind(const char * name,uint32_t copy_count)180 zx_status_t NandPartDevice::Bind(const char* name, uint32_t copy_count) {
181 zxlogf(INFO, "nandpart: Binding %s to %s\n", name, device_get_name(parent()));
182
183 zx_device_prop_t props[] = {
184 {BIND_PROTOCOL, 0, ZX_PROTOCOL_NAND},
185 {BIND_NAND_CLASS, 0, nand_info_.nand_class},
186 };
187
188 zx_status_t status = DdkAdd(name, DEVICE_ADD_INVISIBLE, props, fbl::count_of(props));
189 if (status != ZX_OK) {
190 return status;
191 }
192
193 // Add empty partition map metadata to prevent this driver from binding to its child devices
194 status = DdkAddMetadata(DEVICE_METADATA_PARTITION_MAP, nullptr, 0);
195 if (status != ZX_OK) {
196 DdkRemove();
197 return status;
198 }
199
200 status = DdkAddMetadata(DEVICE_METADATA_PRIVATE, ©_count, sizeof(copy_count));
201 if (status != ZX_OK) {
202 DdkRemove();
203 return status;
204 }
205
206 DdkMakeVisible();
207 return ZX_OK;
208 }
209
NandQuery(fuchsia_hardware_nand_Info * info_out,size_t * nand_op_size_out)210 void NandPartDevice::NandQuery(fuchsia_hardware_nand_Info* info_out, size_t* nand_op_size_out) {
211 memcpy(info_out, &nand_info_, sizeof(*info_out));
212 // Add size of extra context.
213 *nand_op_size_out = parent_op_size_ + sizeof(NandPartOpContext);
214 }
215
NandQueue(nand_operation_t * op,nand_queue_callback completion_cb,void * cookie)216 void NandPartDevice::NandQueue(nand_operation_t* op, nand_queue_callback completion_cb,
217 void* cookie) {
218 auto* ctx =
219 reinterpret_cast<NandPartOpContext*>(reinterpret_cast<uintptr_t>(op) + parent_op_size_);
220 uint32_t command = op->command;
221
222 // Make offset relative to full underlying device
223 switch (command) {
224 case NAND_OP_READ:
225 case NAND_OP_WRITE:
226 ctx->offset = erase_block_start_ * nand_info_.pages_per_block;
227 op->rw.offset_nand += ctx->offset;
228 break;
229 case NAND_OP_ERASE:
230 ctx->offset = erase_block_start_;
231 op->erase.first_block += erase_block_start_;
232 break;
233 default:
234 completion_cb(cookie, ZX_ERR_NOT_SUPPORTED, op);
235 return;
236 }
237
238 ctx->completion_cb = completion_cb;
239 ctx->cookie = cookie;
240
241 // Call parent's queue
242 nand_.Queue(op, CompletionCallback, ctx);
243 }
244
// Factory bad block lists are not yet supported by nandpart; reports zero
// blocks and ZX_ERR_NOT_SUPPORTED unconditionally.
zx_status_t NandPartDevice::NandGetFactoryBadBlockList(uint32_t* bad_blocks, size_t bad_block_len,
                                                       size_t* num_bad_blocks) {
    // TODO implement this.
    *num_bad_blocks = 0;
    return ZX_ERR_NOT_SUPPORTED;
}
251
// Returns this partition's bad block list, expressed in partition-relative
// erase blocks. The list is fetched from the BadBlock instance on first use
// and cached in bad_block_list_ (invalidated by BadBlockMarkBlockBad).
// Copies at most bad_block_list_len entries into bad_block_list and always
// reports the full count via bad_block_count.
zx_status_t NandPartDevice::BadBlockGetBadBlockList(
    uint32_t* bad_block_list, size_t bad_block_list_len, size_t* bad_block_count) {

    if (!bad_block_list_) {
        // Query the parent-wide list restricted to this partition's block range.
        const zx_status_t status = bad_block_->GetBadBlockList(
            erase_block_start_, erase_block_start_ + nand_info_.num_blocks, &bad_block_list_);
        if (status != ZX_OK) {
            return status;
        }
        // Translate absolute block numbers to partition-relative ones.
        for (uint32_t i = 0; i < bad_block_list_.size(); i++) {
            bad_block_list_[i] -= erase_block_start_;
        }
    }

    *bad_block_count = bad_block_list_.size();
    zxlogf(TRACE, "nandpart: %s: Bad block count: %zu\n", name(), *bad_block_count);

    // Nothing to copy: either the caller asked for a count only, or there are
    // no bad blocks.
    if (bad_block_list_len == 0 || bad_block_list_.size() == 0) {
        return ZX_OK;
    }
    if (bad_block_list == NULL) {
        return ZX_ERR_INVALID_ARGS;
    }

    // Copy out as many entries as the caller's buffer can hold.
    const size_t size = sizeof(uint32_t) * fbl::min(*bad_block_count, bad_block_list_len);
    memcpy(bad_block_list, bad_block_list_.get(), size);
    return ZX_OK;
}
280
BadBlockMarkBlockBad(uint32_t block)281 zx_status_t NandPartDevice::BadBlockMarkBlockBad(uint32_t block) {
282 if (block >= nand_info_.num_blocks) {
283 return ZX_ERR_OUT_OF_RANGE;
284 }
285
286 // First, invalidate our cached copy.
287 bad_block_list_.reset();
288
289 // Second, "write-through" to actually persist.
290 block += erase_block_start_;
291 return bad_block_->MarkBlockBad(block);
292 }
293
DdkGetProtocol(uint32_t proto_id,void * protocol)294 zx_status_t NandPartDevice::DdkGetProtocol(uint32_t proto_id, void* protocol) {
295 auto* proto = static_cast<ddk::AnyProtocol*>(protocol);
296 proto->ctx = this;
297 switch (proto_id) {
298 case ZX_PROTOCOL_NAND:
299 proto->ops = &nand_protocol_ops_;
300 break;
301 case ZX_PROTOCOL_BAD_BLOCK:
302 proto->ops = &bad_block_protocol_ops_;
303 break;
304 default:
305 return ZX_ERR_NOT_SUPPORTED;
306 }
307 return ZX_OK;
308 }
309
310 } // namespace nand
311
// C entry point referenced by the driver's bind table; delegates to the C++
// implementation, which creates one child device per partition.
extern "C" zx_status_t nandpart_bind(void* ctx, zx_device_t* parent) {
    return nand::NandPartDevice::Create(parent);
}
315