// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "nand_driver.h"

#include <memory>
#include <new>

#include <ddk/debug.h>
#include <ddktl/protocol/badblock.h>
#include <fbl/array.h>
#include <zircon/assert.h>

#include "nand_operation.h"
#include "oob_doubler.h"

namespace {

class NandDriverImpl : public ftl::NandDriver {
public:
    constexpr static bool kUseHardware = true;
    NandDriverImpl(const nand_protocol_t* parent, const bad_block_protocol_t* bad_block)
        : parent_(parent, kUseHardware), bad_block_protocol_(bad_block) {}
    ~NandDriverImpl() final {}

    // NdmDriver interface:
    const char* Init() final;
    const char* Attach(const ftl::Volume* ftl_volume) final;
    bool Detach() final;
    int NandRead(uint32_t start_page, uint32_t page_count, void* page_buffer,
                 void* oob_buffer) final;
    int NandWrite(uint32_t start_page, uint32_t page_count, const void* page_buffer,
                  const void* oob_buffer) final;
    int NandErase(uint32_t page_num) final;
    int IsBadBlock(uint32_t page_num) final;
    bool IsEmptyPage(uint32_t page_num, const uint8_t* data, const uint8_t* spare) final;

    const fuchsia_hardware_nand_Info& info() const final { return info_; }

private:
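    // Reads the bad block list exposed by |bad_block_protocol_| into |bad_blocks_|.
    // Returns false if the protocol query fails.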
    bool GetBadBlocks();

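    // Access to the parent NAND device goes through the OobDoubler wrapper
    // (see oob_doubler.h).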
    ftl::OobDoubler parent_;
    size_t op_size_ = 0;
    fuchsia_hardware_nand_Info info_ = {};
    const bad_block_protocol_t* bad_block_protocol_;
    fbl::Array<uint32_t> bad_blocks_;
};

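// Queries the parent device for its geometry and operation size, then caches the
// current bad block list.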
const char* NandDriverImpl::Init() {
    parent_.Query(&info_, &op_size_);
    zxlogf(INFO, "FTL: Nand: page_size %u, blk sz %u, %u blocks, %u ecc, %u oob, op size %lu\n",
           info_.page_size, info_.pages_per_block, info_.num_blocks, info_.ecc_bits,
           info_.oob_size, op_size_);

    if (!GetBadBlocks()) {
        return "Failed to query bad blocks";
    }

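    // The FTL currently expects exactly 16 bytes of OOB per page.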
    ZX_DEBUG_ASSERT(info_.oob_size == 16);
    return nullptr;
}

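// Maps the NAND geometry into FTL volume options and creates the NDM volume on top
// of this driver.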
const char* NandDriverImpl::Attach(const ftl::Volume* ftl_volume) {
    ftl::VolumeOptions options = {
        .num_blocks = info_.num_blocks,
        // This should be 2%, but that is of the whole device, not just this partition.
        .max_bad_blocks = 82,
        .block_size = info_.page_size * info_.pages_per_block,
        .page_size = info_.page_size,
        .eb_size = info_.oob_size,
        .flags = 0  // Same as FSF_DRVR_PAGES (current default).
    };
    return CreateNdmVolume(ftl_volume, options);
}

bool NandDriverImpl::Detach() {
    return RemoveNdmVolume();
}

// Returns kNdmOk, kNdmUncorrectableEcc, kNdmFatalError or kNdmUnsafeEcc.
int NandDriverImpl::NandRead(uint32_t start_page, uint32_t page_count, void* page_buffer,
                             void* oob_buffer) {
    ftl::NandOperation operation(op_size_);
    uint32_t data_pages = page_buffer ? page_count : 0;
    size_t data_size = data_pages * info_.page_size;
    size_t oob_size = (oob_buffer ? page_count : 0) * info_.oob_size;
    size_t num_bytes = data_size + oob_size;

    nand_operation_t* op = operation.GetOperation();
    op->rw.command = NAND_OP_READ;
    op->rw.offset_nand = start_page;
    op->rw.length = page_count;

    zx_status_t status = ZX_OK;
    if (page_buffer) {
        status = operation.SetDataVmo(num_bytes);
        if (status != ZX_OK) {
            zxlogf(ERROR, "FTL: SetDataVmo Failed: %d\n", status);
            return ftl::kNdmFatalError;
        }
    }

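    // Data and OOB bytes share a single VMO: page data first, then the spare bytes.
    // offset_oob_vmo is expressed in pages.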
    if (oob_buffer) {
        status = operation.SetOobVmo(num_bytes);
        op->rw.offset_oob_vmo = data_pages;
        if (status != ZX_OK) {
            zxlogf(ERROR, "FTL: SetOobVmo Failed: %d\n", status);
            return ftl::kNdmFatalError;
        }
    }

    zxlogf(SPEW, "FTL: Read page, start %d, len %d\n", start_page, page_count);
    status = operation.Execute(&parent_);
    if (status != ZX_OK) {
        zxlogf(ERROR, "FTL: Read failed: %d\n", status);
        return ftl::kNdmFatalError;
    }

    if (page_buffer) {
        memcpy(page_buffer, operation.buffer(), data_size);
    }

    if (oob_buffer) {
        memcpy(oob_buffer, operation.buffer() + data_size, oob_size);
    }

    if (op->rw.corrected_bit_flips > info_.ecc_bits) {
        return ftl::kNdmUncorrectableEcc;
    }

    // This threshold is somewhat arbitrary, and should be adjusted if we deal
    // with multiple controllers (by making it part of the nand protocol), or
    // if we find it inappropriate after running endurance tests. We could also
    // decide we need the FTL to have a more active role detecting blocks that
    // should be moved around.
    if (op->rw.corrected_bit_flips > info_.ecc_bits / 2) {
        return ftl::kNdmUnsafeEcc;
    }

    return ftl::kNdmOk;
}

// Returns kNdmOk, kNdmError or kNdmFatalError. kNdmError triggers marking the block as bad.
int NandDriverImpl::NandWrite(uint32_t start_page, uint32_t page_count,
                              const void* page_buffer, const void* oob_buffer) {
    ftl::NandOperation operation(op_size_);
    uint32_t data_pages = page_buffer ? page_count : 0;
    size_t data_size = data_pages * info_.page_size;
    size_t oob_size = (oob_buffer ? page_count : 0) * info_.oob_size;
    size_t num_bytes = data_size + oob_size;

    nand_operation_t* op = operation.GetOperation();
    op->rw.command = NAND_OP_WRITE;
    op->rw.offset_nand = start_page;
    op->rw.length = page_count;

    zx_status_t status = ZX_OK;
    if (page_buffer) {
        status = operation.SetDataVmo(num_bytes);
        if (status != ZX_OK) {
            zxlogf(ERROR, "FTL: SetDataVmo Failed: %d\n", status);
            return ftl::kNdmFatalError;
        }
        memcpy(operation.buffer(), page_buffer, data_size);
    }

    if (oob_buffer) {
        status = operation.SetOobVmo(num_bytes);
        op->rw.offset_oob_vmo = data_pages;
        if (status != ZX_OK) {
            zxlogf(ERROR, "FTL: SetOobVmo Failed: %d\n", status);
            return ftl::kNdmFatalError;
        }
        memcpy(operation.buffer() + data_size, oob_buffer, oob_size);
    }

    zxlogf(SPEW, "FTL: Write page, start %d, len %d\n", start_page, page_count);
    status = operation.Execute(&parent_);
    if (status != ZX_OK) {
        return ftl::kNdmError;
    }

    return ftl::kNdmOk;
}

// Returns kNdmOk or kNdmError. kNdmError triggers marking the block as bad.
int NandDriverImpl::NandErase(uint32_t page_num) {
    uint32_t block_num = page_num / info_.pages_per_block;
    ftl::NandOperation operation(op_size_);

    nand_operation_t* op = operation.GetOperation();
    op->erase.command = NAND_OP_ERASE;
    op->erase.first_block = block_num;
    op->erase.num_blocks = 1;

    zxlogf(SPEW, "FTL: Erase block num %d\n", block_num);

    zx_status_t status = operation.Execute(&parent_);
    if (status != ZX_OK) {
        zxlogf(ERROR, "FTL: NandErase failed: %d\n", status);
        return ftl::kNdmError;
    }

    return ftl::kNdmOk;
}

// Returns kTrue, kFalse or kNdmError.
int NandDriverImpl::IsBadBlock(uint32_t page_num) {
    if (!bad_blocks_.size()) {
        return ftl::kFalse;
    }

    // The list should be really short.
    uint32_t block_num = page_num / info_.pages_per_block;
    for (uint32_t bad_block : bad_blocks_) {
        if (bad_block == block_num) {
            zxlogf(ERROR, "FTL: IsBadBlock(%d) found\n", block_num);
            return ftl::kTrue;
        }
    }
    return ftl::kFalse;
}

bool NandDriverImpl::IsEmptyPage(uint32_t page_num, const uint8_t* data, const uint8_t* spare) {
    return IsEmptyPageImpl(data, info_.page_size, spare, info_.oob_size);
}

bool NandDriverImpl::GetBadBlocks() {
    if (!bad_block_protocol_->ops) {
        return true;
    }
    ddk::BadBlockProtocolClient client(const_cast<bad_block_protocol_t*>(bad_block_protocol_));

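    // The first call with a null buffer returns the number of entries; the second
    // call retrieves the actual list.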
    size_t num_bad_blocks;
    zx_status_t status = client.GetBadBlockList(nullptr, 0, &num_bad_blocks);
    if (status != ZX_OK) {
        return false;
    }
    if (!num_bad_blocks) {
        return true;
    }

    std::unique_ptr<uint32_t[]> bad_block_list(new uint32_t[num_bad_blocks]);
    size_t new_count;
    status = client.GetBadBlockList(bad_block_list.get(), num_bad_blocks, &new_count);
    if (status != ZX_OK) {
        return false;
    }
    ZX_ASSERT(new_count == num_bad_blocks);

    bad_blocks_ = fbl::Array<uint32_t>(bad_block_list.release(), num_bad_blocks);

    for (uint32_t bad_block : bad_blocks_) {
        zxlogf(ERROR, "FTL: Bad block: %x\n", bad_block);
    }
    return true;
}

} // namespace

namespace ftl {

// Static.
std::unique_ptr<NandDriver> NandDriver::Create(const nand_protocol_t* parent,
                                               const bad_block_protocol_t* bad_block) {
    return std::unique_ptr<NandDriver>(new NandDriverImpl(parent, bad_block));
}

} // namespace ftl.
