/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2020-06-30     thread-liu   first version
 */

#include <rtthread.h>
#include <rtdevice.h>
#include <board.h>

#ifdef BSP_USING_NAND

#define DRV_DEBUG
#define LOG_TAG "drv.nand"
#include <drv_log.h>
#include "drv_nand.h"

#define NAND_RB_PIN GET_PIN(D, 6)

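/* One 32-bit ECC word is kept per NAND_ECC_SECTOR_SIZE sector (512 bytes with
 * the ECCPS setting used in nand_init()): ecc_rdbuf holds the ECC read back
 * from the spare area of the page, while ecc_hdbuf holds the ECC computed by
 * the FMC hardware during the transfer. */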
static rt_uint32_t ecc_rdbuf[NAND_MAX_PAGE_SIZE / NAND_ECC_SECTOR_SIZE];
static rt_uint32_t ecc_hdbuf[NAND_MAX_PAGE_SIZE / NAND_ECC_SECTOR_SIZE];
struct rthw_fmc
{
    rt_uint32_t id;
    struct rt_mutex lock;
};
static struct rthw_fmc _device = {0};

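/* Configure the FMC kernel clock (engineering boot mode only) and the GPIO
 * pins of the 8-bit NAND interface: the data bus and control lines on GPIOD/
 * GPIOE, nCE on PG9, and the ready/busy (R/B) input on PD6. */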
static void rt_hw_nand_gpio_init(void)
{
    GPIO_InitTypeDef GPIO_InitStruct = {0};
    RCC_PeriphCLKInitTypeDef PeriphClkInit = {0};

    if (IS_ENGINEERING_BOOT_MODE())
    {
        PeriphClkInit.PeriphClockSelection = RCC_PERIPHCLK_FMC;
        PeriphClkInit.FmcClockSelection    = RCC_FMCCLKSOURCE_ACLK;
        if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInit) != HAL_OK)
        {
            Error_Handler();
        }
    }
    __HAL_RCC_FMC_CLK_ENABLE();
    __HAL_RCC_GPIOD_CLK_ENABLE();
    __HAL_RCC_GPIOE_CLK_ENABLE();
    __HAL_RCC_GPIOG_CLK_ENABLE();

    /* PD6 R/B */
    GPIO_InitStruct.Pin = GPIO_PIN_6;
    GPIO_InitStruct.Mode = GPIO_MODE_INPUT;
    GPIO_InitStruct.Pull = GPIO_NOPULL;
    GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH;
    HAL_GPIO_Init(GPIOD, &GPIO_InitStruct);

    /* PG9 NCE */
    GPIO_InitStruct.Pin = GPIO_PIN_9;
    GPIO_InitStruct.Mode = GPIO_MODE_AF_PP;
    GPIO_InitStruct.Pull = GPIO_NOPULL;
    GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH;
    GPIO_InitStruct.Alternate = GPIO_AF12_FMC;
    HAL_GPIO_Init(GPIOG, &GPIO_InitStruct);

    /* PD0, 1, 4, 5, 11, 12, 14, 15 */
    GPIO_InitStruct.Pin = GPIO_PIN_0 | GPIO_PIN_1 | GPIO_PIN_4 | GPIO_PIN_5 |
                          GPIO_PIN_11 | GPIO_PIN_12 | GPIO_PIN_14 | GPIO_PIN_15;
    GPIO_InitStruct.Pull = GPIO_NOPULL;
    HAL_GPIO_Init(GPIOD, &GPIO_InitStruct);

    /* PE7, 8, 9, 10 */
    GPIO_InitStruct.Pin = GPIO_PIN_7 | GPIO_PIN_8 | GPIO_PIN_9 | GPIO_PIN_10;
    HAL_GPIO_Init(GPIOE, &GPIO_InitStruct);
}

/* nand delay */
static void rt_hw_nand_delay(volatile uint32_t i)
{
    while (i > 0)
    {
        i--;
    }
}

/* read nand flash status */
static rt_err_t rt_hw_nand_read_status(void)
{
    rt_err_t result = RT_EOK;

    NAND_CMD_AREA = NAND_READSTA;

    rt_hw_nand_delay(NAND_TWHR_DELAY);

    result = NAND_ADDR_AREA;

    return result;
}

/* wait for the nand flash to become ready */
static rt_err_t rt_hw_nand_wait_ready(void)
{
    rt_err_t result = RT_EOK;
    uint32_t time = 0;

    while (1)
    {
        result = rt_hw_nand_read_status();

        if (result & NAND_READY)
        {
            break;
        }
        time++;
        if (time >= 0x1FFFFFFF)
        {
            return -RT_ETIMEOUT;
        }
    }

    return RT_EOK;
}

/* set nand mode */
static rt_err_t rt_hw_nand_set_mode(uint8_t mode)
{
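    /* ONFI SET FEATURES sequence: the command, one feature-address cycle
     * (0x01, the ONFI timing-mode feature), then four parameter bytes
     * (the requested mode followed by three zero bytes). */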
    NAND_CMD_AREA = NAND_FEATURE;
    NAND_DATA_AREA = 0x01;
    NAND_ADDR_AREA = mode;
    NAND_ADDR_AREA = 0;
    NAND_ADDR_AREA = 0;
    NAND_ADDR_AREA = 0;

    if (rt_hw_nand_wait_ready() == RT_EOK)
    {
        return RT_EOK;
    }
    else
    {
        return -RT_ERROR;
    }
}

/* reset nand flash */
static rt_err_t rt_hw_nand_reset(void)
{
    NAND_CMD_AREA = NAND_RESET;

    if (rt_hw_nand_wait_ready() == RT_EOK)
    {
        return RT_EOK; /* success */
    }
    else
    {
        return -RT_ERROR;
    }
}

/* read nand flash id */
static rt_err_t _read_id(struct rt_mtd_nand_device *device)
{
    RT_ASSERT(device != RT_NULL);

    uint8_t deviceid[5];

    NAND_CMD_AREA = NAND_READID; /* read id command */
    NAND_DATA_AREA = 0x00;

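    /* the device returns five ID bytes; byte 0 is the manufacturer code and is
     * discarded when the 32-bit id is packed below */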
    deviceid[0] = NAND_ADDR_AREA; /* Byte 0 */
    deviceid[1] = NAND_ADDR_AREA; /* Byte 1 */
    deviceid[2] = NAND_ADDR_AREA; /* Byte 2 */
    deviceid[3] = NAND_ADDR_AREA; /* Byte 3 */
    deviceid[4] = NAND_ADDR_AREA; /* Byte 4 */

    _device.id = ((uint32_t)deviceid[4]) << 24 | ((uint32_t)deviceid[3]) << 16 | ((uint32_t)deviceid[2]) << 8 | deviceid[1];

    LOG_D("nand id: 0x%08x", _device.id);

    return RT_EOK;
}

static rt_err_t rt_hw_nand_ecc_check(rt_uint32_t generatedEcc, rt_uint32_t readEcc, rt_uint8_t *data)
{
#define ECC_MASK28 0x0FFFFFFF /* 28 valid ECC parity bits. */
#define ECC_MASK   0x05555555 /* 14 ECC parity bits. */

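    /* Hamming-style ECC check: XOR the hardware-computed ECC with the ECC read
     * from the spare area. A zero syndrome means no error. If the odd and even
     * parity halves are exact complements of each other, a single-bit error
     * occurred and the even-parity bits encode its bit and byte position, so it
     * is corrected in place. A syndrome with exactly one bit set means the ECC
     * bytes themselves are corrupted; anything else is uncorrectable. */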
    rt_uint32_t count, bitNum, byteAddr;
    rt_uint32_t mask;
    rt_uint32_t syndrome;
    rt_uint32_t eccP;  /* 14 even ECC parity bits. */
    rt_uint32_t eccPn; /* 14 odd ECC parity bits. */

    syndrome = (generatedEcc ^ readEcc) & ECC_MASK28;

    if (syndrome == 0)
    {
        return (RT_EOK); /* No errors in data. */
    }

    eccPn = syndrome & ECC_MASK;        /* Get 14 odd parity bits.  */
    eccP  = (syndrome >> 1) & ECC_MASK; /* Get 14 even parity bits. */

    if ((eccPn ^ eccP) == ECC_MASK)     /* 1-bit correctable error? */
    {
        bitNum = (eccP & 0x01) |
                 ((eccP >> 1) & 0x02) |
                 ((eccP >> 2) & 0x04);
        LOG_D("ECC bit %d", bitNum);
        byteAddr = ((eccP >> 6) & 0x001) |
                   ((eccP >> 7) & 0x002) |
                   ((eccP >> 8) & 0x004) |
                   ((eccP >> 9) & 0x008) |
                   ((eccP >> 10) & 0x010) |
                   ((eccP >> 11) & 0x020) |
                   ((eccP >> 12) & 0x040) |
                   ((eccP >> 13) & 0x080) |
                   ((eccP >> 14) & 0x100) |
                   ((eccP >> 15) & 0x200) |
                   ((eccP >> 16) & 0x400);

        data[byteAddr] ^= 1 << bitNum;

        return RT_EOK;
    }

    /* Count the number of ones in the syndrome. */
    count = 0;
    mask = 0x00800000;
    while (mask)
    {
        if (syndrome & mask)
            count++;
        mask >>= 1;
    }

    if (count == 1) /* Error in the ECC itself. */
        return -RT_EIO;

    return -RT_EIO; /* Unable to correct the data. */

#undef ECC_MASK
#undef ECC_MASK28
}

static rt_err_t _read_page(struct rt_mtd_nand_device *device,
                           rt_off_t page,
                           rt_uint8_t *data,
                           rt_uint32_t data_len,
                           rt_uint8_t *spare,
                           rt_uint32_t spare_len)
{
    RT_ASSERT(device != RT_NULL);

    rt_uint32_t index, i, tickstart, eccnum;
    rt_err_t result = RT_EOK;
    rt_uint8_t *p = RT_NULL;

    page = page + device->block_start * device->pages_per_block;
    if (page / device->pages_per_block > device->block_end)
    {
        return -RT_EIO;
    }

    rt_mutex_take(&_device.lock, RT_WAITING_FOREVER);
    if (data && data_len)
    {
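        /* READ PAGE: two column-address cycles (column 0) followed by three
         * row-address cycles, then the read-confirm command. */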
        NAND_CMD_AREA = NAND_AREA_A;
        NAND_DATA_AREA = (rt_uint8_t)0;
        NAND_DATA_AREA = (rt_uint8_t)(0 >> 8);
        NAND_DATA_AREA = (rt_uint8_t)page;
        NAND_DATA_AREA = (rt_uint8_t)(page >> 8);
        NAND_DATA_AREA = (rt_uint8_t)(page >> 16);
        NAND_CMD_AREA = NAND_AREA_TRUE1;

        rt_hw_nand_delay(10);

        /* not an integer multiple of NAND_ECC_SECTOR_SIZE, no ECC check */
        if (data_len % NAND_ECC_SECTOR_SIZE)
        {
            for (i = 0; i < data_len; i++)
            {
                *data++ = NAND_ADDR_AREA;
            }
        }
        else
        {
            eccnum = data_len / NAND_ECC_SECTOR_SIZE;
            p = data;
            for (index = 0; index < eccnum; index++)
            {
                FMC_Bank3_R->PCR |= 1 << 6; /* enable ecc */

                for (i = 0; i < NAND_ECC_SECTOR_SIZE; i++)
                {
                    *data++ = NAND_ADDR_AREA;
                }
                /* Get tick */
                tickstart = rt_tick_get();
                /* Wait until the FIFO is empty */
                while ((FMC_Bank3_R->SR & (1 << 6)) == RESET)
                {
                    /* Check for the timeout */
                    if ((rt_tick_get() - tickstart) > 10000)
                    {
                        result = -RT_ETIMEOUT;
                        goto _exit;
                    }
                }
                ecc_hdbuf[index] = FMC_Bank3_R->HECCR; /* read hardware ecc */
                FMC_Bank3_R->PCR &= ~(1 << 6);         /* disable ecc */
            }
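            /* CHANGE READ COLUMN (0x05 / 0xE0): move the read pointer to
             * page_size + 0x10 in the spare area, where the ECC words are
             * stored, and read them back into ecc_rdbuf for comparison. */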
            i = device->page_size + 0x10;

            rt_hw_nand_delay(10);

            NAND_CMD_AREA = 0x05;
            NAND_DATA_AREA = (rt_uint8_t)i;
            NAND_DATA_AREA = (rt_uint8_t)(i >> 8);
            NAND_CMD_AREA = 0xE0;

            rt_hw_nand_delay(10);

            data = (rt_uint8_t *)&ecc_rdbuf[0];
            for (i = 0; i < 4 * eccnum; i++)
            {
                *data++ = NAND_ADDR_AREA;
            }
            /* check ecc */
            for (i = 0; i < eccnum; i++)
            {
                if (ecc_rdbuf[i] != ecc_hdbuf[i])
                {
                    result = rt_hw_nand_ecc_check(ecc_hdbuf[i], ecc_rdbuf[i], p + NAND_ECC_SECTOR_SIZE * i);
                    if (result != RT_EOK)
                    {
                        goto _exit;
                    }
                }
            }
        }
    }
    if (spare && spare_len)
    {
        NAND_CMD_AREA = NAND_AREA_A;
        NAND_DATA_AREA = (rt_uint8_t)0;
        NAND_DATA_AREA = (rt_uint8_t)(0 >> 8);
        NAND_DATA_AREA = (rt_uint8_t)page;
        NAND_DATA_AREA = (rt_uint8_t)(page >> 8);
        NAND_DATA_AREA = (rt_uint8_t)(page >> 16);
        NAND_CMD_AREA = NAND_AREA_TRUE1;
        rt_thread_delay(10);

        for (i = 0; i < spare_len; i++)
        {
            *spare++ = NAND_ADDR_AREA;
        }
    }

    if (rt_hw_nand_wait_ready() != RT_EOK)
    {
        result = -RT_ETIMEOUT;
        goto _exit;
    }

_exit:
    rt_mutex_release(&_device.lock);

    return result;
}

static rt_err_t _write_page(struct rt_mtd_nand_device *device,
                            rt_off_t page,
                            const rt_uint8_t *data,
                            rt_uint32_t data_len,
                            const rt_uint8_t *spare,
                            rt_uint32_t spare_len)
{
    RT_ASSERT(device != RT_NULL);

    rt_err_t result = RT_EOK;
    rt_uint32_t eccnum;
    rt_uint32_t i, index;
    rt_uint32_t tickstart = 0;

    page = page + device->block_start * device->pages_per_block;
    if (page / device->pages_per_block > device->block_end)
    {
        return -RT_EIO;
    }

    rt_mutex_take(&_device.lock, RT_WAITING_FOREVER);

    if (data && data_len)
    {
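        /* PROGRAM PAGE: the program command, two column-address cycles
         * (column 0) and three row-address cycles, then the page data; the
         * program-confirm command is issued after the data (and ECC) below. */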
        NAND_CMD_AREA = NAND_WRITE0;

        NAND_DATA_AREA = (rt_uint8_t)0;
        NAND_DATA_AREA = (rt_uint8_t)(0 >> 8);
        NAND_DATA_AREA = (rt_uint8_t)(page & 0xFF);
        NAND_DATA_AREA = (rt_uint8_t)(page >> 8);
        NAND_DATA_AREA = (rt_uint8_t)(page >> 16);

        rt_hw_nand_delay(10);

        if (data_len % NAND_ECC_SECTOR_SIZE)
        {
            /* write nand flash without ECC */
            for (i = 0; i < data_len; i++)
            {
                NAND_ADDR_AREA = *data++;
            }
        }
        else
        {
            eccnum = data_len / NAND_ECC_SECTOR_SIZE;
            for (index = 0; index < eccnum; index++)
            {
                FMC_Bank3_R->PCR |= 1 << 6; /* enable ecc */

                for (i = 0; i < NAND_ECC_SECTOR_SIZE; i++)
                {
                    NAND_ADDR_AREA = *data++;
                }
                /* Get tick */
                tickstart = rt_tick_get();
                /* Wait until the FIFO is empty */
                while ((FMC_Bank3_R->SR & (1 << 6)) == RESET)
                {
                    /* Check for the timeout */
                    if ((rt_tick_get() - tickstart) > 10000)
                    {
                        result = -RT_ETIMEOUT;
                        goto _exit;
                    }
                }
                ecc_hdbuf[index] = FMC_Bank3_R->HECCR; /* read hardware ecc */
                FMC_Bank3_R->PCR &= ~(1 << 6);         /* disable ecc */
            }
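            /* CHANGE WRITE COLUMN (0x85): move the write pointer to
             * page_size + 0x10 and append the hardware ECC words to the
             * spare area before confirming the program operation. */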

            i = device->page_size + 0x10;
            rt_hw_nand_delay(10);
            NAND_CMD_AREA = 0x85;
            NAND_DATA_AREA = (rt_uint8_t)i;
            NAND_DATA_AREA = (rt_uint8_t)(i >> 8);
            rt_hw_nand_delay(10);

            data = (uint8_t *)&ecc_hdbuf[0];

            for (index = 0; index < eccnum; index++)
            {
                for (i = 0; i < 4; i++)
                {
                    NAND_ADDR_AREA = *data++;
                }
            }
        }
    }
    NAND_CMD_AREA = NAND_WRITE_TURE1;
    if (rt_hw_nand_wait_ready() != RT_EOK)
    {
        result = -RT_EIO;
        goto _exit;
    }

    if (spare && spare_len)
    {
        NAND_CMD_AREA = NAND_WRITE0;
        NAND_DATA_AREA = (rt_uint8_t)(4096 & 0xFF);
        NAND_DATA_AREA = (rt_uint8_t)(4096 >> 8);
        NAND_DATA_AREA = (rt_uint8_t)(page & 0xFF);
        NAND_DATA_AREA = (rt_uint8_t)(page >> 8);
        NAND_DATA_AREA = (rt_uint8_t)(page >> 16);

        for (i = 4; i < spare_len; i++)
        {
            NAND_ADDR_AREA = spare[i];
        }
        NAND_CMD_AREA = NAND_WRITE_TURE1;
        if (rt_hw_nand_wait_ready() != RT_EOK)
        {
            result = -RT_EIO;
            goto _exit;
        }
    }
_exit:
    rt_mutex_release(&_device.lock);

    return result;
}

/* erase one block */
static rt_err_t _erase_block(struct rt_mtd_nand_device *device, rt_uint32_t block)
{
    RT_ASSERT(device != RT_NULL);
    unsigned int block_num;
    rt_err_t result = RT_EOK;

    block = block + device->block_start;
    block_num = block << 6;

    rt_mutex_take(&_device.lock, RT_WAITING_FOREVER);

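    /* BLOCK ERASE takes only the three row-address cycles; the shift above
     * converts the block number into the row (page) address used here. */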
    NAND_CMD_AREA = NAND_ERASE0;
    NAND_DATA_AREA = (uint8_t)block_num;
    NAND_DATA_AREA = (uint8_t)(block_num >> 8);
    NAND_DATA_AREA = (uint8_t)(block_num >> 16);
    NAND_CMD_AREA = NAND_ERASE1;

    rt_thread_delay(NAND_TBERS_DELAY);

    if (rt_hw_nand_wait_ready() != RT_EOK)
    {
        result = -RT_ERROR;
    }

    rt_mutex_release(&_device.lock);

    return result;
}

static rt_err_t _page_copy(struct rt_mtd_nand_device *device,
                           rt_off_t src_page,
                           rt_off_t dst_page)
{
    RT_ASSERT(device != RT_NULL);
    rt_err_t result = RT_EOK;
    rt_uint32_t source_block = 0, dest_block = 0;

    src_page = src_page + device->block_start * device->pages_per_block;
    dst_page = dst_page + device->block_start * device->pages_per_block;
    source_block = src_page / device->pages_per_block;
    dest_block = dst_page / device->pages_per_block;
    if ((source_block % 2) != (dest_block % 2))
    {
        return RT_MTD_ESRC;
    }

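    /* Copy-back (internal data move): the source page is read into the NAND's
     * internal page register and then programmed to the destination page
     * without transferring the data over the bus. Source and destination must
     * lie in the same plane, which the odd/even block check above enforces. */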
    NAND_CMD_AREA = NAND_MOVEDATA_CMD0;
    NAND_DATA_AREA = (rt_uint8_t)(0 & 0xFF);
    NAND_DATA_AREA = (rt_uint8_t)(0 >> 8);
    NAND_DATA_AREA = (rt_uint8_t)(src_page & 0xFF);
    NAND_DATA_AREA = (rt_uint8_t)(src_page >> 8);
    NAND_DATA_AREA = (rt_uint8_t)(src_page >> 16);
    NAND_CMD_AREA = NAND_MOVEDATA_CMD1;

    rt_hw_nand_delay(10);

    NAND_CMD_AREA = NAND_MOVEDATA_CMD2;
    NAND_DATA_AREA = (rt_uint8_t)(0 & 0xFF);
    NAND_DATA_AREA = (rt_uint8_t)(0 >> 8);
    NAND_DATA_AREA = (rt_uint8_t)(dst_page & 0xFF);
    NAND_DATA_AREA = (rt_uint8_t)(dst_page >> 8);
    NAND_DATA_AREA = (rt_uint8_t)(dst_page >> 16);
    NAND_CMD_AREA = NAND_MOVEDATA_CMD3;

    if (rt_hw_nand_wait_ready() != RT_EOK)
    {
        result = -RT_ERROR;
    }

    return result;
}

static rt_err_t _check_block(struct rt_mtd_nand_device *device, rt_uint32_t block)
{
    RT_ASSERT(device != RT_NULL);
    return (RT_MTD_EOK);
}

static rt_err_t _mark_bad(struct rt_mtd_nand_device *device, rt_uint32_t block)
{
    RT_ASSERT(device != RT_NULL);
    return (RT_MTD_EOK);
}

static const struct rt_mtd_nand_driver_ops ops =
{
    _read_id,
    _read_page,
    _write_page,
    _page_copy,
    _erase_block,
    _check_block,
    _mark_bad,
};
static struct rt_mtd_nand_device nand_dev;

static rt_err_t nand_init(struct rt_mtd_nand_device *device)
{
    RT_ASSERT(device != RT_NULL);
    uint32_t tempreg = 0;

    rt_hw_nand_gpio_init();

    tempreg |= 0 << 1;          /* disable the wait feature */
    tempreg |= 0 << 4;          /* 8-bit data bus width */
    tempreg |= 0 << 6;          /* ECC disabled for now (enabled per sector in read/write) */
    tempreg |= 1 << 17;         /* ECC sector size: 512 bytes */
    tempreg |= 5 << 9;          /* set TCLR */
    tempreg |= 5 << 13;         /* set TAR */
    FMC_Bank3_R->PCR = tempreg; /* set nand control register */

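    /* Common memory space timing (PMEM): MEMSET / MEMWAIT / MEMHOLD / MEMHIZ,
     * expressed in FMC clock cycles. */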
    tempreg = 0;
    tempreg |= 3 << 0;  /* set MEMSET */
    tempreg |= 5 << 8;  /* set MEMWAIT */
    tempreg |= 2 << 16; /* set MEMHOLD */
    tempreg |= 3 << 24; /* set MEMHIZ */
    FMC_Bank3_R->PMEM = tempreg;
    FMC_Bank3_R->PATT = 0;                     /* attribute memory space timing register */
    FMC_Bank3_R->PCR |= 1 << 2;                /* NAND flash memory bank enable bit */
    FMC_Bank1_R->BTCR[0] |= (uint32_t)1 << 31; /* enable fmc */

    rt_hw_nand_reset(); /* reset nand flash */
    rt_thread_delay(100);

    /* read id */
    _read_id(&nand_dev);

    if (_device.id != MT29F8G08ABACAH4)
    {
        LOG_E("nand id 0x%08x not support", _device.id);
        return -RT_ERROR; /* can't find nand flash */
    }

    rt_hw_nand_set_mode(4); /* set mode 4, high speed mode */

    return RT_EOK;
}

int rt_hw_nand_init(void)
{
    rt_err_t result = RT_EOK;

    rt_pin_mode(NAND_RB_PIN, PIN_MODE_INPUT_PULLUP); /* nand flash R/B pin */

    result = nand_init(&nand_dev);
    if (result != RT_EOK)
    {
        LOG_E("nand flash init error!");
        return -RT_ERROR;
    }
    rt_mutex_init(&_device.lock, "nand", RT_IPC_FLAG_PRIO);

    nand_dev.page_size       = 4096;
    nand_dev.pages_per_block = 224;
    nand_dev.plane_num       = 2;
    nand_dev.oob_size        = 64;
    nand_dev.oob_free        = 64 - ((4096) * 3 / 256);
    nand_dev.block_start     = 0;
    nand_dev.block_end       = 4095;

    nand_dev.block_total = nand_dev.block_end - nand_dev.block_start;
    nand_dev.ops = &ops;

    result = rt_mtd_nand_register_device("nand", &nand_dev);
    if (result != RT_EOK)
    {
        rt_device_unregister(&nand_dev.parent);
        return -RT_ERROR;
    }

    rt_kprintf("nand flash init success, id: 0x%08x\n", _device.id);

    return RT_EOK;
}

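/* register rt_hw_nand_init() with RT-Thread's automatic initialization (device stage) */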
INIT_DEVICE_EXPORT(rt_hw_nand_init);

#endif