1 /*
2 * Copyright (c) 2015 Gurjant Kalsi <me@gurjantkalsi.com>
3 *
4 * Use of this source code is governed by a MIT-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/MIT
7 */
8 #include <lk/err.h>
9 #include <lk/pow2.h>
10 #include <stdlib.h>
11 #include <string.h>
12
13 #include <arch/arm/cm.h>
14 #include <kernel/event.h>
15 #include <kernel/mutex.h>
16 #include <lib/bio.h>
17 #include <platform.h>
18 #include <platform/n25qxxa.h>
19 #include <platform/n25q128a.h>
20 #include <platform/n25q512a.h>
21 #include <platform/qspi.h>
22 #include <lk/trace.h>
23
#define LOCAL_TRACE 0

// Flash offsets at or above 16MiB require 4-byte addressing on the part.
#define FOUR_BYTE_ADDR_THRESHOLD (1 << 24)
// Maximum time (in ms) to wait for the DMA engine to acknowledge a disable.
#define MAX_DMA_WAIT_MS 1024
29
// Signature of the completion routine invoked from the DMA IRQ handler.
typedef void (*CpltCallback)(void);

// Whether the QSPI controller is in memory-mapped (linear) mode or in
// indirect-command mode.
typedef enum {
    QSPI_STATE_LINEAR,
    QSPI_STATE_COMMAND,
    QSPI_STATE_MAX
} device_state_t;
device_state_t device_state;  // Guarded by spiflash_mutex.


static QSPI_HandleTypeDef qspi_handle;
static DMA_Stream_TypeDef *dma2_stream7;
// Completion routine for the in-flight DMA transfer; set by qspi_tx_dma /
// qspi_rx_dma before the transfer is kicked off.
static CpltCallback cplt_callback;

static const char device_name[] = "qspi-flash";
static bdev_t qspi_flash_device;
static bio_erase_geometry_info_t geometry;

// Serializes all access to the QSPI peripheral and to device_state.
static mutex_t spiflash_mutex;
49
// Functions exported to Block I/O handler.
// NOTE(review): spiflash_bdev_read is declared here but no definition is
// visible in this file — confirm it is defined elsewhere or remove the
// declaration (the bdev registration below relies on bio's default read).
static ssize_t spiflash_bdev_read(struct bdev *device, void *buf, off_t offset, size_t len);
static ssize_t spiflash_bdev_read_block(struct bdev *device, void *buf, bnum_t block, uint count);
static ssize_t spiflash_bdev_write_block(struct bdev *device, const void *buf, bnum_t block, uint count);
static ssize_t spiflash_bdev_erase(struct bdev *device, off_t offset, size_t len);
static int spiflash_ioctl(struct bdev *device, int request, void *argp);

// Programs one page at addr; caller must hold spiflash_mutex.
static ssize_t qspi_write_page_unsafe(uint32_t addr, const uint8_t *data);

// Erase helpers; instruction selects subsector/sector/bulk erase.
static ssize_t qspi_erase(bdev_t *device, uint32_t block_addr, uint32_t instruction);
static ssize_t qspi_bulk_erase(bdev_t *device);
static ssize_t qspi_erase_sector(bdev_t *device, uint32_t block_addr);
static ssize_t qspi_erase_subsector(bdev_t *device, uint32_t block_addr);
static status_t qspi_auto_polling_mem_ready_unsafe(QSPI_HandleTypeDef *hqspi, uint8_t match, uint8_t mask);

static HAL_StatusTypeDef qspi_cmd(QSPI_HandleTypeDef *, QSPI_CommandTypeDef *);
static HAL_StatusTypeDef qspi_tx_dma(QSPI_HandleTypeDef *, QSPI_CommandTypeDef *, uint8_t *);
static HAL_StatusTypeDef qspi_rx_dma(QSPI_HandleTypeDef *, QSPI_CommandTypeDef *, uint8_t *);

// Memory-mapped (linear) mode control; all take/release spiflash_mutex.
static status_t qspi_enable_linear(void);
static status_t qspi_disable_linear(void);
static bool qspi_is_linear(void);

status_t qspi_dma_init(QSPI_HandleTypeDef *hqspi);

static uint32_t get_specialized_instruction(uint32_t instruction, uint32_t address);
static uint32_t get_address_size(uint32_t address);

// Events signalled from IRQ context when the corresponding QSPI or DMA
// operation completes.
static event_t cmd_event;
static event_t rx_event;
static event_t tx_event;
static event_t st_event;

// Maps STM32 HAL result codes onto LK status codes.
status_t hal_error_to_status(HAL_StatusTypeDef hal_status);
84
85 // Unsetting the DMA Enable bit in the DMA Control register isn't enough to
86 // disable the DMA Engine since DMA transfers may still be in progress.
87 // We have to wait for the DMA Engine to acknowledge being disabled by watching
88 // the DMA Enable bit.
dma_disable(DMA_Stream_TypeDef * dma)89 static status_t dma_disable(DMA_Stream_TypeDef *dma) {
90 // Unset the DMA Enable bit.
91 dma->CR &= ~DMA_SxCR_EN;
92
93 lk_time_t start_time = current_time();
94
95 while (dma->CR & DMA_SxCR_EN) {
96
97 dma->CR &= ~DMA_SxCR_EN;
98
99 if (current_time() - start_time > MAX_DMA_WAIT_MS) {
100 return ERR_TIMED_OUT;
101 }
102 }
103
104 return NO_ERROR;
105 }
106
107 // Must hold spiflash_mutex before calling.
qspi_write_enable_unsafe(QSPI_HandleTypeDef * hqspi)108 static status_t qspi_write_enable_unsafe(QSPI_HandleTypeDef *hqspi) {
109 HAL_StatusTypeDef status;
110
111 static const QSPI_CommandTypeDef s_command = {
112 .InstructionMode = QSPI_INSTRUCTION_1_LINE,
113 .Instruction = WRITE_ENABLE_CMD,
114 .AddressMode = QSPI_ADDRESS_NONE,
115 .AlternateByteMode = QSPI_ALTERNATE_BYTES_NONE,
116 .DataMode = QSPI_DATA_NONE,
117 .DummyCycles = 0,
118 .DdrMode = QSPI_DDR_MODE_DISABLE,
119 .DdrHoldHalfCycle = QSPI_DDR_HHC_ANALOG_DELAY,
120 .SIOOMode = QSPI_SIOO_INST_EVERY_CMD
121 };
122
123 status = HAL_QSPI_Command(hqspi, &s_command, HAL_QPSI_TIMEOUT_DEFAULT_VALUE);
124 if (status != HAL_OK) {
125 dprintf(CRITICAL, "%s: HAL_QSPI_Command failed with err = %d\n",
126 __func__, status);
127 return hal_error_to_status(status);
128 }
129
130 status = qspi_auto_polling_mem_ready_unsafe(hqspi, N25QXXA_SR_WREN, N25QXXA_SR_WREN);
131 if (status != HAL_OK) {
132 dprintf(CRITICAL, "%s: auto_polling_mem_ready failed with err = %d\n",
133 __func__, status);
134 return hal_error_to_status(status);
135 }
136
137 return NO_ERROR;
138 }
139
140 // Must hold spiflash_mutex before calling.
qspi_dummy_cycles_cfg_unsafe(QSPI_HandleTypeDef * hqspi)141 static status_t qspi_dummy_cycles_cfg_unsafe(QSPI_HandleTypeDef *hqspi) {
142 uint8_t reg;
143 HAL_StatusTypeDef status;
144
145 /* Initialize the read volatile configuration register command */
146 static const QSPI_CommandTypeDef init_rvcr_cmd = {
147 .InstructionMode = QSPI_INSTRUCTION_1_LINE,
148 .Instruction = READ_VOL_CFG_REG_CMD,
149 .AddressMode = QSPI_ADDRESS_NONE,
150 .AlternateByteMode = QSPI_ALTERNATE_BYTES_NONE,
151 .DataMode = QSPI_DATA_1_LINE,
152 .DummyCycles = 0,
153 .NbData = 1,
154 .DdrMode = QSPI_DDR_MODE_DISABLE,
155 .DdrHoldHalfCycle = QSPI_DDR_HHC_ANALOG_DELAY,
156 .SIOOMode = QSPI_SIOO_INST_EVERY_CMD
157 };
158
159 /* Configure the command */
160 status = HAL_QSPI_Command(hqspi, &init_rvcr_cmd, HAL_QPSI_TIMEOUT_DEFAULT_VALUE);
161 if (status != HAL_OK) {
162 dprintf(CRITICAL, "%s: HAL_QSPI_Command(init_rvcr_cmd) failed with err = %d\n",
163 __func__, status);
164 return hal_error_to_status(status);
165 }
166
167 /* Reception of the data */
168 status = HAL_QSPI_Receive(hqspi, ®, HAL_QPSI_TIMEOUT_DEFAULT_VALUE);
169 if (status != HAL_OK) {
170 dprintf(CRITICAL, "%s: HAL_QSPI_Receive failed with err = %d\n",
171 __func__, status);
172 return hal_error_to_status(status);
173 }
174
175 /* Enable write operations */
176 status = qspi_write_enable_unsafe(hqspi);
177 if (status != NO_ERROR) {
178 dprintf(CRITICAL, "%s: HAL_QSPI_Receive failed with err = %d\n",
179 __func__, status);
180 return status;
181 }
182
183 /* Update volatile configuration register (with new dummy cycles) */
184 static const QSPI_CommandTypeDef update_rvcr_cmd = {
185 .InstructionMode = QSPI_INSTRUCTION_1_LINE,
186 .Instruction = WRITE_VOL_CFG_REG_CMD,
187 .AddressMode = QSPI_ADDRESS_NONE,
188 .AlternateByteMode = QSPI_ALTERNATE_BYTES_NONE,
189 .DataMode = QSPI_DATA_1_LINE,
190 .DummyCycles = 0,
191 .NbData = 1,
192 .DdrMode = QSPI_DDR_MODE_DISABLE,
193 .DdrHoldHalfCycle = QSPI_DDR_HHC_ANALOG_DELAY,
194 .SIOOMode = QSPI_SIOO_INST_EVERY_CMD
195 };
196 MODIFY_REG(
197 reg, N25QXXA_VCR_NB_DUMMY,
198 (N25QXXA_DUMMY_CYCLES_READ_QUAD << POSITION_VAL(N25QXXA_VCR_NB_DUMMY)));
199
200 /* Configure the write volatile configuration register command */
201 status = HAL_QSPI_Command(hqspi, &update_rvcr_cmd, HAL_QPSI_TIMEOUT_DEFAULT_VALUE);
202 if (status != HAL_OK) {
203 dprintf(CRITICAL, "%s: HAL_QSPI_Command(update_rvcr_cmd) failed with err = %d\n",
204 __func__, status);
205 return hal_error_to_status(status);
206 }
207
208 /* Transmission of the data */
209 status = HAL_QSPI_Transmit(hqspi, ®, HAL_QPSI_TIMEOUT_DEFAULT_VALUE);
210 if (status != HAL_OK) {
211 dprintf(CRITICAL, "%s: HAL_QSPI_Transmit failed with err = %d\n",
212 __func__, status);
213 return hal_error_to_status(status);
214 }
215
216 return NO_ERROR;
217 }
218
219 // Must hold spiflash_mutex before calling.
qspi_auto_polling_mem_ready_unsafe(QSPI_HandleTypeDef * hqspi,uint8_t match,uint8_t mask)220 static status_t qspi_auto_polling_mem_ready_unsafe(QSPI_HandleTypeDef *hqspi, uint8_t match, uint8_t mask) {
221 QSPI_AutoPollingTypeDef s_config;
222 HAL_StatusTypeDef status;
223
224 static const QSPI_CommandTypeDef s_command = {
225 .InstructionMode = QSPI_INSTRUCTION_1_LINE,
226 .Instruction = READ_STATUS_REG_CMD,
227 .AddressMode = QSPI_ADDRESS_NONE,
228 .AlternateByteMode = QSPI_ALTERNATE_BYTES_NONE,
229 .DataMode = QSPI_DATA_1_LINE,
230 .DummyCycles = 0,
231 .DdrMode = QSPI_DDR_MODE_DISABLE,
232 .DdrHoldHalfCycle = QSPI_DDR_HHC_ANALOG_DELAY,
233 .SIOOMode = QSPI_SIOO_INST_EVERY_CMD,
234 .NbData = 1
235 };
236
237 s_config.Match = match;
238 s_config.Mask = mask;
239 s_config.MatchMode = QSPI_MATCH_MODE_AND;
240 s_config.StatusBytesSize = 1;
241 s_config.Interval = 0x10;
242 s_config.AutomaticStop = QSPI_AUTOMATIC_STOP_ENABLE;
243
244 status = HAL_QSPI_AutoPolling_IT(hqspi, &s_command, &s_config);
245 if (status != HAL_OK) {
246 dprintf(CRITICAL, "%s: HAL_QSPI_AutoPolling_IT failed with err = %d\n",
247 __func__, status);
248 return hal_error_to_status(status);
249 }
250 event_wait(&st_event);
251
252 return NO_ERROR;
253 }
254
255 // Must hold spiflash_mutex before calling.
qspi_reset_memory_unsafe(QSPI_HandleTypeDef * hqspi)256 static status_t qspi_reset_memory_unsafe(QSPI_HandleTypeDef *hqspi) {
257 QSPI_CommandTypeDef s_command;
258 HAL_StatusTypeDef status;
259
260 /* Initialize the reset enable command */
261 s_command.InstructionMode = QSPI_INSTRUCTION_1_LINE;
262 s_command.Instruction = RESET_ENABLE_CMD;
263 s_command.AddressMode = QSPI_ADDRESS_NONE;
264 s_command.AlternateByteMode = QSPI_ALTERNATE_BYTES_NONE;
265 s_command.DataMode = QSPI_DATA_NONE;
266 s_command.DummyCycles = 0;
267 s_command.DdrMode = QSPI_DDR_MODE_DISABLE;
268 s_command.DdrHoldHalfCycle = QSPI_DDR_HHC_ANALOG_DELAY;
269 s_command.SIOOMode = QSPI_SIOO_INST_EVERY_CMD;
270
271 /* Send the command */
272 status = HAL_QSPI_Command(hqspi, &s_command, HAL_QPSI_TIMEOUT_DEFAULT_VALUE);
273 if (status != HAL_OK) {
274 dprintf(CRITICAL, "%s: HAL_QSPI_Command(RESET_ENABLE_CMD) failed with err = %d\n",
275 __func__, status);
276 return hal_error_to_status(status);
277 }
278
279 /* Send the reset memory command */
280 s_command.Instruction = RESET_MEMORY_CMD;
281 status = HAL_QSPI_Command(hqspi, &s_command, HAL_QPSI_TIMEOUT_DEFAULT_VALUE);
282 if (status != HAL_OK) {
283 dprintf(CRITICAL, "%s: HAL_QSPI_Command(RESET_MEMORY_CMD) failed with err = %d\n",
284 __func__, status);
285 return hal_error_to_status(status);
286 }
287
288 /* Configure automatic polling mode to wait the memory is ready */
289 status = qspi_auto_polling_mem_ready_unsafe(hqspi, 0, N25QXXA_SR_WIP);
290 if (status != NO_ERROR) {
291 dprintf(CRITICAL, "%s: auto_polling_mem_ready failed with err = %d\n",
292 __func__, status);
293 return hal_error_to_status(status);
294 }
295
296 return NO_ERROR;
297 }
298
spiflash_bdev_read_block(struct bdev * device,void * buf,bnum_t block,uint count)299 static ssize_t spiflash_bdev_read_block(struct bdev *device, void *buf,
300 bnum_t block, uint count) {
301 LTRACEF("device %p, buf %p, block %u, count %u\n",
302 device, buf, block, count);
303
304 if (!IS_ALIGNED((uintptr_t)buf, CACHE_LINE)) {
305 DEBUG_ASSERT(IS_ALIGNED((uintptr_t)buf, CACHE_LINE));
306 return ERR_INVALID_ARGS;
307 }
308
309 count = bio_trim_block_range(device, block, count);
310 if (count == 0)
311 return 0;
312
313 QSPI_CommandTypeDef s_command;
314 HAL_StatusTypeDef status;
315
316 uint64_t largest_offset = (block + count) * device->block_size - 1;
317
318 // /* Initialize the read command */
319 s_command.InstructionMode = QSPI_INSTRUCTION_1_LINE;
320 s_command.Instruction = get_specialized_instruction(QUAD_OUT_FAST_READ_CMD, largest_offset);
321 s_command.AddressMode = QSPI_ADDRESS_1_LINE;
322 s_command.AddressSize = get_address_size(largest_offset);
323 s_command.AlternateByteMode = QSPI_ALTERNATE_BYTES_NONE;
324 s_command.DataMode = QSPI_DATA_4_LINES;
325 s_command.DummyCycles = N25QXXA_DUMMY_CYCLES_READ_QUAD;
326 s_command.DdrMode = QSPI_DDR_MODE_DISABLE;
327 s_command.DdrHoldHalfCycle = QSPI_DDR_HHC_ANALOG_DELAY;
328 s_command.SIOOMode = QSPI_SIOO_INST_EVERY_CMD;
329
330 s_command.NbData = device->block_size;
331
332 ssize_t retcode = 0;
333
334 mutex_acquire(&spiflash_mutex);
335
336 s_command.Address = block * device->block_size;
337 for (uint i = 0; i < count; i++) {
338
339 status = HAL_QSPI_Command(&qspi_handle, &s_command, HAL_QPSI_TIMEOUT_DEFAULT_VALUE);
340 if (status != HAL_OK) {
341 retcode = hal_error_to_status(status);
342 dprintf(CRITICAL, "%s: HAL_QSPI_Command failed with err = %ld\n",
343 __func__, retcode);
344 goto err;
345 }
346
347 // /* Reception of the data */
348 status = qspi_rx_dma(&qspi_handle, &s_command, buf);
349 if (status != HAL_OK) {
350 retcode = hal_error_to_status(status);
351 dprintf(CRITICAL, "%s: qspi_rx_dma failed with err = %ld\n",
352 __func__, retcode);
353 goto err;
354 }
355
356 buf += device->block_size;
357 retcode += device->block_size;
358 s_command.Address += device->block_size;
359 }
360
361 err:
362 mutex_release(&spiflash_mutex);
363 return retcode;
364 }
365
spiflash_bdev_write_block(struct bdev * device,const void * _buf,bnum_t block,uint count)366 static ssize_t spiflash_bdev_write_block(struct bdev *device, const void *_buf,
367 bnum_t block, uint count) {
368 count = bio_trim_block_range(device, block, count);
369 if (count == 0) {
370 return 0;
371 }
372
373 const uint8_t *buf = _buf;
374
375 mutex_acquire(&spiflash_mutex);
376
377 ssize_t total_bytes_written = 0;
378 for (; count > 0; count--, block++) {
379 ssize_t bytes_written = qspi_write_page_unsafe(block * N25QXXA_PAGE_SIZE, buf);
380 if (bytes_written < 0) {
381 dprintf(CRITICAL, "%s: qspi_write_page_unsafe failed with err = %ld\n",
382 __func__, bytes_written);
383 total_bytes_written = bytes_written;
384 goto err;
385 }
386
387 buf += N25QXXA_PAGE_SIZE;
388 total_bytes_written += bytes_written;
389 }
390
391 err:
392 mutex_release(&spiflash_mutex);
393 return total_bytes_written;
394 }
395
spiflash_bdev_erase(struct bdev * device,off_t offset,size_t len)396 static ssize_t spiflash_bdev_erase(struct bdev *device, off_t offset,
397 size_t len) {
398 len = bio_trim_range(device, offset, len);
399 if (len == 0) {
400 return 0;
401 }
402
403 ssize_t total_erased = 0;
404
405 mutex_acquire(&spiflash_mutex);
406
407 // Choose an erase strategy based on the number of bytes being erased.
408 if (len == device->total_size && offset == 0) {
409 // Bulk erase the whole flash.
410 total_erased = qspi_bulk_erase(device);
411 goto finish;
412 }
413
414 // Erase as many sectors as necessary, then switch to subsector erase for
415 // more fine grained erasure.
416 while (((ssize_t)len - total_erased) >= N25QXXA_SECTOR_SIZE) {
417 ssize_t erased = qspi_erase_sector(device, offset);
418 if (erased < 0) {
419 total_erased = erased;
420 goto finish;
421 }
422 total_erased += erased;
423 offset += erased;
424 }
425
426 while (total_erased < (ssize_t)len) {
427 ssize_t erased = qspi_erase_subsector(device, offset);
428 if (erased < 0) {
429 total_erased = erased;
430 goto finish;
431 }
432 total_erased += erased;
433 offset += erased;
434 }
435
436 finish:
437 mutex_release(&spiflash_mutex);
438 return total_erased;
439 }
440
// Block I/O ioctl handler: controls and queries memory-mapped (linear) mode.
// GET_MEM_MAP enables linear mode and (via deliberate fallthrough) reports
// the mapping address; GET_MAP_ADDR only reports it; PUT_MEM_MAP returns the
// controller to command mode.
static int spiflash_ioctl(struct bdev *device, int request, void *argp) {
    int ret = NO_ERROR;

    switch (request) {
        case BIO_IOCTL_GET_MEM_MAP:
            /* put the device into linear mode */
            ret = qspi_enable_linear();
            // Fallthrough.
        case BIO_IOCTL_GET_MAP_ADDR:
            // NOTE(review): on the fallthrough path the map address is
            // written to argp even if qspi_enable_linear() failed; callers
            // must check the return code before using the address.
            if (argp)
                *(void **)argp = (void *)QSPI_BASE;
            break;
        case BIO_IOCTL_PUT_MEM_MAP:
            ret = qspi_disable_linear();
            break;
        case BIO_IOCTL_IS_MAPPED:
            // Reports linear-mode state as a pointer-sized boolean.
            if (argp)
                *(void **)argp = (void *)qspi_is_linear();
            break;
        default:
            ret = ERR_NOT_SUPPORTED;
    }

    return ret;
}
466
qspi_write_page_unsafe(uint32_t addr,const uint8_t * data)467 static ssize_t qspi_write_page_unsafe(uint32_t addr, const uint8_t *data) {
468 if (!IS_ALIGNED(addr, N25QXXA_PAGE_SIZE)) {
469 return ERR_INVALID_ARGS;
470 }
471
472 HAL_StatusTypeDef status;
473
474 QSPI_CommandTypeDef s_command = {
475 .InstructionMode = QSPI_INSTRUCTION_1_LINE,
476 .Instruction = get_specialized_instruction(QUAD_IN_FAST_PROG_CMD, addr),
477 .AddressMode = QSPI_ADDRESS_1_LINE,
478 .AddressSize = get_address_size(addr),
479 .AlternateByteMode = QSPI_ALTERNATE_BYTES_NONE,
480 .DataMode = QSPI_DATA_4_LINES,
481 .DummyCycles = 0,
482 .DdrMode = QSPI_DDR_MODE_DISABLE,
483 .DdrHoldHalfCycle = QSPI_DDR_HHC_ANALOG_DELAY,
484 .SIOOMode = QSPI_SIOO_INST_EVERY_CMD,
485 .Address = addr,
486 .NbData = N25QXXA_PAGE_SIZE
487 };
488
489 status_t write_enable_result = qspi_write_enable_unsafe(&qspi_handle);
490 if (write_enable_result != NO_ERROR) {
491 dprintf(CRITICAL, "%s: qspi_write_enable_unsafe failed with err = %d\n",
492 __func__, write_enable_result);
493 return write_enable_result;
494 }
495
496 status = HAL_QSPI_Command(&qspi_handle, &s_command, HAL_QPSI_TIMEOUT_DEFAULT_VALUE);
497 if (status != HAL_OK) {
498 dprintf(CRITICAL, "%s: HAL_QSPI_Command failed with err = %d\n",
499 __func__, status);
500 return hal_error_to_status(status);
501 }
502
503 status = qspi_tx_dma(&qspi_handle, &s_command, (uint8_t *)data);
504 if (status != HAL_OK) {
505 dprintf(CRITICAL, "%s: qspi_tx_dma failed with err = %d\n",
506 __func__, status);
507 return hal_error_to_status(status);
508 }
509
510 status_t auto_polling_mem_ready_result =
511 qspi_auto_polling_mem_ready_unsafe(&qspi_handle, 0, N25QXXA_SR_WIP);
512 if (auto_polling_mem_ready_result != NO_ERROR) {
513 dprintf(CRITICAL, "%s: auto_polling_mem_ready failed with err = %d\n",
514 __func__, auto_polling_mem_ready_result);
515 return auto_polling_mem_ready_result;
516 }
517
518 return N25QXXA_PAGE_SIZE;
519 }
520
521
// Initializes the QSPI controller and the attached N25QxxA flash, then
// registers it as an LK Block I/O device named "qspi-flash".
// flash_size is the device capacity in bytes (assumed a power of two).
// Returns NO_ERROR on success or a negative LK status code.
status_t qspi_flash_init(size_t flash_size) {
    status_t result = NO_ERROR;

    // Auto-unsignal events signalled from IRQ context on completion of a
    // command, TX DMA, RX DMA, and status-register auto-poll respectively.
    event_init(&cmd_event, false, EVENT_FLAG_AUTOUNSIGNAL);
    event_init(&tx_event, false, EVENT_FLAG_AUTOUNSIGNAL);
    event_init(&rx_event, false, EVENT_FLAG_AUTOUNSIGNAL);
    event_init(&st_event, false, EVENT_FLAG_AUTOUNSIGNAL);

    // Hold the mutex for the entire init sequence; the *_unsafe helpers
    // below require it.
    mutex_init(&spiflash_mutex);
    result = mutex_acquire(&spiflash_mutex);
    if (result != NO_ERROR) {
        return result;
    }

    qspi_handle.Instance = QUADSPI;

    HAL_StatusTypeDef status;

    // Enable the QuadSPI memory interface clock
    __HAL_RCC_QSPI_CLK_ENABLE();

    // Reset the QuadSPI memory interface
    __HAL_RCC_QSPI_FORCE_RESET();
    __HAL_RCC_QSPI_RELEASE_RESET();

    // Setup the QSPI Flash device.
    qspi_handle.Init.ClockPrescaler = 1;
    qspi_handle.Init.FifoThreshold = 4;
    qspi_handle.Init.SampleShifting = QSPI_SAMPLE_SHIFTING_HALFCYCLE;
    // POSITION_VAL gives log2 of a power-of-two size; the HAL's FlashSize
    // field encodes the address width as log2(bytes) - 1.
    qspi_handle.Init.FlashSize = POSITION_VAL(flash_size) - 1;
    qspi_handle.Init.ChipSelectHighTime = QSPI_CS_HIGH_TIME_2_CYCLE;
    qspi_handle.Init.ClockMode = QSPI_CLOCK_MODE_0;
    qspi_handle.Init.FlashID = QSPI_FLASH_ID_1;
    qspi_handle.Init.DualFlash = QSPI_DUALFLASH_DISABLE;

    status = HAL_QSPI_Init(&qspi_handle);
    if (status != HAL_OK) {
        result = hal_error_to_status(status);
        dprintf(CRITICAL, "%s: HAL_QSPI_Init failed with err = %d\n",
                __func__, result);
        goto err;
    }

    // enable the qspi interrupt
    HAL_NVIC_EnableIRQ(QUADSPI_IRQn);

    // Put the flash in a known state before configuring it.
    result = qspi_reset_memory_unsafe(&qspi_handle);
    if (result != NO_ERROR) {
        dprintf(CRITICAL, "%s: qspi_reset_memory_unsafe failed with err = %d\n",
                __func__, result);
        goto err;
    }

    // Program the dummy-cycle count needed for quad reads.
    result = qspi_dummy_cycles_cfg_unsafe(&qspi_handle);
    if (result != NO_ERROR) {
        dprintf(CRITICAL, "%s: qspi_dummy_cycles_cfg_unsafe failed with err = %d\n",
                __func__, result);
        goto err;
    }

    result = qspi_dma_init(&qspi_handle);
    if (result != NO_ERROR) {
        dprintf(CRITICAL, "%s: qspi_dma_init failed with err = %d\n",
                __func__, result);
        goto err;
    }

    // Abort any stale operation and leave the controller in indirect
    // (command) mode.
    result = hal_error_to_status(HAL_QSPI_Abort(&qspi_handle));
    if (result != NO_ERROR) {
        dprintf(CRITICAL, "%s: HAL_QSPI_Abort failed with err = %d\n",
                __func__, result);
        goto err;
    }
    device_state = QSPI_STATE_COMMAND;

    // Initialize the QSPI Flash and register it as a Block I/O device.
    // NOTE(review): erase_size is assigned log2 of the subsector size — the
    // same value as erase_shift. If bio's erase_size is meant to be the
    // erase-block size in bytes, this should likely be
    // N25QXXA_SUBSECTOR_SIZE; confirm against bio_erase_geometry_info_t.
    geometry.erase_size = log2_uint(N25QXXA_SUBSECTOR_SIZE);
    geometry.erase_shift = log2_uint(N25QXXA_SUBSECTOR_SIZE);
    geometry.start = 0;
    geometry.size = flash_size;

    bio_initialize_bdev(&qspi_flash_device, device_name, N25QXXA_PAGE_SIZE,
                        (flash_size / N25QXXA_PAGE_SIZE), 1, &geometry,
                        BIO_FLAG_CACHE_ALIGNED_READS);

    // qspi_flash_device.read: Use default hook.
    qspi_flash_device.read_block = &spiflash_bdev_read_block;
    // qspi_flash_device.write has a default hook that will be okay
    qspi_flash_device.write_block = &spiflash_bdev_write_block;
    qspi_flash_device.erase = &spiflash_bdev_erase;
    qspi_flash_device.ioctl = &spiflash_ioctl;

    /* we erase to 0xff */
    qspi_flash_device.erase_byte = 0xff;

    bio_register_device(&qspi_flash_device);

err:
    mutex_release(&spiflash_mutex);
    return result;
}
623
hal_error_to_status(HAL_StatusTypeDef hal_status)624 status_t hal_error_to_status(HAL_StatusTypeDef hal_status) {
625 switch (hal_status) {
626 case HAL_OK:
627 return NO_ERROR;
628 case HAL_ERROR:
629 return ERR_GENERIC;
630 case HAL_BUSY:
631 return ERR_BUSY;
632 case HAL_TIMEOUT:
633 return ERR_TIMED_OUT;
634 default:
635 return ERR_GENERIC;
636 }
637 }
638
qspi_erase(bdev_t * device,uint32_t block_addr,uint32_t instruction)639 static ssize_t qspi_erase(bdev_t *device, uint32_t block_addr, uint32_t instruction) {
640 if (instruction == BULK_ERASE_CMD && block_addr != 0) {
641 // This call was probably not what the user intended since the
642 // block_addr is irrelevant when performing a bulk erase.
643 return ERR_INVALID_ARGS;
644 }
645
646 QSPI_CommandTypeDef erase_cmd;
647 ssize_t num_erased_bytes;
648 switch (instruction) {
649 case SUBSECTOR_ERASE_CMD: {
650 num_erased_bytes = N25QXXA_SUBSECTOR_SIZE;
651 erase_cmd.AddressSize = get_address_size(block_addr);
652 erase_cmd.Instruction = get_specialized_instruction(instruction, block_addr);
653 erase_cmd.AddressMode = QSPI_ADDRESS_1_LINE;
654 erase_cmd.Address = block_addr;
655
656 break;
657 }
658 case SECTOR_ERASE_CMD: {
659 num_erased_bytes = N25QXXA_SECTOR_SIZE;
660 erase_cmd.AddressSize = get_address_size(block_addr);
661 erase_cmd.Instruction = get_specialized_instruction(instruction, block_addr);
662 erase_cmd.AddressMode = QSPI_ADDRESS_1_LINE;
663 erase_cmd.Address = block_addr;
664
665 break;
666 }
667 case BULK_ERASE_CMD: {
668 num_erased_bytes = device->total_size;
669 erase_cmd.AddressMode = QSPI_ADDRESS_NONE;
670 erase_cmd.Instruction = instruction;
671 break;
672 }
673 default: {
674 // Instruction must be a valid erase instruction.
675 return ERR_INVALID_ARGS;
676 }
677 }
678
679 erase_cmd.InstructionMode = QSPI_INSTRUCTION_1_LINE;
680 erase_cmd.AlternateByteMode = QSPI_ALTERNATE_BYTES_NONE;
681 erase_cmd.DataMode = QSPI_DATA_NONE;
682 erase_cmd.DummyCycles = 0;
683 erase_cmd.DdrMode = QSPI_DDR_MODE_DISABLE;
684 erase_cmd.DdrHoldHalfCycle = QSPI_DDR_HHC_ANALOG_DELAY;
685 erase_cmd.SIOOMode = QSPI_SIOO_INST_EVERY_CMD;
686
687
688 /* Enable write operations */
689 status_t qspi_write_enable_result = qspi_write_enable_unsafe(&qspi_handle);
690 if (qspi_write_enable_result != NO_ERROR) {
691 dprintf(CRITICAL, "%s: qspi_write_enable_unsafe failed with err = %d\n",
692 __func__, qspi_write_enable_result);
693 return qspi_write_enable_result;
694 }
695
696 /* Send the command */
697 if (HAL_QSPI_Command(&qspi_handle, &erase_cmd, HAL_QPSI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
698 return ERR_GENERIC;
699 }
700
701 /* Configure automatic polling mode to wait for end of erase */
702 status_t auto_polling_mem_ready_result =
703 qspi_auto_polling_mem_ready_unsafe(&qspi_handle, 0, N25QXXA_SR_WIP);
704 if (auto_polling_mem_ready_result != NO_ERROR) {
705 dprintf(CRITICAL, "%s: auto_polling_mem_ready failed with err = %d\n",
706 __func__, auto_polling_mem_ready_result);
707 return auto_polling_mem_ready_result;
708 }
709
710 return num_erased_bytes;
711 }
712
// Erases the entire flash device in one operation.
static ssize_t qspi_bulk_erase(bdev_t *device) {
    return qspi_erase(device, 0, BULK_ERASE_CMD);
}
716
// Erases one sector (N25QXXA_SECTOR_SIZE bytes) at block_addr.
static ssize_t qspi_erase_sector(bdev_t *device, uint32_t block_addr) {
    return qspi_erase(device, block_addr, SECTOR_ERASE_CMD);
}
720
// Erases one subsector (N25QXXA_SUBSECTOR_SIZE bytes) at block_addr.
static ssize_t qspi_erase_subsector(bdev_t *device, uint32_t block_addr) {
    return qspi_erase(device, block_addr, SUBSECTOR_ERASE_CMD);
}
724
qspi_cmd(QSPI_HandleTypeDef * handle,QSPI_CommandTypeDef * s_command)725 static HAL_StatusTypeDef qspi_cmd(QSPI_HandleTypeDef *handle,
726 QSPI_CommandTypeDef *s_command) {
727 HAL_StatusTypeDef result = HAL_QSPI_Command_IT(handle, s_command);
728
729 if (result != HAL_OK) {
730 return result;
731 }
732
733 event_wait(&cmd_event);
734 return result;
735 }
736
737
// Configures (but does not start) a DMA stream for a single QSPI transfer
// of num_bytes between peripheral_address and memory_address.
static void setup_dma(DMA_Stream_TypeDef *stream, uint32_t peripheral_address,
                      uint32_t memory_address, uint32_t num_bytes,
                      uint32_t direction) {
    stream->PAR = peripheral_address;
    stream->M0AR = memory_address;
    stream->NDTR = num_bytes;

    // Channel 3, highest priority, memory auto-increment, with transfer
    // complete / transfer error / direct-mode error interrupts enabled.
    uint32_t cr = DMA_CHANNEL_3
                | DMA_SxCR_PL
                | DMA_SxCR_MINC
                | DMA_SxCR_TCIE
                | DMA_SxCR_TEIE
                | DMA_SxCR_DMEIE;

    // DIR bit 0 selects memory-to-peripheral; leaving it clear selects
    // peripheral-to-memory.
    if (direction == DMA_MEMORY_TO_PERIPH) {
        cr |= DMA_SxCR_DIR_0;
    }

    stream->CR = cr;
}
767
768 /* IRQ Context */
// Wakes the thread blocked in qspi_rx_dma when the RX DMA completes.
void DMA_RxCpltCallback(void) {
    event_signal(&rx_event, false);
}
772
773 /* IRQ Context */
// Wakes the thread blocked in qspi_tx_dma when the TX DMA completes.
void DMA_TxCpltCallback(void) {
    event_signal(&tx_event, false);
}
777
778 /* IRQ Context */
// Invoked on DMA transfer/direct-mode errors. Only logs; no thread is
// unblocked, so a failed transfer leaves the waiter blocked on its event —
// NOTE(review): consider signalling an error to the waiting thread.
void DMA_ErrorCallback(void) {
    printf("DMA Error\n");
}
782
783 // Send data and wait for interrupt.
qspi_tx_dma(QSPI_HandleTypeDef * handle,QSPI_CommandTypeDef * s_command,uint8_t * buf)784 static HAL_StatusTypeDef qspi_tx_dma(QSPI_HandleTypeDef *handle, QSPI_CommandTypeDef *s_command, uint8_t *buf) {
785 MODIFY_REG(handle->Instance->CCR, QUADSPI_CCR_FMODE, 0);
786
787 if (dma_disable(dma2_stream7) != NO_ERROR) {
788 dprintf(CRITICAL, "%s: timed out while waiting for DMA to disable.\n", __func__);
789 return ERR_TIMED_OUT;
790 }
791
792 setup_dma(
793 dma2_stream7,
794 (uint32_t)&(handle->Instance->DR),
795 (uint32_t)buf,
796 s_command->NbData,
797 DMA_MEMORY_TO_PERIPH
798 );
799
800 // Make sure cache is flushed to RAM before invoking the DMA controller.
801 arch_clean_cache_range((addr_t)buf, s_command->NbData);
802
803 cplt_callback = DMA_TxCpltCallback;
804
805 // And we're off to the races...
806 dma2_stream7->CR |= DMA_SxCR_EN;
807 handle->Instance->CR |= QUADSPI_CR_DMAEN;
808
809 event_wait(&tx_event);
810
811 return HAL_OK;
812 }
813
814 // Send data and wait for interrupt.
qspi_rx_dma(QSPI_HandleTypeDef * handle,QSPI_CommandTypeDef * s_command,uint8_t * buf)815 static HAL_StatusTypeDef qspi_rx_dma(QSPI_HandleTypeDef *handle, QSPI_CommandTypeDef *s_command, uint8_t *buf) {
816 // Make sure the front and back of the buffer are cache aligned.
817 DEBUG_ASSERT(IS_ALIGNED((uintptr_t)buf, CACHE_LINE));
818 DEBUG_ASSERT(IS_ALIGNED(((uintptr_t)buf) + s_command->NbData, CACHE_LINE));
819
820 MODIFY_REG(handle->Instance->CCR, QUADSPI_CCR_FMODE, QUADSPI_CCR_FMODE_0);
821
822 if (dma_disable(dma2_stream7) != NO_ERROR) {
823 dprintf(CRITICAL, "%s: timed out while waiting for DMA to disable.\n", __func__);
824 return ERR_TIMED_OUT;
825 }
826
827 setup_dma(
828 dma2_stream7,
829 (uint32_t)&(handle->Instance->DR),
830 (uint32_t)buf,
831 s_command->NbData,
832 DMA_PERIPH_TO_MEMORY
833 );
834
835 cplt_callback = DMA_RxCpltCallback;
836
837 arch_invalidate_cache_range((addr_t)buf, s_command->NbData);
838
839 // And we're off to the races...
840 dma2_stream7->CR |= DMA_SxCR_EN;
841 uint32_t addr_reg = handle->Instance->AR;
842 handle->Instance->AR = addr_reg;
843 handle->Instance->CR |= QUADSPI_CR_DMAEN;
844
845 event_wait(&rx_event);
846
847 return HAL_OK;
848 }
849
// QUADSPI interrupt: delegate to the HAL, which dispatches to the
// HAL_QSPI_*Callback hooks below, then request a reschedule.
void stm32_QUADSPI_IRQ(void) {
    arm_cm_irq_entry();
    HAL_QSPI_IRQHandler(&qspi_handle);
    arm_cm_irq_exit(true);
}
855
// DMA2 Stream 7 interrupt: services QSPI DMA completion and error flags.
// On completion it tears down the DMA linkage, resets the QSPI HAL state,
// and invokes the callback installed by qspi_tx_dma / qspi_rx_dma.
void stm32_DMA2_Stream7_IRQ(void) {
    arm_cm_irq_entry();

    // Make a copy of the interrupts that we're handling.
    uint32_t hisr = DMA2->HISR;

    // Xfer Complete?
    if (hisr & DMA_FLAG_TCIF3_7) {
        // NOTE(review): HIFCR is write-1-to-clear; `|=` performs a
        // read-modify-write and a plain `=` assignment is the conventional
        // idiom — confirm against the reference manual before changing.
        DMA2->HIFCR |= DMA_FLAG_TCIF3_7;

        // Detach the QSPI peripheral from the DMA engine.
        qspi_handle.Instance->CR &= ~QUADSPI_CR_DMAEN;

        dma_disable(dma2_stream7);

        __HAL_QSPI_CLEAR_FLAG((&qspi_handle), QSPI_FLAG_TC);

        // Abort returns the controller to a quiescent state; force the HAL
        // back to READY so the next operation is accepted.
        HAL_QSPI_Abort(&qspi_handle);
        qspi_handle.State = HAL_QSPI_STATE_READY;

        // Wake the thread waiting in qspi_tx_dma / qspi_rx_dma.
        cplt_callback();
    }

    // Xfer Error?
    if (hisr & DMA_FLAG_TEIF3_7) {
        DMA2->HIFCR |= DMA_FLAG_TEIF3_7;
        DMA_ErrorCallback();
    }

    // Direct mode error?
    if (hisr & DMA_FLAG_DMEIF3_7) {
        DMA2->HIFCR |= DMA_FLAG_DMEIF3_7;
        DMA_ErrorCallback();
    }

    arm_cm_irq_exit(true);
}
892
893 /* IRQ Context */
// HAL hook: a command issued via HAL_QSPI_Command_IT completed.
void HAL_QSPI_CmdCpltCallback(QSPI_HandleTypeDef *hqspi) {
    event_signal(&cmd_event, false);
}
897
898 /* IRQ Context */
// HAL hook: the auto-polling engine observed the requested status match;
// wakes the thread blocked in qspi_auto_polling_mem_ready_unsafe.
void HAL_QSPI_StatusMatchCallback(QSPI_HandleTypeDef *hqspi) {
    event_signal(&st_event, false);
}
902
903 /* IRQ Context */
// HAL hook: the QSPI peripheral reported an error. Log-only; no waiter is
// unblocked here.
void HAL_QSPI_ErrorCallback(QSPI_HandleTypeDef *hqspi) {
    dprintf(CRITICAL, "%s: HAL QSPI Error.\n", __func__);
}
907
// Enables the DMA2 controller clock, records the stream used for QSPI
// transfers, and unmasks its interrupt. Always succeeds.
status_t qspi_dma_init(QSPI_HandleTypeDef *hqspi) {
    /* QSPI DMA Controller Clock */
    __HAL_RCC_DMA2_CLK_ENABLE();

    dma2_stream7 = DMA2_Stream7;

    HAL_NVIC_EnableIRQ(DMA2_Stream7_IRQn);

    return NO_ERROR;
}
918
// Returns the QSPI address width needed to reach `address`: 24-bit
// addressing covers the first 16MiB, anything beyond needs 32-bit.
static uint32_t get_address_size(uint32_t address) {
    return (address < FOUR_BYTE_ADDR_THRESHOLD) ? QSPI_ADDRESS_24_BITS
                                                : QSPI_ADDRESS_32_BITS;
}
925
926 // Converts a 3 byte instruction into a 4 byte instruction if necessary.
// Converts a 3 byte instruction into a 4 byte instruction if necessary.
// Instructions with no 4-byte-address variant are returned unchanged.
static uint32_t get_specialized_instruction(uint32_t instruction, uint32_t address) {
    if (address < FOUR_BYTE_ADDR_THRESHOLD) {
        return instruction;
    }

    // Pairs of (3-byte-address opcode, 4-byte-address equivalent).
    static const struct {
        uint32_t base;
        uint32_t four_byte;
    } remap[] = {
        { READ_CMD, READ_4_BYTE_ADDR_CMD },
        { FAST_READ_CMD, FAST_READ_4_BYTE_ADDR_CMD },
        { DUAL_OUT_FAST_READ_CMD, DUAL_OUT_FAST_READ_4_BYTE_ADDR_CMD },
        { DUAL_INOUT_FAST_READ_CMD, DUAL_INOUT_FAST_READ_4_BYTE_ADDR_CMD },
        { QUAD_OUT_FAST_READ_CMD, QUAD_OUT_FAST_READ_4_BYTE_ADDR_CMD },
        { QUAD_INOUT_FAST_READ_CMD, QUAD_INOUT_FAST_READ_4_BYTE_ADDR_CMD },
        { PAGE_PROG_CMD, PAGE_PROG_4_BYTE_ADDR_CMD },
        { QUAD_IN_FAST_PROG_CMD, QUAD_IN_FAST_PROG_4_BYTE_ADDR_CMD },
        { SUBSECTOR_ERASE_CMD, SUBSECTOR_ERASE_4_BYTE_ADDR_CMD },
        { SECTOR_ERASE_CMD, SECTOR_ERASE_4_BYTE_ADDR_CMD },
    };

    for (size_t i = 0; i < sizeof(remap) / sizeof(remap[0]); i++) {
        if (remap[i].base == instruction) {
            return remap[i].four_byte;
        }
    }

    return instruction;
}
957
qspi_enable_linear(void)958 static status_t qspi_enable_linear(void) {
959 status_t result = NO_ERROR;
960
961 mutex_acquire(&spiflash_mutex);
962
963 if (device_state == QSPI_STATE_LINEAR) {
964 // Device is already in linear mode, nothing to be done.
965 goto finish;
966 }
967
968 result = qspi_dummy_cycles_cfg_unsafe(&qspi_handle);
969
970 static const QSPI_CommandTypeDef s_command = {
971 .InstructionMode = QSPI_INSTRUCTION_1_LINE,
972 .AddressSize = QSPI_ADDRESS_24_BITS,
973 .AlternateByteMode = QSPI_ALTERNATE_BYTES_NONE,
974 .DdrMode = QSPI_DDR_MODE_DISABLE,
975 .DdrHoldHalfCycle = QSPI_DDR_HHC_ANALOG_DELAY,
976 .AddressMode = QSPI_ADDRESS_1_LINE,
977 .Instruction = QUAD_OUT_FAST_READ_CMD,
978 .DataMode = QSPI_DATA_4_LINES,
979 .DummyCycles = 10,
980 .SIOOMode = QSPI_SIOO_INST_EVERY_CMD
981 };
982
983 QSPI_MemoryMappedTypeDef linear_mode_cfg = {
984 .TimeOutActivation = QSPI_TIMEOUT_COUNTER_DISABLE,
985 };
986
987 HAL_StatusTypeDef hal_result = HAL_QSPI_MemoryMapped(&qspi_handle, &s_command, &linear_mode_cfg);
988 if (hal_result != HAL_OK) {
989 result = hal_error_to_status(hal_result);
990 dprintf(CRITICAL, "%s: HAL_QSPI_MemoryMapped failed with err = %d\n",
991 __func__, hal_result);
992 goto finish;
993 }
994
995 device_state = QSPI_STATE_LINEAR;
996
997 finish:
998 mutex_release(&spiflash_mutex);
999 return result;
1000 }
1001
1002
qspi_disable_linear(void)1003 static status_t qspi_disable_linear(void) {
1004 status_t result = NO_ERROR;
1005
1006 mutex_acquire(&spiflash_mutex);
1007
1008 if (device_state == QSPI_STATE_COMMAND) {
1009 // Device is already in Command mode, nothing to be done.
1010 goto finish;
1011 }
1012
1013 result = hal_error_to_status(HAL_QSPI_Abort(&qspi_handle));
1014 if (result == NO_ERROR) {
1015 device_state = QSPI_STATE_COMMAND;
1016 } else {
1017 dprintf(CRITICAL, "%s: HAL_QSPI_Abort failed with err = %d\n",
1018 __func__, result);
1019 }
1020
1021
1022 finish:
1023 mutex_release(&spiflash_mutex);
1024 return result;
1025 }
1026
qspi_is_linear(void)1027 static bool qspi_is_linear(void) {
1028 bool result;
1029 mutex_acquire(&spiflash_mutex);
1030 result = (QSPI_STATE_LINEAR == device_state);
1031 mutex_release(&spiflash_mutex);
1032 return result;
1033 }
1034