/* Copyright (c) 2023, Canaan Bright Sight Co., Ltd
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006-2025 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <rtthread.h>
#include <rthw.h>
#include <rtdevice.h>
#include <rtdef.h>
#include <rtatomic.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <riscv_io.h>
#include <mmu.h>
#include <cache.h>
#include <page.h>
#include "board.h"
#include "ioremap.h"
#include "drv_hardlock.h"
#include "drv_pdma.h"

#define DBG_TAG "drv_pdma"
#ifdef RT_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_WARNING
#endif
#define DBG_COLOR
#include <rtdbg.h>

/**
 * @brief PDMA controller instance initialization
 */
static pdma_controller_t pdma_ctrl = {0};

#define PDMA_CH_MENUCONFIG_ENABLED(ch) \
    (((ch) >= 0 && (ch) < PDMA_CH_MAX) ? \
    (pdma_ctrl.chan[(ch)].menuconfig_enabled) : \
    (RT_FALSE))

/**
 * @brief Acquire PDMA hardware lock
 * @note Busy-waits until lock is acquired
 */
#define PDMA_LOCK() while (kd_hardlock_lock(pdma_ctrl.hardlock) != 0)

/**
 * @brief Release PDMA hardware lock
 */
#define PDMA_UNLOCK() kd_hardlock_unlock(pdma_ctrl.hardlock)

/*--------------------- Channel Enable Control ---------------------*/
/**
 * @brief Enable specific PDMA channel
 */
#define PDMA_CH_ENABLE(ch) \
    (pdma_write32(&pdma_ctrl.reg->pdma_ch_en, pdma_read32(&pdma_ctrl.reg->pdma_ch_en) | (1U << (ch))))

/**
 * @brief Disable specific PDMA channel
 */
#define PDMA_CH_DISABLE(ch) \
    (pdma_write32(&pdma_ctrl.reg->pdma_ch_en, pdma_read32(&pdma_ctrl.reg->pdma_ch_en) & ~(1U << (ch))))

/**
 * @brief Check if PDMA channel is enabled
 */
#define PDMA_CH_IS_ENABLED(ch) \
    (pdma_read32(&pdma_ctrl.reg->pdma_ch_en) & (1U << (ch)))


/*--------------------- Interrupt Control ---------------------*/
/**
 * @brief Enable interrupts for specific PDMA channel
 */
#define PDMA_CH_INT_ENABLE(ch, mask) \
    (pdma_write32(&pdma_ctrl.reg->dma_int_mask, pdma_read32(&pdma_ctrl.reg->dma_int_mask) & ~((mask) << (ch))))

/**
 * @brief Disable interrupts for specific PDMA channel
 */
#define PDMA_CH_INT_DISABLE(ch, mask) \
    (pdma_write32(&pdma_ctrl.reg->dma_int_mask, pdma_read32(&pdma_ctrl.reg->dma_int_mask) | ((mask) << (ch))))

/**
 * @brief Disable all interrupts for specific PDMA channel
 */
#define PDMA_CH_INT_DISABLE_ALL(ch) \
    PDMA_CH_INT_DISABLE(ch, PDMA_ALL_INTS)

/**
 * @brief Clear interrupt status for specific PDMA channel
 */
#define PDMA_CH_INT_CLEAR(ch, intr) \
    (pdma_write32(&pdma_ctrl.reg->dma_int_stat, (intr) << (ch)))

/**
 * @brief Clear all interrupt status for specific PDMA channel
 */
#define PDMA_CH_INT_CLEAR_ALL(ch) \
    PDMA_CH_INT_CLEAR(ch, PDMA_ALL_INTS)

/**
 * @brief Check if interrupt is triggered for specific PDMA channel
 */
#define PDMA_CH_INT_IS_TRIGGERED(ch, intr) \
    (pdma_read32(&pdma_ctrl.reg->dma_int_stat) & ((intr) << (ch)))


/*--------------------- Status Check ---------------------*/
/**
 * @brief Check if PDMA channel is busy
 */
#define PDMA_CH_IS_BUSY(ch) \
    (pdma_read32(&pdma_ctrl.reg->pdma_ch_reg[ch].ch_status) & PDMA_STATE_BUSY)

/**
 * @brief Check if PDMA channel is paused
 */
#define PDMA_CH_IS_PAUSED(ch) \
    (pdma_read32(&pdma_ctrl.reg->pdma_ch_reg[ch].ch_status) & PDMA_STATE_PAUSE)


/*--------------------- Data Transfer Control ---------------------*/
/**
 * @brief Start PDMA transfer on specific channel
 */
#define PDMA_CH_START(ch) \
    (pdma_write32(&pdma_ctrl.reg->pdma_ch_reg[ch].ch_ctl, PDMA_CMD_START))

/**
 * @brief Stop PDMA transfer on specific channel
 */
#define PDMA_CH_STOP(ch) \
    (pdma_write32(&pdma_ctrl.reg->pdma_ch_reg[ch].ch_ctl, PDMA_CMD_STOP))

/**
 * @brief Resume paused PDMA transfer on specific channel
 */
#define PDMA_CH_RESUME(ch) \
    (pdma_write32(&pdma_ctrl.reg->pdma_ch_reg[ch].ch_ctl, PDMA_CMD_RESUME))


static void _k230_pdma_llt_free(rt_uint8_t ch);
static rt_uint32_t *_k230_pdma_llt_cal(rt_uint8_t ch, usr_pdma_cfg_t *pdma_cfg);
static rt_err_t _k230_pdma_safe_stop(rt_uint8_t ch, rt_uint32_t timeout_ms);

/**
 * @brief Set callback function for specified PDMA channel
 * @param ch PDMA channel number
 * @param func Callback function pointer
 * @return RT_EOK on success, -RT_EINVAL on invalid parameters
 */
rt_err_t k230_pdma_set_callback(rt_uint8_t ch, k230_pdma_callback_t func)
{
    /* Validate channel and callback function */
    if (!PDMA_CH_MENUCONFIG_ENABLED(ch) || func == RT_NULL)
    {
        return -RT_EINVAL;
    }

    /*
     * Safely set callback function by masking interrupts during update
     * This prevents potential race conditions with DMA interrupts
     */
    rt_hw_interrupt_mask(pdma_ctrl.chan[ch].irq_num);
    pdma_ctrl.chan[ch].cb.callback = func;
    rt_hw_interrupt_umask(pdma_ctrl.chan[ch].irq_num);

    return RT_EOK;
}
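
/*
 * Usage sketch (illustrative, not part of the driver): register an ISR-safe
 * completion handler. "demo_pdma_done" and "pdma_done_sem" are hypothetical
 * names; the handler signature follows how k230_pdma_isr() invokes the
 * callback, i.e. (channel, success).
 *
 *     static struct rt_semaphore pdma_done_sem;
 *
 *     static void demo_pdma_done(rt_uint8_t ch, rt_bool_t ok)
 *     {
 *         // runs in interrupt context: keep it short and ISR-safe
 *         rt_sem_release(&pdma_done_sem);
 *     }
 *
 *     rt_sem_init(&pdma_done_sem, "pdma_done", 0, RT_IPC_FLAG_PRIO);
 *     k230_pdma_set_callback(ch, demo_pdma_done);
 */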

/**
 * @brief Request an available PDMA channel
 * @param ch [out] Pointer to store the allocated channel number
 * @return rt_err_t RT_EOK if success, error code otherwise
 */
rt_err_t k230_pdma_request_channel(rt_uint8_t *ch)
{
    if (ch == RT_NULL)
    {
        LOG_E("PDMA: Invalid channel pointer");
        return -RT_EINVAL;
    }

    rt_base_t level;
    level = rt_hw_interrupt_disable();
    PDMA_LOCK();

    for (rt_uint8_t i = 0; i < PDMA_CH_MAX; i++)
    {
        if (!PDMA_CH_MENUCONFIG_ENABLED(i))
        {
            LOG_D("PDMA: Channel %d not enabled in menuconfig", i);
            continue;
        }

        if (PDMA_CH_IS_ENABLED(i))
        {
            LOG_D("PDMA: Channel %d already enabled", i);
            continue;
        }

        PDMA_CH_ENABLE(i);
        LOG_D("PDMA: Trying channel %d", i);

        if (!PDMA_CH_IS_ENABLED(i))
        {
            LOG_W("PDMA: Channel %d failed to enable - possible hardware issue", i);
            continue;
        }

        if (PDMA_CH_IS_BUSY(i))
        {
            LOG_W("PDMA: Channel %d is busy, disabling", i);
            PDMA_CH_DISABLE(i);
            continue;
        }

        *ch = i;
        PDMA_CH_INT_DISABLE_ALL(i);

        PDMA_UNLOCK();
        rt_hw_interrupt_enable(level);

        pdma_ctrl.chan[i].cb.callback = RT_NULL;
        pdma_ctrl.chan[i].is_hw_configured = RT_FALSE;
        pdma_ctrl.chan[i].llt_va = RT_NULL;
        pdma_ctrl.chan[i].page_size = 0;
        rt_hw_interrupt_umask(pdma_ctrl.chan[i].irq_num);

        LOG_I("PDMA: Allocated channel %d", i);
        return RT_EOK;
    }

    *ch = PDMA_CH_MAX;
    PDMA_UNLOCK();
    rt_hw_interrupt_enable(level);

    LOG_E("PDMA: No available channel found");
    return -RT_EBUSY;
}

/**
 * @brief Release an allocated PDMA channel
 * @param ch Channel number to release
 * @return rt_err_t RT_EOK if success, error code otherwise
 */
rt_err_t k230_pdma_release_channel(rt_uint8_t ch)
{
    rt_base_t level;
    level = rt_hw_interrupt_disable();
    PDMA_LOCK();

    /* Validate channel configuration and status */
    if (!PDMA_CH_MENUCONFIG_ENABLED(ch) || !PDMA_CH_IS_ENABLED(ch))
    {
        PDMA_UNLOCK();
        rt_hw_interrupt_enable(level);
        LOG_E("PDMA: Invalid channel %d to release", ch);
        return -RT_EINVAL;
    }

    PDMA_UNLOCK();
    rt_hw_interrupt_enable(level);

    rt_hw_interrupt_mask(pdma_ctrl.chan[ch].irq_num);

    /* Clear any registered callback */
    pdma_ctrl.chan[ch].cb.callback = RT_NULL;

    /* Safely stop DMA operation and release resources */
    rt_err_t err = _k230_pdma_safe_stop(ch, PDMA_MAX_WAIT_MS);
    if (err != RT_EOK)
    {
        LOG_E("PDMA: Failed to safely stop channel %d (err:%d)", ch, err);
        return err;
    }

    pdma_ctrl.chan[ch].is_hw_configured = RT_FALSE;

    /* Disable the channel */
    level = rt_hw_interrupt_disable();
    PDMA_LOCK();
    PDMA_CH_DISABLE(ch);
    PDMA_UNLOCK();
    rt_hw_interrupt_enable(level);

    LOG_I("PDMA: Channel %d released successfully", ch);
    return RT_EOK;
}

/**
 * @brief Start a PDMA channel operation
 * @param ch The channel number to start
 * @return RT_EOK on success, error code on failure
 */
rt_err_t k230_pdma_start(rt_uint8_t ch)
{
    rt_base_t level;
    level = rt_hw_interrupt_disable();
    PDMA_LOCK();

    LOG_D("Starting PDMA channel %d", ch);

    /* Basic channel validation */
    if (!PDMA_CH_MENUCONFIG_ENABLED(ch) || !PDMA_CH_IS_ENABLED(ch))
    {
        LOG_E("Channel %d not enabled in menuconfig or hardware", ch);
        PDMA_UNLOCK();
        rt_hw_interrupt_enable(level);
        return -RT_EINVAL;
    }

    /* Only start DMA if channel is properly configured, to prevent an unclosable channel */
    if (pdma_ctrl.chan[ch].is_hw_configured == RT_FALSE)
    {
        LOG_E("Channel %d not properly configured", ch);
        PDMA_UNLOCK();
        rt_hw_interrupt_enable(level);
        return -RT_ERROR;
    }

    /* Enable completion, pause and timeout interrupts */
    PDMA_CH_INT_ENABLE(ch, PDMA_PDONE_INT | PDMA_PPAUSE_INT | PDMA_PTOUT_INT);

    PDMA_UNLOCK();
    rt_hw_interrupt_enable(level);

    /* Start the channel operation */
    PDMA_CH_START(ch);
    LOG_I("Successfully started PDMA channel %d", ch);

    /* Clear configuration flag */
    pdma_ctrl.chan[ch].is_hw_configured = RT_FALSE;

    return RT_EOK;
}

/**
 * @brief Stop an active PDMA channel operation
 * @param ch The channel number to stop
 * @return RT_EOK on success, error code on failure
 */
rt_err_t k230_pdma_stop(rt_uint8_t ch)
{
    rt_base_t level;
    level = rt_hw_interrupt_disable();
    PDMA_LOCK();

    LOG_D("Attempting to stop PDMA channel %d", ch);

    /* Basic channel validation */
    if (!PDMA_CH_MENUCONFIG_ENABLED(ch) || !PDMA_CH_IS_ENABLED(ch))
    {
        LOG_E("Channel %d not enabled in menuconfig or hardware", ch);
        PDMA_UNLOCK();
        rt_hw_interrupt_enable(level);
        return -RT_EINVAL;
    }

    PDMA_UNLOCK();
    rt_hw_interrupt_enable(level);

    /* Safely stop the channel operation */
    rt_err_t ret = _k230_pdma_safe_stop(ch, PDMA_MAX_WAIT_MS);
    if (ret == RT_EOK)
    {
        LOG_I("Successfully stopped PDMA channel %d", ch);
    }
    else
    {
        LOG_E("Failed to stop PDMA channel %d (error: %d)", ch, ret);
    }

    return ret;
}

/**
 * @brief Convert PDMA channel configuration structure to register value
 * @param cfg Pointer to the channel configuration structure
 * @return 32-bit register value representing the configuration
 */
static rt_uint32_t _k230_pdma_ch_cfg_to_reg(const pdma_ch_cfg_t *cfg)
{
    rt_uint32_t reg = 0;

    /* Source type configuration */
    reg |= (cfg->ch_src_type & 0x1) << 0;

    /* Device horizontal size */
    reg |= (cfg->ch_dev_hsize & 0x3) << 1;

    /* Data endianness configuration */
    reg |= (cfg->ch_dat_endian & 0x3) << 4;

    /* Device burst length */
    reg |= (cfg->ch_dev_blen & 0xF) << 8;

    /* Channel priority */
    reg |= (cfg->ch_priority & 0xF) << 12;

    /* Device timeout */
    reg |= (cfg->ch_dev_tout & 0xFFF) << 16;

    return reg;
}
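
/*
 * Worked example of the packing above (field values chosen only for
 * illustration): ch_src_type = 1, ch_dev_hsize = 2, ch_dat_endian = 0,
 * ch_dev_blen = 0, ch_priority = 7, ch_dev_tout = 0xFFF packs to
 * reg = (1 << 0) | (2 << 1) | (7 << 12) | (0xFFF << 16) = 0x0FFF7005.
 */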

/**
 * @brief Configure PDMA channel with user settings
 * @param ch Channel number to configure
 * @param ucfg Pointer to user configuration structure
 * @return RT_EOK on success, error code on failure
 */
static rt_err_t _k230_pdma_config(rt_uint8_t ch, usr_pdma_cfg_t *ucfg)
{
    volatile rt_uint32_t *ch_cfg = (volatile rt_uint32_t*)(&(pdma_ctrl.reg->pdma_ch_reg[ch].ch_cfg));

    LOG_D("Configuring PDMA channel %d", ch);

    /* Convert configuration to register format */
    rt_uint32_t reg_val = _k230_pdma_ch_cfg_to_reg(&ucfg->pdma_ch_cfg);

    /* Write configuration to hardware registers */
    pdma_write32(ch_cfg, reg_val);
    pdma_write32(&(pdma_ctrl.reg->ch_peri_dev_sel[ch]), ucfg->device);

    LOG_I("PDMA channel %d configured successfully", ch);
    return RT_EOK;
}

/**
 * @brief Validate user configuration parameters
 * @param ucfg Pointer to user configuration structure
 * @return RT_EOK if valid, error code if invalid
 */
static rt_err_t _k230_ucfg_check(usr_pdma_cfg_t *ucfg)
{
    /* Parameter NULL check */
    if (ucfg == RT_NULL)
    {
        LOG_E("Configuration pointer is NULL");
        return -RT_EINVAL;
    }

    /* Device range validation */
    if ((ucfg->device > PDM_IN) || (ucfg->device < UART0_TX))
    {
        LOG_E("Invalid device selection: %d", ucfg->device);
        return -RT_EINVAL;
    }

    /* Validate peripheral data word width */
    if ((ucfg->pdma_ch_cfg.ch_dev_hsize > PSBYTE4) ||
        (ucfg->pdma_ch_cfg.ch_dev_hsize < PSBYTE1))
    {
        LOG_E("Invalid peripheral data width: %d (1-4 bytes supported)",
              ucfg->pdma_ch_cfg.ch_dev_hsize);
        return -RT_EINVAL;
    }

    /* Address and size alignment check */
    if (((rt_uintptr_t)ucfg->src_addr % 4) ||
        ((rt_uintptr_t)ucfg->dst_addr % 4) ||
        (ucfg->line_size % 4))
    {
        LOG_E("Alignment error - src: 0x%08X, dst: 0x%08X, size: %d",
              ucfg->src_addr, ucfg->dst_addr, ucfg->line_size);
        return -RT_EINVAL;
    }

    LOG_D("User configuration validation passed");
    return RT_EOK;
}

/**
 * @brief Configure a PDMA channel with user settings
 * @param ch Channel number to configure (0-PDMA_MAX_CHANNELS-1)
 * @param ucfg Pointer to user configuration structure
 * @return RT_EOK on success, error code on failure
 */
rt_err_t k230_pdma_config(rt_uint8_t ch, usr_pdma_cfg_t *ucfg)
{
    rt_err_t err;
    rt_base_t level;

    LOG_D("[CH%d] Starting PDMA configuration", ch);

    /* Enter critical section */
    level = rt_hw_interrupt_disable();
    PDMA_LOCK();

    /* Channel availability check */
    if (!PDMA_CH_MENUCONFIG_ENABLED(ch) || !PDMA_CH_IS_ENABLED(ch))
    {
        LOG_E("[CH%d] Channel not enabled in menuconfig or hardware", ch);
        PDMA_UNLOCK();
        rt_hw_interrupt_enable(level);
        return -RT_EINVAL;
    }

    PDMA_UNLOCK();
    rt_hw_interrupt_enable(level);

    /* Validate user configuration */
    err = _k230_ucfg_check(ucfg);
    if (err != RT_EOK)
    {
        LOG_E("[CH%d] Configuration validation failed", ch);
        return err;
    }

    /* Safely stop channel if active */
    err = _k230_pdma_safe_stop(ch, PDMA_MAX_WAIT_MS);
    if (err != RT_EOK)
    {
        LOG_E("[CH%d] Failed to stop channel (err: %d)", ch, err);
        return err;
    }

    /* Apply hardware configuration */
    _k230_pdma_config(ch, ucfg);
    LOG_D("[CH%d] Hardware registers configured", ch);

    /* Build DMA transfer linked list */
    rt_uint32_t* llt_saddr = _k230_pdma_llt_cal(ch, ucfg);
    if (llt_saddr == RT_NULL)
    {
        LOG_E("[CH%d] Failed to allocate memory for linked list", ch);
        return -RT_ENOMEM;
    }

    /* Program linked list starting address */
    pdma_write32(&(pdma_ctrl.reg->pdma_ch_reg[ch].ch_llt_saddr), (rt_uint32_t)(rt_uintptr_t)llt_saddr);
    LOG_D("[CH%d] Linked list programmed (addr: 0x%p)", ch, llt_saddr);

    /* Mark channel as configured */
    pdma_ctrl.chan[ch].is_hw_configured = RT_TRUE;
    LOG_I("[CH%d] Configuration completed successfully", ch);

    return RT_EOK;
}
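
/*
 * End-to-end usage sketch (illustrative only): request a channel, describe a
 * memory-to-peripheral transfer, then start it. The buffer/length names and the
 * demo_pdma_done handler are hypothetical; UART0_TX, CONTINUE and PSBYTE1 are
 * taken from this driver's own checks. src_addr/dst_addr are physical
 * addresses and line_size is a byte count, all 4-byte aligned as enforced by
 * _k230_ucfg_check().
 *
 *     rt_uint8_t ch;
 *     usr_pdma_cfg_t cfg = {0};
 *
 *     if (k230_pdma_request_channel(&ch) != RT_EOK)
 *         return;
 *
 *     cfg.device = UART0_TX;
 *     cfg.src_addr = tx_buf_pa;        // physical address of source buffer
 *     cfg.dst_addr = uart_fifo_pa;     // peripheral FIFO physical address
 *     cfg.line_size = tx_len;          // bytes, multiple of 4
 *     cfg.pdma_ch_cfg.ch_src_type = CONTINUE;  // memory -> device
 *     cfg.pdma_ch_cfg.ch_dev_hsize = PSBYTE1;
 *
 *     k230_pdma_set_callback(ch, demo_pdma_done);
 *     if (k230_pdma_config(ch, &cfg) == RT_EOK)
 *         k230_pdma_start(ch);
 *
 *     // ... wait for completion (e.g. on pdma_done_sem), then ...
 *     k230_pdma_release_channel(ch);
 */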

/**
 * @brief Safely stop a PDMA channel operation
 * @param ch Channel number to stop (0-PDMA_MAX_CHANNELS-1)
 * @param timeout_ms Maximum wait time in milliseconds (0 for no timeout)
 * @return RT_EOK on success, -RT_ETIMEOUT on timeout, other errors
 */
static rt_err_t _k230_pdma_safe_stop(rt_uint8_t ch, rt_uint32_t timeout_ms)
{
    rt_err_t err = RT_EOK;
    rt_tick_t start_tick;

    LOG_D("[CH%d] Attempting safe stop (timeout: %dms)", ch, timeout_ms);

    /* Immediately request channel stop */
    PDMA_CH_STOP(ch);

    /* Wait for channel to become inactive */
    start_tick = rt_tick_get();
    while (PDMA_CH_IS_BUSY(ch))
    {
        /* Check for timeout if specified */
        if (timeout_ms > 0 &&
            (rt_tick_get_delta(start_tick) >= rt_tick_from_millisecond(timeout_ms)))
        {
            LOG_E("[CH%d] Stop operation timed out", ch);
            return -RT_ETIMEOUT;
        }

        rt_thread_mdelay(1);
    }

    /* Enter critical section for register cleanup */
    rt_base_t level = rt_hw_interrupt_disable();
    PDMA_LOCK();

    /* Clear and disable all interrupts */
    PDMA_CH_INT_CLEAR_ALL(ch);
    PDMA_CH_INT_DISABLE_ALL(ch);
    LOG_D("[CH%d] Interrupts cleared and disabled", ch);

    PDMA_UNLOCK();
    rt_hw_interrupt_enable(level);

    /* Free linked list memory */
    _k230_pdma_llt_free(ch);
    LOG_D("[CH%d] Linked list memory freed", ch);

    pdma_ctrl.chan[ch].is_hw_configured = RT_FALSE;

    LOG_I("[CH%d] Successfully stopped", ch);
    return RT_EOK;
}

/**
 * @brief Calculate and allocate PDMA linked list table (LLT)
 * @param ch Channel number (0-PDMA_MAX_CHANNELS-1)
 * @param pdma_cfg Pointer to PDMA configuration structure
 * @return Physical address of LLT on success, RT_NULL on failure
 */
static rt_uint32_t *_k230_pdma_llt_cal(rt_uint8_t ch, usr_pdma_cfg_t *pdma_cfg)
{
    rt_int32_t i;
    rt_uint32_t list_num;
    pdma_llt_t *llt_list;
    rt_bool_t mem_to_dev;

    LOG_D("[CH%d] Calculating LLT parameters", ch);

    /* Calculate number of LLT entries needed */
    list_num = (pdma_cfg->line_size - 1) / PDMA_MAX_LINE_SIZE + 1;
    LOG_D("[CH%d] Line size: %d, requires %d LLT entries",
          ch, pdma_cfg->line_size, list_num);

    /* Determine transfer direction */
    mem_to_dev = (pdma_cfg->pdma_ch_cfg.ch_src_type == CONTINUE) ? RT_TRUE : RT_FALSE;
    LOG_D("[CH%d] Transfer direction: %s", ch, mem_to_dev ? "Memory->Device" : "Device->Memory");

    /* Allocate memory for LLT */
    pdma_ctrl.chan[ch].page_size = rt_page_bits(sizeof(pdma_llt_t) * list_num);
    llt_list = (pdma_llt_t *)rt_pages_alloc(pdma_ctrl.chan[ch].page_size);

    if (llt_list == RT_NULL)
    {
        pdma_ctrl.chan[ch].page_size = 0;

        LOG_E("[CH%d] Failed to allocate memory for LLT", ch);
        return RT_NULL;
    }
    LOG_D("[CH%d] Allocated %d bytes for LLT", ch, sizeof(pdma_llt_t) * list_num);

    pdma_ctrl.chan[ch].llt_va = llt_list;

    /* Initialize LLT entries */
    for (i = 0; i < list_num; i++)
    {
        /* Set source and destination addresses */
        if (mem_to_dev)
        {
            llt_list[i].src_addr = ((rt_uint32_t)(intptr_t)pdma_cfg->src_addr + PDMA_MAX_LINE_SIZE * i);
            llt_list[i].dst_addr = ((rt_uint32_t)(intptr_t)pdma_cfg->dst_addr); /* Device address remains fixed */
        }
        else
        {
            llt_list[i].src_addr = ((rt_uint32_t)(intptr_t)pdma_cfg->src_addr); /* Device address remains fixed */
            llt_list[i].dst_addr = ((rt_uint32_t)(intptr_t)pdma_cfg->dst_addr + PDMA_MAX_LINE_SIZE * i);
        }

        /* Set transfer size and next pointer */
        if (i == list_num - 1)
        {
            /* Last entry uses remaining size */
            llt_list[i].line_size = (pdma_cfg->line_size % PDMA_MAX_LINE_SIZE) ?
                                    (pdma_cfg->line_size % PDMA_MAX_LINE_SIZE) :
                                    PDMA_MAX_LINE_SIZE;
            llt_list[i].next_llt_addr = 0; /* Terminate list */
            LOG_D("[CH%d] Last LLT entry: size=%d", ch, llt_list[i].line_size);
        }
        else
        {
            llt_list[i].line_size = PDMA_MAX_LINE_SIZE;
            /* Convert virtual address of next entry to physical address */
            void *next_llt_va = &llt_list[i+1];
            llt_list[i].next_llt_addr = (rt_uint32_t)(intptr_t)rt_kmem_v2p(next_llt_va);
        }
        llt_list[i].pause = 0;
    }

    /* Handle cache coherency based on transfer direction */
    if (mem_to_dev)
    {
        /* Memory to Device: clean source data cache */
        void *src_va = rt_kmem_p2v(pdma_cfg->src_addr);
        rt_hw_cpu_dcache_clean(src_va, pdma_cfg->line_size);
        LOG_D("[CH%d] Cleaned source cache (va: %p, size: %d)",
              ch, src_va, pdma_cfg->line_size);
    }
    else
    {
        /* Device to Memory: invalidate destination cache */
        void *dst_va = rt_kmem_p2v(pdma_cfg->dst_addr);
        rt_hw_cpu_dcache_invalidate(dst_va, pdma_cfg->line_size);
        LOG_D("[CH%d] Invalidated destination cache (va: %p, size: %d)",
              ch, dst_va, pdma_cfg->line_size);
    }

    /* Ensure LLT is visible to DMA */
    rt_hw_cpu_dcache_clean((void*)llt_list, sizeof(pdma_llt_t) * list_num);
    LOG_D("[CH%d] Cleaned LLT cache (va: %p, size: %d)",
          ch, llt_list, sizeof(pdma_llt_t) * list_num);

    /* Return physical address of LLT */
    void *llt_list_pa = rt_kmem_v2p(llt_list);
    LOG_I("[CH%d] LLT calculation complete (pa: %p)", ch, llt_list_pa);

    return (rt_uint32_t *)llt_list_pa;
}
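
/*
 * Worked example of the split above, assuming (purely for illustration) that
 * PDMA_MAX_LINE_SIZE were 4096: line_size = 10000 gives
 * list_num = (10000 - 1) / 4096 + 1 = 3; entries 0 and 1 each carry 4096
 * bytes, the last entry carries 10000 % 4096 = 1808 bytes, and its
 * next_llt_addr = 0 terminates the list.
 */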

/**
 * @brief Free allocated PDMA linked list table (LLT) memory
 * @param ch Channel number (0-PDMA_MAX_CHANNELS-1) to free
 */
static void _k230_pdma_llt_free(rt_uint8_t ch)
{
    LOG_D("[CH%d] Freeing LLT memory", ch);

    if (pdma_ctrl.chan[ch].llt_va != RT_NULL)
    {
        /* Free the allocated pages, then clear the bookkeeping fields */
        rt_pages_free(pdma_ctrl.chan[ch].llt_va, pdma_ctrl.chan[ch].page_size);
        LOG_D("[CH%d] Freed LLT pages (order: %d)", ch, pdma_ctrl.chan[ch].page_size);
        pdma_ctrl.chan[ch].llt_va = RT_NULL;
        pdma_ctrl.chan[ch].page_size = 0;
    }
}

/**
 * @brief PDMA interrupt service routine
 * @param irq Interrupt number (unused)
 * @param param Channel number passed as void pointer
 */
static void k230_pdma_isr(int irq, void *param)
{
    rt_uint8_t ch = (rt_uintptr_t)param; /* Convert channel parameter */
    rt_bool_t success = RT_FALSE; /* Transfer result flag */
    k230_pdma_callback_t callback = RT_NULL; /* Callback function pointer */

    LOG_D("[CH%d] PDMA interrupt triggered", ch);

    PDMA_LOCK();

    /* Only process interrupts for enabled channels */
    if (PDMA_CH_MENUCONFIG_ENABLED(ch) && PDMA_CH_IS_ENABLED(ch))
    {
        /* Check for transfer complete interrupt */
        if (PDMA_CH_INT_IS_TRIGGERED(ch, PDMA_PDONE_INT))
        {
            success = RT_TRUE;
            callback = pdma_ctrl.chan[ch].cb.callback;
            LOG_D("[CH%d] Transfer complete", ch);
        }
        /* Check for timeout interrupt */
        else if (PDMA_CH_INT_IS_TRIGGERED(ch, PDMA_PTOUT_INT))
        {
            success = RT_FALSE;
            callback = pdma_ctrl.chan[ch].cb.callback;
            LOG_E("[CH%d] Transfer timeout", ch);
        }
        /* Check for pause interrupt */
        else if (PDMA_CH_INT_IS_TRIGGERED(ch, PDMA_PPAUSE_INT))
        {
            PDMA_CH_RESUME(ch);
            LOG_D("[CH%d] Transfer resumed", ch);
        }

        /* Clear all interrupt flags for this channel */
        PDMA_CH_INT_CLEAR_ALL(ch);
        LOG_D("[CH%d] Interrupts cleared", ch);
    }

    PDMA_UNLOCK();

    if (callback)
    {
        callback(ch, success);
    }
}

/**
 * @brief Initialize PDMA hardware device
 * @return RT_EOK on success, error code on failure
 */
int rt_hw_pdma_device_init(void)
{
    LOG_I("Initializing PDMA controller");

    /* Map PDMA registers */
    pdma_ctrl.reg = rt_ioremap((void *)DMA_BASE_ADDR, DMA_IO_SIZE);
    if (RT_NULL == pdma_ctrl.reg)
    {
        LOG_E("Failed to map PDMA registers");
        return -RT_ERROR;
    }
    LOG_D("Mapped PDMA registers at 0x%08X", DMA_BASE_ADDR);

    if (kd_request_lock(HARDLOCK_PDMA))
    {
        pdma_ctrl.hardlock = -1;
        rt_iounmap(pdma_ctrl.reg);
        LOG_E("Failed to acquire PDMA hardware lock");
        return -RT_ERROR;
    }
    pdma_ctrl.hardlock = HARDLOCK_PDMA;
    LOG_D("Acquired PDMA hardware lock");

    /* Install interrupt handlers for configured channels (channels are unmasked on request) */
#if defined(BSP_USING_PDMA_CHANNEL0)
    pdma_ctrl.chan[PDMA_CH_0].menuconfig_enabled = RT_TRUE;
    pdma_ctrl.chan[PDMA_CH_0].irq_num = PDMA_CHANNEL0_IRQn;
    rt_hw_interrupt_install(PDMA_CHANNEL0_IRQn, k230_pdma_isr, (void *)PDMA_CH_0, "pdma_ch0");
    LOG_D("Installed interrupt handler for channel 0");
#endif

#if defined(BSP_USING_PDMA_CHANNEL1)
    pdma_ctrl.chan[PDMA_CH_1].menuconfig_enabled = RT_TRUE;
    pdma_ctrl.chan[PDMA_CH_1].irq_num = PDMA_CHANNEL1_IRQn;
    rt_hw_interrupt_install(PDMA_CHANNEL1_IRQn, k230_pdma_isr, (void *)PDMA_CH_1, "pdma_ch1");
    LOG_D("Installed interrupt handler for channel 1");
#endif

#if defined(BSP_USING_PDMA_CHANNEL2)
    pdma_ctrl.chan[PDMA_CH_2].menuconfig_enabled = RT_TRUE;
    pdma_ctrl.chan[PDMA_CH_2].irq_num = PDMA_CHANNEL2_IRQn;
    rt_hw_interrupt_install(PDMA_CHANNEL2_IRQn, k230_pdma_isr, (void *)PDMA_CH_2, "pdma_ch2");
    LOG_D("Installed interrupt handler for channel 2");
#endif

#if defined(BSP_USING_PDMA_CHANNEL3)
    pdma_ctrl.chan[PDMA_CH_3].menuconfig_enabled = RT_TRUE;
    pdma_ctrl.chan[PDMA_CH_3].irq_num = PDMA_CHANNEL3_IRQn;
    rt_hw_interrupt_install(PDMA_CHANNEL3_IRQn, k230_pdma_isr, (void *)PDMA_CH_3, "pdma_ch3");
    LOG_D("Installed interrupt handler for channel 3");
#endif

#if defined(BSP_USING_PDMA_CHANNEL4)
    pdma_ctrl.chan[PDMA_CH_4].menuconfig_enabled = RT_TRUE;
    pdma_ctrl.chan[PDMA_CH_4].irq_num = PDMA_CHANNEL4_IRQn;
    rt_hw_interrupt_install(PDMA_CHANNEL4_IRQn, k230_pdma_isr, (void *)PDMA_CH_4, "pdma_ch4");
    LOG_D("Installed interrupt handler for channel 4");
#endif

#if defined(BSP_USING_PDMA_CHANNEL5)
    pdma_ctrl.chan[PDMA_CH_5].menuconfig_enabled = RT_TRUE;
    pdma_ctrl.chan[PDMA_CH_5].irq_num = PDMA_CHANNEL5_IRQn;
    rt_hw_interrupt_install(PDMA_CHANNEL5_IRQn, k230_pdma_isr, (void *)PDMA_CH_5, "pdma_ch5");
    LOG_D("Installed interrupt handler for channel 5");
#endif

#if defined(BSP_USING_PDMA_CHANNEL6)
    pdma_ctrl.chan[PDMA_CH_6].menuconfig_enabled = RT_TRUE;
    pdma_ctrl.chan[PDMA_CH_6].irq_num = PDMA_CHANNEL6_IRQn;
    rt_hw_interrupt_install(PDMA_CHANNEL6_IRQn, k230_pdma_isr, (void *)PDMA_CH_6, "pdma_ch6");
    LOG_D("Installed interrupt handler for channel 6");
#endif

#if defined(BSP_USING_PDMA_CHANNEL7)
    pdma_ctrl.chan[PDMA_CH_7].menuconfig_enabled = RT_TRUE;
    pdma_ctrl.chan[PDMA_CH_7].irq_num = PDMA_CHANNEL7_IRQn;
    rt_hw_interrupt_install(PDMA_CHANNEL7_IRQn, k230_pdma_isr, (void *)PDMA_CH_7, "pdma_ch7");
    LOG_D("Installed interrupt handler for channel 7");
#endif

    return RT_EOK;
}
INIT_BOARD_EXPORT(rt_hw_pdma_device_init);