/*
 * Copyright (c) 2025 Silicon Laboratories Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Verify Zephyr DMA memory-to-memory transfer with block append during a transfer
 * @details
 * - 4 tests:
 * -# restart_transfer test: Check that the Silabs LDMA append block function restarts the
 *    transfer when a block is appended after the transfer is done.
 * -# restart_in_isr test: Check that if a transfer completes while a block is being appended,
 *    the next DMA ISR restarts the transfer with the appended block.
 * -# stress_in_isr test: Check that the next block can be appended immediately from a
 *    DMA_STATUS_BLOCK callback.
 * -# loopstress test: Check that blocks can be appended continuously and that the append
 *    function returns an error when the transfer already has a pending appended block.
 */

#include <zephyr/kernel.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/dma/dma_silabs_ldma.h>
#include <zephyr/ztest.h>

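/* Size in bytes of each block appended during the stress tests */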
#define BLOCK_SIZE 4

/* The source buffer must be in RAM so it can be used as a DMA source pointer. */
static __aligned(32) uint8_t tx_data[CONFIG_DMA_BA_XFER_SIZE];
static __aligned(32) uint8_t rx_data[CONFIG_DMA_BA_XFER_SIZE];

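/* Signaled from the DMA callback once the expected transfer has completed */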
K_SEM_DEFINE(xfer_sem, 0, 1);

static struct dma_config dma_cfg = {0};
static struct dma_block_config dma_block_cfg;
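/* Offsets into rx_data and tx_data of the next block to append (used by the stress tests) */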
static uint32_t rx_idx;
static uint32_t tx_idx;

static void dma_ba_callback_restart(const struct device *dma_dev, void *user_data,
				    uint32_t channel, int status)
{
	if (status < 0) {
		TC_PRINT("callback status %d\n", status);
	} else {
		TC_PRINT("giving xfer_sem\n");
		k_sem_give(&xfer_sem);
	}
}

static int test_ba_restart_transfer(void)
{
	const struct device *dma;
	static int chan_id;

	TC_PRINT("Preparing DMA Controller\n");

	memset(tx_data, 0, sizeof(tx_data));

	for (int i = 0; i < CONFIG_DMA_BA_XFER_SIZE; i++) {
		tx_data[i] = i;
	}

	memset(rx_data, 0, sizeof(rx_data));

	dma = DEVICE_DT_GET(DT_ALIAS(dma0));
	if (!device_is_ready(dma)) {
		TC_PRINT("dma controller device is not ready\n");
		return TC_FAIL;
	}

	dma_cfg.channel_direction = MEMORY_TO_MEMORY;
	dma_cfg.source_data_size = 1U;
	dma_cfg.dest_data_size = 1U;
	dma_cfg.source_burst_length = 1U;
	dma_cfg.dest_burst_length = 1U;
	dma_cfg.user_data = NULL;
	dma_cfg.dma_callback = dma_ba_callback_restart;
	dma_cfg.head_block = &dma_block_cfg;
	dma_cfg.complete_callback_en = true; /* per block completion */

	chan_id = dma_request_channel(dma, NULL);
	if (chan_id < 0) {
		return TC_FAIL;
	}

	memset(&dma_block_cfg, 0, sizeof(dma_block_cfg));
	dma_block_cfg.block_size = CONFIG_DMA_BA_XFER_SIZE / 2;
	dma_block_cfg.source_address = (uint32_t)(tx_data);
	dma_block_cfg.dest_address = (uint32_t)(rx_data);
91 TC_PRINT("block_size %d, source addr %x, dest addr %x\n", CONFIG_DMA_BA_XFER_SIZE,
92 dma_block_cfg.source_address, dma_block_cfg.dest_address);

	if (dma_config(dma, chan_id, &dma_cfg)) {
		TC_PRINT("ERROR: transfer config (%d)\n", chan_id);
		return TC_FAIL;
	}

	TC_PRINT("Starting the transfer on channel %d\n", chan_id);

	if (dma_start(dma, chan_id)) {
		TC_PRINT("ERROR: transfer start (%d)\n", chan_id);
		return TC_FAIL;
	}

	/* Make sure that the DMA transfer is done */
	k_busy_wait(1000 * 100); /* busy wait for 100 ms */

	/* Append the next block on a channel whose transfer is already done: the append
	 * function is expected to restart the transfer with this new block.
	 */
	dma_block_cfg.source_address = (uint32_t)(tx_data) + CONFIG_DMA_BA_XFER_SIZE / 2;
	dma_block_cfg.dest_address = (uint32_t)(rx_data) + CONFIG_DMA_BA_XFER_SIZE / 2;

	dma_cfg.head_block = &dma_block_cfg;
	silabs_ldma_append_block(dma, chan_id, &dma_cfg);

	if (k_sem_take(&xfer_sem, K_MSEC(1000)) != 0) {
		TC_PRINT("Timed out waiting for xfers\n");
		return TC_FAIL;
	}

	TC_PRINT("Verify that the RX buffer contains the full TX buffer\n");

	if (memcmp(tx_data, rx_data, CONFIG_DMA_BA_XFER_SIZE)) {
		return TC_FAIL;
	}

	TC_PRINT("Finished: DMA block append restart transfer\n");
	return TC_PASS;
}

static int test_ba_restart_in_isr(void)
{
	const struct device *dma;
	static int chan_id;
	unsigned int key;

	TC_PRINT("Preparing DMA Controller\n");

	memset(tx_data, 0, sizeof(tx_data));

	for (int i = 0; i < CONFIG_DMA_BA_XFER_SIZE; i++) {
		tx_data[i] = i;
	}

	memset(rx_data, 0, sizeof(rx_data));

	dma = DEVICE_DT_GET(DT_ALIAS(dma0));
	if (!device_is_ready(dma)) {
		TC_PRINT("dma controller device is not ready\n");
		return TC_FAIL;
	}

	dma_cfg.channel_direction = MEMORY_TO_MEMORY;
	dma_cfg.source_data_size = 1U;
	dma_cfg.dest_data_size = 1U;
	dma_cfg.source_burst_length = 1U;
	dma_cfg.dest_burst_length = 1U;
	dma_cfg.user_data = NULL;
	dma_cfg.dma_callback = dma_ba_callback_restart;
	dma_cfg.head_block = &dma_block_cfg;
	dma_cfg.complete_callback_en = true; /* per block completion */

	chan_id = dma_request_channel(dma, NULL);
	if (chan_id < 0) {
		return TC_FAIL;
	}

	memset(&dma_block_cfg, 0, sizeof(dma_block_cfg));
	dma_block_cfg.block_size = CONFIG_DMA_BA_XFER_SIZE / 2;
	dma_block_cfg.source_address = (uint32_t)(tx_data);
	dma_block_cfg.dest_address = (uint32_t)(rx_data);
	TC_PRINT("block_size %d, source addr %x, dest addr %x\n", dma_block_cfg.block_size,
		 dma_block_cfg.source_address, dma_block_cfg.dest_address);

	if (dma_config(dma, chan_id, &dma_cfg)) {
		TC_PRINT("ERROR: transfer config (%d)\n", chan_id);
		return TC_FAIL;
	}

	TC_PRINT("Starting the transfer on channel %d and waiting completion\n", chan_id);

	/* Lock IRQs so that the DMA ISR is not triggered */
	key = irq_lock();
	if (dma_start(dma, chan_id)) {
		TC_PRINT("ERROR: transfer start (%d)\n", chan_id);
		return TC_FAIL;
	}

	/* Make sure that the DMA transfer is done */
	k_busy_wait(1000 * 100); /* busy wait for 100 ms */

	/* Clear the done flag of the DMA channel to simulate an append while a transfer is
	 * ongoing
	 */
	sys_clear_bit((mem_addr_t)&LDMA->CHDONE, chan_id);

	/* Append the next block while the channel appears to be still transferring */
	dma_block_cfg.source_address = (uint32_t)(tx_data) + CONFIG_DMA_BA_XFER_SIZE / 2;
	dma_block_cfg.dest_address = (uint32_t)(rx_data) + CONFIG_DMA_BA_XFER_SIZE / 2;

	dma_cfg.head_block = &dma_block_cfg;
	silabs_ldma_append_block(dma, chan_id, &dma_cfg);

	/* Set the CHDONE bit to simulate that the DMA transfer finished while the block was
	 * being appended
	 */
	sys_set_bit((mem_addr_t)&LDMA->CHDONE, chan_id);

	/* Check that the ISR in the DMA driver correctly restarts the DMA engine with the
	 * newly appended block
	 */
	irq_unlock(key);

	if (k_sem_take(&xfer_sem, K_MSEC(1000)) != 0) {
		TC_PRINT("Timed out waiting for xfers\n");
		return TC_FAIL;
	}

	TC_PRINT("Verify that the RX buffer contains the full TX buffer\n");

	if (memcmp(tx_data, rx_data, CONFIG_DMA_BA_XFER_SIZE)) {
		return TC_FAIL;
	}

	TC_PRINT("Finished: DMA block append restart in isr\n");
	return TC_PASS;
}

static void dma_ba_callback_stress_in_isr(const struct device *dma_dev, void *user_data,
					   uint32_t channel, int status)
{
	struct dma_config *dma_cfg = (struct dma_config *)user_data;

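	/*
	 * On each block-complete callback, append the next block directly from ISR context
	 * until the whole buffer has been queued, then signal the test thread.
	 */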
	if (status < 0) {
		TC_PRINT("callback status %d\n", status);
	} else {
		if (rx_idx <= CONFIG_DMA_BA_XFER_SIZE - BLOCK_SIZE) {
			dma_block_cfg.source_address = (uint32_t)(tx_data + tx_idx);
			dma_block_cfg.dest_address = (uint32_t)(rx_data + rx_idx);
			rx_idx += BLOCK_SIZE;
			tx_idx += BLOCK_SIZE;
			if (silabs_ldma_append_block(dma_dev, channel, dma_cfg)) {
				TC_PRINT("append block failed\n");
			}
		} else {
			TC_PRINT("giving xfer_sem\n");
			k_sem_give(&xfer_sem);
		}
	}
}

static int test_ba_stress_in_isr(void)
{
	const struct device *dma;
	static int chan_id;
	unsigned int key;

	TC_PRINT("Preparing DMA Controller\n");

	memset(tx_data, 0, sizeof(tx_data));

	rx_idx = 0;
	tx_idx = 0;

	for (int i = 0; i < CONFIG_DMA_BA_XFER_SIZE; i++) {
		tx_data[i] = i;
	}

	memset(rx_data, 0, sizeof(rx_data));

	dma = DEVICE_DT_GET(DT_ALIAS(dma0));
	if (!device_is_ready(dma)) {
		return TC_FAIL;
	}

	dma_cfg.channel_direction = MEMORY_TO_MEMORY;
	dma_cfg.source_data_size = 1U;
	dma_cfg.dest_data_size = 1U;
	dma_cfg.source_burst_length = 1U;
	dma_cfg.dest_burst_length = 1U;
	dma_cfg.user_data = &dma_cfg;
	dma_cfg.dma_callback = dma_ba_callback_stress_in_isr;
	dma_cfg.head_block = &dma_block_cfg;
	dma_cfg.complete_callback_en = true; /* per block completion */

	chan_id = dma_request_channel(dma, NULL);
	if (chan_id < 0) {
		return TC_FAIL;
	}

	memset(&dma_block_cfg, 0, sizeof(dma_block_cfg));

	/* Configure the first transfer block */
	dma_block_cfg.block_size = BLOCK_SIZE;
	dma_block_cfg.source_address = (uint32_t)(tx_data + tx_idx);
	dma_block_cfg.dest_address = (uint32_t)(rx_data + rx_idx);
	rx_idx += BLOCK_SIZE;
	tx_idx += BLOCK_SIZE;

	TC_PRINT("dma block %d block_size %d, source addr %x, dest addr %x\n", 0, BLOCK_SIZE,
		 dma_block_cfg.source_address, dma_block_cfg.dest_address);

	if (dma_config(dma, chan_id, &dma_cfg)) {
		TC_PRINT("ERROR: transfer config (%d)\n", chan_id);
		return TC_FAIL;
	}

	TC_PRINT("Starting the transfer on channel %d and waiting completion\n", chan_id);

	/* Lock IRQs so that the DMA ISR is not triggered */
	key = irq_lock();
	if (dma_start(dma, chan_id)) {
		TC_PRINT("ERROR: transfer start (%d)\n", chan_id);
		return TC_FAIL;
	}

	/* Append a new block on the channel */
	dma_block_cfg.source_address = (uint32_t)(tx_data + tx_idx);
	dma_block_cfg.dest_address = (uint32_t)(rx_data + rx_idx);
	rx_idx += BLOCK_SIZE;
	tx_idx += BLOCK_SIZE;
	silabs_ldma_append_block(dma, chan_id, &dma_cfg);

	irq_unlock(key);

	/* All the next blocks will be appended from the ISR */

	if (k_sem_take(&xfer_sem, K_MSEC(1000)) != 0) {
		TC_PRINT("Timed out waiting for xfers\n");
		return TC_FAIL;
	}

	TC_PRINT("Verify that the RX buffer contains the full TX buffer\n");

	if (memcmp(tx_data, rx_data, CONFIG_DMA_BA_XFER_SIZE)) {
		return TC_FAIL;
	}

	TC_PRINT("Finished: DMA block append stress in isr\n");
	return TC_PASS;
}

static void dma_ba_callback_loopstress(const struct device *dma_dev, void *user_data,
				       uint32_t channel, int status)
{
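	/* Give the semaphore only once every block of the buffer has been appended */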
	if (status < 0) {
		TC_PRINT("callback status %d\n", status);
	} else {
		if (rx_idx == CONFIG_DMA_BA_XFER_SIZE) {
			TC_PRINT("giving xfer_sem\n");
			k_sem_give(&xfer_sem);
		}
	}
}

static int test_ba_loopstress(void)
{
	const struct device *dma;
	static int chan_id;

	TC_PRINT("Preparing DMA Controller\n");

	memset(tx_data, 0, sizeof(tx_data));

	rx_idx = 0;
	tx_idx = 0;

	for (int i = 0; i < CONFIG_DMA_BA_XFER_SIZE; i++) {
		tx_data[i] = i;
	}

	memset(rx_data, 0, sizeof(rx_data));

	dma = DEVICE_DT_GET(DT_ALIAS(dma0));
	if (!device_is_ready(dma)) {
		TC_PRINT("dma controller device is not ready\n");
		return TC_FAIL;
	}

	dma_cfg.channel_direction = MEMORY_TO_MEMORY;
	dma_cfg.source_data_size = 1U;
	dma_cfg.dest_data_size = 1U;
	dma_cfg.source_burst_length = 1U;
	dma_cfg.dest_burst_length = 1U;
	dma_cfg.user_data = &dma_cfg;
	dma_cfg.dma_callback = dma_ba_callback_loopstress;
	dma_cfg.head_block = &dma_block_cfg;
	dma_cfg.complete_callback_en = true; /* per block completion */

	chan_id = dma_request_channel(dma, NULL);
	if (chan_id < 0) {
		return TC_FAIL;
	}

	memset(&dma_block_cfg, 0, sizeof(dma_block_cfg));

	/* Setting the first DMA transfer block */
	dma_block_cfg.block_size = BLOCK_SIZE;
	dma_block_cfg.source_address = (uint32_t)(tx_data + tx_idx);
	dma_block_cfg.dest_address = (uint32_t)(rx_data + rx_idx);
	rx_idx += BLOCK_SIZE;
	tx_idx += BLOCK_SIZE;

	TC_PRINT("dma block %d block_size %d, source addr %x, dest addr %x\n", 0, BLOCK_SIZE,
		 dma_block_cfg.source_address, dma_block_cfg.dest_address);

	if (dma_config(dma, chan_id, &dma_cfg)) {
		TC_PRINT("ERROR: transfer config (%d)\n", chan_id);
		return TC_FAIL;
	}

	TC_PRINT("Starting the transfer on channel %d and waiting completion\n", chan_id);

	if (dma_start(dma, chan_id)) {
		TC_PRINT("ERROR: transfer start (%d)\n", chan_id);
		return TC_FAIL;
	}

	/* Append new blocks on the channel */
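	/*
	 * silabs_ldma_append_block() is expected to return an error while a previously
	 * appended block is still pending; in that case the indexes are not advanced and the
	 * same block is retried until the append is accepted.
	 */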
	while (rx_idx <= CONFIG_DMA_BA_XFER_SIZE - BLOCK_SIZE) {
		dma_block_cfg.source_address = (uint32_t)(tx_data + tx_idx);
		dma_block_cfg.dest_address = (uint32_t)(rx_data + rx_idx);

		if (!silabs_ldma_append_block(dma, chan_id, &dma_cfg)) {
			rx_idx += BLOCK_SIZE;
			tx_idx += BLOCK_SIZE;
		}
	}

	if (k_sem_take(&xfer_sem, K_MSEC(1000)) != 0) {
		TC_PRINT("Timed out waiting for xfers\n");
		return TC_FAIL;
	}

	TC_PRINT("Verify that the RX buffer contains the full TX buffer\n");

	if (memcmp(tx_data, rx_data, CONFIG_DMA_BA_XFER_SIZE)) {
		return TC_FAIL;
	}

	TC_PRINT("Finished: DMA block append loopstress\n");
	return TC_PASS;
}

/* export test cases */
ZTEST(dma_m2m_ba, test_dma_m2m_ba_restart_transfer)
{
	zassert_true((test_ba_restart_transfer() == TC_PASS));
}

ZTEST(dma_m2m_ba, test_dma_m2m_ba_restart_in_isr)
{
	zassert_true((test_ba_restart_in_isr() == TC_PASS));
}

ZTEST(dma_m2m_ba, test_dma_m2m_stress_in_isr)
{
	zassert_true((test_ba_stress_in_isr() == TC_PASS));
}

ZTEST(dma_m2m_ba, test_dma_m2m_loopstress)
{
	zassert_true((test_ba_loopstress() == TC_PASS));
}