/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-02-25     GuEe-GUI     the first version
 */

#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>

#define DBG_TAG "rtdm.dma"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

static rt_list_t dmac_nodes = RT_LIST_OBJECT_INIT(dmac_nodes);
static RT_DEFINE_SPINLOCK(dmac_nodes_lock);

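/*
 * Register a DMA controller with the DMA core.
 *
 * The controller must provide a device, an ops table and at least one
 * direction capability bit. On success it is linked into the global
 * controller list, its channel list and mutex are initialized, and, if the
 * device has an OFW node, the controller is bound as that node's fwdata so
 * rt_dma_chan_request() can resolve it by phandle.
 */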
rt_err_t rt_dma_controller_register(struct rt_dma_controller *ctrl)
{
    const char *dev_name;
    char dma_name[RT_NAME_MAX];

    if (!ctrl || !ctrl->dev || !ctrl->ops)
    {
        return -RT_EINVAL;
    }

    dev_name = rt_dm_dev_get_name(ctrl->dev);

    if (rt_bitmap_next_set_bit(ctrl->dir_cap, 0, RT_DMA_DIR_MAX) == RT_DMA_DIR_MAX)
    {
        LOG_E("%s: No direction capability", dev_name);

        return -RT_EINVAL;
    }

    rt_snprintf(dma_name, sizeof(dma_name), "%s-dmac", dev_name);

    rt_list_init(&ctrl->list);

    rt_spin_lock(&dmac_nodes_lock);
    rt_list_insert_before(&dmac_nodes, &ctrl->list);
    rt_spin_unlock(&dmac_nodes_lock);

    rt_list_init(&ctrl->channels_nodes);
    rt_mutex_init(&ctrl->mutex, dma_name, RT_IPC_FLAG_PRIO);

    if (ctrl->dev->ofw_node)
    {
        rt_dm_dev_bind_fwdata(ctrl->dev, RT_NULL, ctrl);
    }

    return RT_EOK;
}

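/*
 * Unregister a DMA controller.
 *
 * Fails with -RT_EBUSY while any channel is still attached. Otherwise the
 * OFW fwdata binding is removed, the mutex is detached and the controller
 * is unlinked from the global controller list.
 */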
rt_err_t rt_dma_controller_unregister(struct rt_dma_controller *ctrl)
{
    if (!ctrl)
    {
        return -RT_EINVAL;
    }

    rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);

    if (!rt_list_isempty(&ctrl->channels_nodes))
    {
        rt_mutex_release(&ctrl->mutex);
        return -RT_EBUSY;
    }

    if (ctrl->dev->ofw_node)
    {
        rt_dm_dev_unbind_fwdata(ctrl->dev, RT_NULL);
    }

    rt_mutex_release(&ctrl->mutex);
    rt_mutex_detach(&ctrl->mutex);

    rt_spin_lock(&dmac_nodes_lock);
    rt_list_remove(&ctrl->list);
    rt_spin_unlock(&dmac_nodes_lock);

    return RT_EOK;
}

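/*
 * Start transferring on a prepared channel: refused if the last prepare
 * call failed, otherwise the controller's start op is called with the
 * controller mutex held.
 */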
rt_err_t rt_dma_chan_start(struct rt_dma_chan *chan)
{
    rt_err_t err;
    struct rt_dma_controller *ctrl;

    if (!chan)
    {
        return -RT_EINVAL;
    }

    if (chan->prep_err)
    {
        LOG_D("%s: Not prepare done", rt_dm_dev_get_name(chan->slave));

        return chan->prep_err;
    }

    ctrl = chan->ctrl;

    rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);

    err = ctrl->ops->start(chan);

    rt_mutex_release(&ctrl->mutex);

    return err;
}

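/*
 * Stop transferring on a channel, calling the controller's stop op with
 * the controller mutex held. Like rt_dma_chan_start(), this is refused if
 * the last prepare call failed.
 */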
rt_err_t rt_dma_chan_stop(struct rt_dma_chan *chan)
{
    rt_err_t err;
    struct rt_dma_controller *ctrl;

    if (!chan)
    {
        return -RT_EINVAL;
    }

    if (chan->prep_err)
    {
        LOG_D("%s: Not prepare done", rt_dm_dev_get_name(chan->slave));

        return chan->prep_err;
    }

    ctrl = chan->ctrl;

    rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);

    err = ctrl->ops->stop(chan);

    rt_mutex_release(&ctrl->mutex);

    return err;
}

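/*
 * Apply a slave configuration to a channel.
 *
 * The direction and bus widths are validated, the direction must be in the
 * controller's capability mask, and only memory-to-memory transfers are
 * allowed on unnamed (anonymously requested) channels. The result is
 * recorded in chan->conf_err so that later prepare calls can refuse an
 * unconfigured channel.
 */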
rt_err_t rt_dma_chan_config(struct rt_dma_chan *chan,
        struct rt_dma_slave_config *conf)
{
    rt_err_t err;
    struct rt_dma_controller *ctrl;
    enum rt_dma_transfer_direction dir;

    if (!chan || !conf)
    {
        /* Cannot record conf_err on a RT_NULL channel, so fail directly */
        return -RT_EINVAL;
    }

    dir = conf->direction;

    if (dir >= RT_DMA_DIR_MAX)
    {
        err = -RT_EINVAL;
        goto _end;
    }

    if (conf->src_addr_width >= RT_DMA_SLAVE_BUSWIDTH_BYTES_MAX ||
        conf->dst_addr_width >= RT_DMA_SLAVE_BUSWIDTH_BYTES_MAX)
    {
        err = -RT_EINVAL;
        goto _end;
    }

    ctrl = chan->ctrl;

    if (!rt_bitmap_test_bit(ctrl->dir_cap, dir))
    {
        err = -RT_ENOSYS;
        goto _end;
    }

    if (!chan->name && dir != RT_DMA_MEM_TO_MEM)
    {
        LOG_E("%s: illegal config for unnamed channels",
                rt_dm_dev_get_name(ctrl->dev));

        err = -RT_EINVAL;
        goto _end;
    }

    rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);

    err = ctrl->ops->config(chan, conf);

    rt_mutex_release(&ctrl->mutex);

    if (!err)
    {
        rt_memcpy(&chan->conf, conf, sizeof(*conf));
    }

_end:
    chan->conf_err = err;

    return err;
}

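/*
 * Completion hook for controller drivers: invoked when a transfer of
 * `size` bytes has finished, forwarding to the slave's callback if one is
 * installed.
 */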
rt_err_t rt_dma_chan_done(struct rt_dma_chan *chan, rt_size_t size)
{
    if (!chan)
    {
        return -RT_EINVAL;
    }

    if (chan->callback)
    {
        chan->callback(chan, size);
    }

    return RT_EOK;
}

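/*
 * A transfer address is treated as illegal when it lies below the base
 * address recorded in the channel configuration: `addr0` is the requested
 * transfer address, `addr1` the configured base. Logs and returns RT_TRUE
 * on violation.
 */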
static rt_bool_t range_is_illegal(const char *name, const char *desc,
        rt_ubase_t addr0, rt_ubase_t addr1)
{
    rt_bool_t illegal = addr0 < addr1;

    if (illegal)
    {
        LOG_E("%s: %s %p is out of config %p", name, desc,
                (void *)addr0, (void *)addr1);
    }

    return illegal;
}

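/*
 * Prepare a memory-to-memory copy on a configured channel.
 *
 * The channel must have been configured for RT_DMA_MEM_TO_MEM, and both
 * transfer addresses must not fall below the configured base addresses.
 * The result is recorded in chan->prep_err, which gates
 * rt_dma_chan_start()/rt_dma_chan_stop().
 */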
rt_err_t rt_dma_prep_memcpy(struct rt_dma_chan *chan,
        struct rt_dma_slave_transfer *transfer)
{
    rt_err_t err;
    rt_size_t len;
    rt_ubase_t dma_addr_src, dma_addr_dst;
    struct rt_dma_controller *ctrl;
    struct rt_dma_slave_config *conf;

    if (!chan || !transfer)
    {
        return -RT_EINVAL;
    }

    ctrl = chan->ctrl;
    conf = &chan->conf;

    if (chan->conf_err)
    {
        LOG_D("%s: Not config done", rt_dm_dev_get_name(chan->slave));

        return chan->conf_err;
    }

    RT_ASSERT(chan->conf.direction == RT_DMA_MEM_TO_MEM);

    dma_addr_src = transfer->src_addr;
    dma_addr_dst = transfer->dst_addr;
    len = transfer->buffer_len;

    if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "source",
            dma_addr_src, conf->src_addr))
    {
        return -RT_EINVAL;
    }

    if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "dest",
            dma_addr_dst, conf->dst_addr))
    {
        return -RT_EINVAL;
    }

    if (ctrl->ops->prep_memcpy)
    {
        rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);

        err = ctrl->ops->prep_memcpy(chan, dma_addr_src, dma_addr_dst, len);

        rt_mutex_release(&ctrl->mutex);
    }
    else
    {
        err = -RT_ENOSYS;
    }

    if (!err)
    {
        rt_memcpy(&chan->transfer, transfer, sizeof(*transfer));
    }

    chan->prep_err = err;

    return err;
}

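/*
 * Prepare a cyclic (circular-buffer) transfer on a configured channel.
 *
 * The memory-side address is taken from src_addr for MEM_TO_DEV and
 * MEM_TO_MEM, from dst_addr for DEV_TO_MEM, and is unused (set to an
 * all-ones sentinel) for any other direction. buffer_len is the total
 * ring size, period_len the interval at which completion is signaled.
 */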
rt_err_t rt_dma_prep_cyclic(struct rt_dma_chan *chan,
        struct rt_dma_slave_transfer *transfer)
{
    rt_err_t err;
    rt_ubase_t dma_buf_addr;
    struct rt_dma_controller *ctrl;
    struct rt_dma_slave_config *conf;
    enum rt_dma_transfer_direction dir;

    if (!chan || !transfer)
    {
        return -RT_EINVAL;
    }

    ctrl = chan->ctrl;
    conf = &chan->conf;

    if (chan->conf_err)
    {
        LOG_D("%s: Not config done", rt_dm_dev_get_name(chan->slave));

        return chan->conf_err;
    }

    dir = chan->conf.direction;

    if (dir == RT_DMA_MEM_TO_DEV || dir == RT_DMA_MEM_TO_MEM)
    {
        dma_buf_addr = transfer->src_addr;

        if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "source",
                dma_buf_addr, conf->src_addr))
        {
            return -RT_EINVAL;
        }
    }
    else if (dir == RT_DMA_DEV_TO_MEM)
    {
        dma_buf_addr = transfer->dst_addr;

        if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "dest",
                dma_buf_addr, conf->dst_addr))
        {
            return -RT_EINVAL;
        }
    }
    else
    {
        dma_buf_addr = ~0UL;
    }

    if (ctrl->ops->prep_cyclic)
    {
        rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);

        err = ctrl->ops->prep_cyclic(chan, dma_buf_addr,
                transfer->buffer_len, transfer->period_len, dir);

        rt_mutex_release(&ctrl->mutex);
    }
    else
    {
        err = -RT_ENOSYS;
    }

    if (!err)
    {
        rt_memcpy(&chan->transfer, transfer, sizeof(*transfer));
    }

    chan->prep_err = err;

    return err;
}

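/*
 * Prepare a single (one-shot) transfer on a configured channel. Address
 * selection follows the same direction rules as rt_dma_prep_cyclic().
 */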
rt_err_t rt_dma_prep_single(struct rt_dma_chan *chan,
        struct rt_dma_slave_transfer *transfer)
{
    rt_err_t err;
    rt_ubase_t dma_buf_addr;
    struct rt_dma_controller *ctrl;
    struct rt_dma_slave_config *conf;
    enum rt_dma_transfer_direction dir;

    if (!chan || !transfer)
    {
        return -RT_EINVAL;
    }

    ctrl = chan->ctrl;
    conf = &chan->conf;

    if (chan->conf_err)
    {
        LOG_D("%s: Not config done", rt_dm_dev_get_name(chan->slave));

        return chan->conf_err;
    }

    dir = chan->conf.direction;

    if (dir == RT_DMA_MEM_TO_DEV || dir == RT_DMA_MEM_TO_MEM)
    {
        dma_buf_addr = transfer->src_addr;

        if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "source",
                dma_buf_addr, conf->src_addr))
        {
            return -RT_EINVAL;
        }
    }
    else if (dir == RT_DMA_DEV_TO_MEM)
    {
        dma_buf_addr = transfer->dst_addr;

        if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "dest",
                dma_buf_addr, conf->dst_addr))
        {
            return -RT_EINVAL;
        }
    }
    else
    {
        dma_buf_addr = ~0UL;
    }

    if (ctrl->ops->prep_single)
    {
        rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);

        err = ctrl->ops->prep_single(chan, dma_buf_addr,
                transfer->buffer_len, dir);

        rt_mutex_release(&ctrl->mutex);
    }
    else
    {
        err = -RT_ENOSYS;
    }

    if (!err)
    {
        rt_memcpy(&chan->transfer, transfer, sizeof(*transfer));
    }

    chan->prep_err = err;

    return err;
}

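/*
 * Resolve a named DMA request through the device tree: look up `name` in
 * the slave's "dma-names" property, parse the matching "dmas" phandle
 * specifier, request the controller's platform device on demand if the
 * node has no bound data yet, and return the controller bound to that
 * node (or RT_NULL).
 */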
static struct rt_dma_controller *ofw_find_dma_controller(struct rt_device *dev,
        const char *name, struct rt_ofw_cell_args *args)
{
    struct rt_dma_controller *ctrl = RT_NULL;
#ifdef RT_USING_OFW
    int index;
    struct rt_ofw_node *np = dev->ofw_node, *ctrl_np;

    if (!np)
    {
        return RT_NULL;
    }

    index = rt_ofw_prop_index_of_string(np, "dma-names", name);

    if (index < 0)
    {
        return RT_NULL;
    }

    if (!rt_ofw_parse_phandle_cells(np, "dmas", "#dma-cells", index, args))
    {
        ctrl_np = args->data;

        if (!rt_ofw_data(ctrl_np))
        {
            rt_platform_ofw_request(ctrl_np);
        }

        ctrl = rt_ofw_data(ctrl_np);
        rt_ofw_node_put(ctrl_np);
    }
#endif /* RT_USING_OFW */

    return ctrl;
}

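/*
 * Request a DMA channel for a slave device.
 *
 * With a name, the channel is resolved through the device tree via
 * "dma-names"/"dmas". Without one, the first registered controller capable
 * of memory-to-memory transfers is picked. Returns an error pointer on
 * failure; callers should check the result with rt_is_err().
 */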
struct rt_dma_chan *rt_dma_chan_request(struct rt_device *dev, const char *name)
{
    void *fw_data = RT_NULL;
    struct rt_dma_chan *chan;
    struct rt_ofw_cell_args dma_args;
    struct rt_dma_controller *ctrl = RT_NULL;

    if (!dev)
    {
        return rt_err_ptr(-RT_EINVAL);
    }

    if (name)
    {
        fw_data = &dma_args;
        ctrl = ofw_find_dma_controller(dev, name, &dma_args);
    }
    else
    {
        struct rt_dma_controller *ctrl_tmp;

        rt_spin_lock(&dmac_nodes_lock);
        rt_list_for_each_entry(ctrl_tmp, &dmac_nodes, list)
        {
            /* Only memory-to-memory for unnamed requests */
            if (rt_bitmap_test_bit(ctrl_tmp->dir_cap, RT_DMA_MEM_TO_MEM))
            {
                ctrl = ctrl_tmp;
                break;
            }
        }
        rt_spin_unlock(&dmac_nodes_lock);
    }

    if (rt_is_err_or_null(ctrl))
    {
        /* `ctrl` is RT_NULL or error-encoded here; forward the error code */
        return ctrl ? rt_err_ptr(rt_ptr_err(ctrl)) : rt_err_ptr(-RT_ENOSYS);
    }

    if (ctrl->ops->request_chan)
    {
        chan = ctrl->ops->request_chan(ctrl, dev, fw_data);
    }
    else
    {
        chan = rt_calloc(1, sizeof(*chan));

        if (!chan)
        {
            chan = rt_err_ptr(-RT_ENOMEM);
        }
    }

    if (rt_is_err(chan))
    {
        return chan;
    }

    if (!chan)
    {
        LOG_E("%s: request_chan returned no channel and no error",
                rt_dm_dev_get_name(ctrl->dev));

        return rt_err_ptr(-RT_ERROR);
    }

    chan->name = name;
    chan->ctrl = ctrl;
    chan->slave = dev;

    rt_list_init(&chan->list);
    chan->conf_err = -RT_ERROR;
    chan->prep_err = -RT_ERROR;

    rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
    rt_list_insert_before(&ctrl->channels_nodes, &chan->list);
    rt_mutex_release(&ctrl->mutex);

    return chan;
}

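/*
 * Release a channel obtained from rt_dma_chan_request(): unlink it from
 * the controller's channel list, then hand it to the controller's
 * release_chan op, or free it directly when the controller provided no
 * request/release ops.
 */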
rt_err_t rt_dma_chan_release(struct rt_dma_chan *chan)
{
    rt_err_t err = RT_EOK;

    if (!chan)
    {
        return -RT_EINVAL;
    }

    rt_mutex_take(&chan->ctrl->mutex, RT_WAITING_FOREVER);
    rt_list_remove(&chan->list);
    rt_mutex_release(&chan->ctrl->mutex);

    if (chan->ctrl->ops->release_chan)
    {
        err = chan->ctrl->ops->release_chan(chan);
    }
    else
    {
        rt_free(chan);
    }

    return err;
}

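/*
 * A minimal usage sketch from a slave driver's point of view. The helper
 * names, the "tx" channel name and the callback below are hypothetical and
 * only illustrate the intended call order (request, configure, prepare,
 * start, release); they are not part of this file's API surface:
 *
 *    static void tx_done(struct rt_dma_chan *chan, rt_size_t size)
 *    {
 *        rt_kprintf("DMA done: %u bytes\n", (rt_uint32_t)size);
 *    }
 *
 *    static rt_err_t slave_start_tx(struct rt_device *dev,
 *            struct rt_dma_slave_config *conf,
 *            struct rt_dma_slave_transfer *transfer)
 *    {
 *        rt_err_t err;
 *        struct rt_dma_chan *chan = rt_dma_chan_request(dev, "tx");
 *
 *        if (rt_is_err(chan))
 *        {
 *            return rt_ptr_err(chan);
 *        }
 *
 *        chan->callback = tx_done;
 *
 *        if ((err = rt_dma_chan_config(chan, conf)) ||
 *            (err = rt_dma_prep_single(chan, transfer)) ||
 *            (err = rt_dma_chan_start(chan)))
 *        {
 *            rt_dma_chan_release(chan);
 *        }
 *
 *        return err;
 *    }
 */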