/*
 * Copyright (c) 2006-2025 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-26     GuEe-GUI     first version
 * 2025-01-24     wumingzi     add doxygen comment
 */

#include <rtthread.h>
#include <rtservice.h>
#include <rtdevice.h>

/**
 * @addtogroup group_driver_clock
 * @{
 */

#define DBG_TAG "rtdm.clk"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

static RT_DEFINE_SPINLOCK(_clk_lock);
static rt_list_t _clk_nodes = RT_LIST_OBJECT_INIT(_clk_nodes);
static rt_list_t _clk_notifier_nodes = RT_LIST_OBJECT_INIT(_clk_notifier_nodes);

/**
 * @brief Release clock node
 *
 * @param r point to reference count of clock node
 * @warning This function can only print a log for now, and MORE DETAILS SHOULD BE IMPLEMENTED.
 */
static void clk_release(struct rt_ref *r)
{
    struct rt_clk_node *clk_np = rt_container_of(r, struct rt_clk_node, ref);

    LOG_E("%s is released", clk_np->name);
    (void)clk_np;

    RT_ASSERT(0);
}

/**
 * @brief Increase reference count for clock node
 *
 * @param clk_np point to clock node
 *
 * @return struct rt_clk_node * point to clock node whose reference count has increased
 */
rt_inline struct rt_clk_node *clk_get(struct rt_clk_node *clk_np)
{
    rt_ref_get(&clk_np->ref);

    return clk_np;
}

/**
 * @brief Decrease reference count for clock node
 *
 * @param clk_np point to clock node
 *
 */
rt_inline void clk_put(struct rt_clk_node *clk_np)
{
    rt_ref_put(&clk_np->ref, &clk_release);
}

/**
 * @brief Allocate memory space for struct clock and return it
 *
 * @param clk_np point to clock node
 * @param dev_id device identifier for the clock
 * @param con_id connection identifier for the clock
 * @param fw_node point to the firmware node associated with the clock
 *
 * @return struct rt_clk* point to clock
 */
static struct rt_clk *clk_alloc(struct rt_clk_node *clk_np, const char *dev_id,
        const char *con_id, void *fw_node)
{
    struct rt_clk *clk = rt_calloc(1, sizeof(*clk));

    if (clk)
    {
        clk->clk_np = clk_np;
        clk->dev_id = dev_id;
        clk->con_id = con_id;

        clk->fw_node = fw_node;
    }
    else
    {
        clk = rt_err_ptr(-RT_ENOMEM);
    }

    return clk;
}

/**
 * @brief Free memory space of clock object
 *
 * @param clk point to clock
 *
 */
static void clk_free(struct rt_clk *clk)
{
    struct rt_clk_node *clk_np = clk->clk_np;

    if (clk_np && clk_np->ops->finit)
    {
        clk_np->ops->finit(clk);
    }

    rt_free(clk);
}

/**
 * @brief Allocate memory space and create clock object
 *
 * @param clk_np point to clock node
 * @param dev_id device identifier for the clock
 * @param con_id connection identifier for the clock
 * @param fw_data point to the firmware data associated with the clock
 * @param fw_node point to the firmware node associated with the clock
 *
 * @return struct rt_clk* point to clock
 */
static struct rt_clk *clk_create(struct rt_clk_node *clk_np, const char *dev_id,
        const char *con_id, void *fw_data, void *fw_node)
{
    struct rt_clk *clk = clk_alloc(clk_np, dev_id, con_id, fw_node);

    if (!rt_is_err(clk))
    {
        clk_get(clk_np);

        if (clk_np->ops->init && clk_np->ops->init(clk, fw_data))
        {
            LOG_E("Dev[%s] Con[%s] init fail", dev_id, con_id);

            clk_free(clk);
            clk = RT_NULL;
        }
    }

    return clk;
}

/**
 * @brief Notify all notifiers registered for the corresponding clock
 *
 * @param clk_np point to clock node
 * @param msg message identifier for the event
 * @param old_rate old rate of the clock before the event
 * @param new_rate new rate of the clock after the event
 *
 * @return rt_err_t RT_EOK on notify clock successfully, and other value is failed.
 */
static rt_err_t clk_notify(struct rt_clk_node *clk_np, rt_ubase_t msg, rt_ubase_t old_rate, rt_ubase_t new_rate)
{
    rt_err_t err = RT_EOK;
    struct rt_clk_notifier *notifier;

    rt_list_for_each_entry(notifier, &_clk_notifier_nodes, list)
    {
        if (notifier->clk->clk_np == clk_np)
        {
            err = notifier->callback(notifier, msg, old_rate, new_rate);

            /* Only check hardware errors */
            if (err == -RT_EIO)
            {
                break;
            }
        }
    }

    return err;
}

/**
 * @brief Set parent clock
 *
 * @param clk_np point to clock node
 * @param parent_np point to parent clock node
 *
 */
static void clk_set_parent(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np)
{
    rt_hw_spin_lock(&_clk_lock.lock);

    clk_np->parent = parent_np;

    rt_list_insert_after(&parent_np->children_nodes, &clk_np->list);

    rt_hw_spin_unlock(&_clk_lock.lock);
}

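/* Fallback empty ops table, so clk_np->ops is always safe to dereference */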
static const struct rt_clk_ops unused_clk_ops =
{
};

/**
 * @brief Register clock node into clock list
 *
 * @param clk_np point to the clock node that will be registered
 * @param parent_np point to parent rt_clk. If it is RT_NULL, the clock node will be
 *        linked to the global clock list.
 *
 * @retval RT_EOK
 * @retval -RT_ENOMEM
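 *
 * A minimal registration sketch (my_clk_ops and the callbacks it points to are
 * hypothetical, not part of this file):
 * @code{.c}
 * static const struct rt_clk_ops my_clk_ops =
 * {
 *     .enable = my_clk_enable,
 *     .disable = my_clk_disable,
 * };
 *
 * static struct rt_clk_node my_clk_node =
 * {
 *     .name = "my-clk",
 *     .ops = &my_clk_ops,
 * };
 *
 * rt_err_t err = rt_clk_register(&my_clk_node, RT_NULL);
 * @endcode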
 */
rt_err_t rt_clk_register(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np)
{
    rt_err_t err = RT_EOK;
    struct rt_clk *clk = RT_NULL;

    if (clk_np)
    {
        clk_np->clk = clk;

        if (!clk_np->ops)
        {
            clk_np->ops = &unused_clk_ops;
        }

#if RT_NAME_MAX > 0
        rt_strncpy(clk_np->rt_parent.name, RT_CLK_NODE_OBJ_NAME, RT_NAME_MAX);
#else
        clk_np->rt_parent.name = RT_CLK_NODE_OBJ_NAME;
#endif

        rt_ref_init(&clk_np->ref);
        rt_list_init(&clk_np->list);
        rt_list_init(&clk_np->children_nodes);
        clk_np->multi_clk = 0;

        if (parent_np)
        {
            clk_np->clk = clk_alloc(clk_np, RT_NULL, RT_NULL, RT_NULL);

            /* clk_alloc() returns an error pointer, not RT_NULL, on failure */
            if (!rt_is_err(clk_np->clk))
            {
                clk_set_parent(clk_np, parent_np);
            }
            else
            {
                err = -RT_ENOMEM;
            }
        }
        else
        {
            clk_np->parent = RT_NULL;

            rt_hw_spin_lock(&_clk_lock.lock);

            rt_list_insert_after(&_clk_nodes, &clk_np->list);

            rt_hw_spin_unlock(&_clk_lock.lock);
        }
    }
    else
    {
        err = -RT_ENOMEM;
    }

    return err;
}

/**
 * @brief Unregister clock node from clock list
 *
 * @param clk_np point to the clock node that will be unregistered
 *
 * @retval RT_EOK
 * @retval -RT_EBUSY
 * @retval -RT_EINVAL
 */
rt_err_t rt_clk_unregister(struct rt_clk_node *clk_np)
{
    rt_err_t err = RT_EOK;

    if (clk_np)
    {
        err = -RT_EBUSY;

        rt_hw_spin_lock(&_clk_lock.lock);

        if (rt_list_isempty(&clk_np->children_nodes))
        {
            if (rt_ref_read(&clk_np->ref) <= 1)
            {
                rt_list_remove(&clk_np->list);

                /* Root nodes are registered without a struct rt_clk */
                if (clk_np->clk)
                {
                    clk_free(clk_np->clk);
                }

                err = RT_EOK;
            }
        }

        rt_hw_spin_unlock(&_clk_lock.lock);
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

/**
 * @brief Register clock notifier into notifier list
 *
 * @param clk point to clock
 * @param notifier point to notifier for register
 *
 * @retval RT_EOK
 * @retval -RT_EINVAL
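 *
 * A minimal notifier sketch (the callback and its log message are hypothetical):
 * @code{.c}
 * static rt_err_t my_rate_notify(struct rt_clk_notifier *notifier,
 *         rt_ubase_t msg, rt_ubase_t old_rate, rt_ubase_t new_rate)
 * {
 *     LOG_I("rate change: %u -> %u", (rt_uint32_t)old_rate, (rt_uint32_t)new_rate);
 *
 *     return RT_EOK;
 * }
 *
 * static struct rt_clk_notifier my_notifier =
 * {
 *     .callback = my_rate_notify,
 * };
 *
 * rt_clk_notifier_register(clk, &my_notifier);
 * @endcode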
 */
rt_err_t rt_clk_notifier_register(struct rt_clk *clk, struct rt_clk_notifier *notifier)
{
    if (!clk || !clk->clk_np || !notifier)
    {
        return -RT_EINVAL;
    }

    rt_hw_spin_lock(&_clk_lock.lock);

    ++clk->clk_np->notifier_count;
    /* Bind the notifier to this clock so clk_notify() can match it */
    notifier->clk = clk;
    rt_list_init(&notifier->list);
    rt_list_insert_after(&_clk_notifier_nodes, &notifier->list);

    rt_hw_spin_unlock(&_clk_lock.lock);

    return RT_EOK;
}

/**
 * @brief Unregister clock notifier from notifier list
 *
 * @param clk point to clock
 * @param notifier point to notifier for unregister
 *
 * @retval RT_EOK
 * @retval -RT_EINVAL
 */
rt_err_t rt_clk_notifier_unregister(struct rt_clk *clk, struct rt_clk_notifier *notifier)
{
    struct rt_clk_notifier *notifier_find;

    if (!clk || !notifier)
    {
        return -RT_EINVAL;
    }

    rt_hw_spin_lock(&_clk_lock.lock);

    rt_list_for_each_entry(notifier_find, &_clk_notifier_nodes, list)
    {
        if (notifier_find->clk->clk_np == notifier->clk->clk_np)
        {
            --clk->clk_np->notifier_count;
            rt_list_remove(&notifier->list);

            break;
        }
    }

    rt_hw_spin_unlock(&_clk_lock.lock);

    return RT_EOK;
}

/**
 * @brief Recursively prepare clock
 *
 * @param clk point to clock that will be prepared
 * @param clk_np point to clock node that will be prepared
 *
 * @return rt_err_t RT_EOK on prepare clock successfully, and other value is failed.
 */
static rt_err_t clk_prepare(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    rt_err_t err = RT_EOK;

    if (clk_np->parent)
    {
        clk_prepare(clk_np->clk, clk_np->parent);
    }

    if (clk->prepare_count == 0 && clk_np->ops->prepare)
    {
        err = clk_np->ops->prepare(clk);
    }

    if (!err)
    {
        ++clk->prepare_count;
    }

    return err;
}

/**
 * @brief Prepare clock
 *
 * @param clk point to clock
 *
 * @return rt_err_t RT_EOK on prepare clock successfully, and other value is failed.
 */
rt_err_t rt_clk_prepare(struct rt_clk *clk)
{
    rt_err_t err = RT_EOK;

    RT_DEBUG_NOT_IN_INTERRUPT;

    if (clk && clk->clk_np)
    {
        rt_hw_spin_lock(&_clk_lock.lock);

        err = clk_prepare(clk, clk->clk_np);

        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}

/**
 * @brief Recursively unprepare clock
 *
 * @param clk point to clock that will be unprepared
 * @param clk_np point to clock node that will be unprepared
 *
 */
static void clk_unprepare(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    if (clk_np->parent)
    {
        clk_unprepare(clk_np->clk, clk_np->parent);
    }

    if (clk->prepare_count == 1 && clk_np->ops->unprepare)
    {
        clk_np->ops->unprepare(clk);
    }
    if (clk->prepare_count)
    {
        --clk->prepare_count;
    }
}

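/**
 * @brief Unprepare clock
 *
 * @param clk point to clock
 *
 * @return rt_err_t RT_EOK on unprepare clock successfully, and other value is failed.
 */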
rt_err_t rt_clk_unprepare(struct rt_clk *clk)
{
    rt_err_t err = RT_EOK;

    RT_DEBUG_NOT_IN_INTERRUPT;

    if (clk && clk->clk_np)
    {
        rt_hw_spin_lock(&_clk_lock.lock);

        clk_unprepare(clk, clk->clk_np);

        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}

/**
 * @brief Recursively enable clock
 *
 * @param clk point to clock
 * @param clk_np point to clock node
 *
 * @return rt_err_t RT_EOK on enable clock successfully, and other value is failed.
 */
static rt_err_t clk_enable(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    rt_err_t err = RT_EOK;

    if (clk_np->parent)
    {
        clk_enable(clk_np->clk, clk_np->parent);
    }

    if (clk->enable_count == 0 && clk_np->ops->enable)
    {
        err = clk_np->ops->enable(clk);
    }

    if (!err)
    {
        ++clk->enable_count;
    }

    return err;
}

/**
 * @brief Enable clock
 *
 * @param clk point to clock
 *
 * @return rt_err_t RT_EOK on enable clock successfully, and other value is failed.
 */
rt_err_t rt_clk_enable(struct rt_clk *clk)
{
    rt_err_t err = RT_EOK;

    if (clk && clk->clk_np)
    {
        rt_hw_spin_lock(&_clk_lock.lock);

        err = clk_enable(clk, clk->clk_np);

        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}

/**
 * @brief Recursively disable clock
 *
 * @param clk point to clock that will be disabled
 * @param clk_np point to clock node that will be disabled
 *
 */
static void clk_disable(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    if (clk_np->parent)
    {
        clk_disable(clk_np->clk, clk_np->parent);
    }

    if (clk->enable_count == 1 && clk_np->ops->disable)
    {
        clk_np->ops->disable(clk);
    }
    if (clk->enable_count)
    {
        --clk->enable_count;
    }
}

/**
 * @brief Disable clock
 *
 * @param clk point to clock
 *
 */
void rt_clk_disable(struct rt_clk *clk)
{
    if (clk && clk->clk_np)
    {
        rt_hw_spin_lock(&_clk_lock.lock);

        clk_disable(clk, clk->clk_np);

        rt_hw_spin_unlock(&_clk_lock.lock);
    }
}

/**
 * @brief Prepare and enable clock
 *
 * @param clk point to clock
 *
 * @return rt_err_t RT_EOK on prepare and enable clock successfully, and other value is failed.
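 *
 * A typical driver call sequence (dev and the clock name "pclk" are assumptions
 * for illustration):
 * @code{.c}
 * struct rt_clk *clk = rt_clk_get_by_name(dev, "pclk");
 *
 * if (clk && !rt_is_err(clk))
 * {
 *     if (!rt_clk_prepare_enable(clk))
 *     {
 *         // ... use the peripheral ...
 *
 *         rt_clk_disable_unprepare(clk);
 *     }
 *
 *     rt_clk_put(clk);
 * }
 * @endcode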
 */
rt_err_t rt_clk_prepare_enable(struct rt_clk *clk)
{
    rt_err_t err = RT_EOK;

    RT_DEBUG_NOT_IN_INTERRUPT;

    if (clk)
    {
        err = rt_clk_prepare(clk);

        if (!err)
        {
            err = rt_clk_enable(clk);

            if (err)
            {
                rt_clk_unprepare(clk);
            }
        }
    }

    return err;
}

/**
 * @brief Disable and unprepare clock
 *
 * @param clk point to clock
 *
 */
void rt_clk_disable_unprepare(struct rt_clk *clk)
{
    RT_DEBUG_NOT_IN_INTERRUPT;

    if (clk)
    {
        rt_clk_disable(clk);
        rt_clk_unprepare(clk);
    }
}

/**
 * @brief Prepare clock array for multiple output clocks
 *
 * @param clk_arr point to clock array
 *
 * @return rt_err_t RT_EOK on prepare clock array successfully, and other value is failed.
 */
rt_err_t rt_clk_array_prepare(struct rt_clk_array *clk_arr)
{
    rt_err_t err = RT_EOK;

    if (clk_arr)
    {
        for (int i = 0; i < clk_arr->count; ++i)
        {
            if ((err = rt_clk_prepare(clk_arr->clks[i])))
            {
                LOG_E("CLK Array[%d] %s failed error = %s", i,
                        "prepare", rt_strerror(err));

                while (i-- > 0)
                {
                    rt_clk_unprepare(clk_arr->clks[i]);
                }

                break;
            }
        }
    }

    return err;
}

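/**
 * @brief Unprepare clock array for multiple output clocks
 *
 * @param clk_arr point to clock array
 *
 * @return rt_err_t RT_EOK on unprepare clock array successfully, and other value is failed.
 */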
rt_err_t rt_clk_array_unprepare(struct rt_clk_array *clk_arr)
{
    rt_err_t err = RT_EOK;

    if (clk_arr)
    {
        for (int i = 0; i < clk_arr->count; ++i)
        {
            if ((err = rt_clk_unprepare(clk_arr->clks[i])))
            {
                LOG_E("CLK Array[%d] %s failed error = %s", i,
                        "unprepare", rt_strerror(err));

                break;
            }
        }
    }

    return err;
}

/**
 * @brief Enable clock array for multiple output clocks
 *
 * @param clk_arr point to clock array
 *
 * @return rt_err_t RT_EOK on enable clock array successfully, and other value is failed.
 */
rt_err_t rt_clk_array_enable(struct rt_clk_array *clk_arr)
{
    rt_err_t err = RT_EOK;

    if (clk_arr)
    {
        for (int i = 0; i < clk_arr->count; ++i)
        {
            if ((err = rt_clk_enable(clk_arr->clks[i])))
            {
                LOG_E("CLK Array[%d] %s failed error = %s", i,
                        "enable", rt_strerror(err));

                while (i-- > 0)
                {
                    rt_clk_disable(clk_arr->clks[i]);
                }

                break;
            }
        }
    }

    return err;
}

/**
 * @brief Disable clock array for multiple output clocks
 *
 * @param clk_arr point to clock array
 *
 */
void rt_clk_array_disable(struct rt_clk_array *clk_arr)
{
    if (clk_arr)
    {
        for (int i = 0; i < clk_arr->count; ++i)
        {
            rt_clk_disable(clk_arr->clks[i]);
        }
    }
}

/**
 * @brief Prepare and enable clock array
 *
 * @param clk_arr point to clock array
 *
 * @return rt_err_t RT_EOK on prepare and enable clock array successfully, and
 *         other value is failed.
 */
rt_err_t rt_clk_array_prepare_enable(struct rt_clk_array *clk_arr)
{
    rt_err_t err;

    if ((err = rt_clk_array_prepare(clk_arr)))
    {
        return err;
    }

    if ((err = rt_clk_array_enable(clk_arr)))
    {
        rt_clk_array_unprepare(clk_arr);
    }

    return err;
}

/**
 * @brief Disable and unprepare clock array
 *
 * @param clk_arr point to clock array
 *
 */
void rt_clk_array_disable_unprepare(struct rt_clk_array *clk_arr)
{
    rt_clk_array_disable(clk_arr);
    rt_clk_array_unprepare(clk_arr);
}

/**
 * @brief Set clock rate range
 *
 * @param clk point to clock
 * @param min minimum clock rate
 * @param max maximum clock rate
 *
 * @return rt_err_t RT_EOK on set clock rate range successfully, and other value is failed.
 */
rt_err_t rt_clk_set_rate_range(struct rt_clk *clk, rt_ubase_t min, rt_ubase_t max)
{
    rt_err_t err = RT_EOK;

    if (clk && clk->clk_np)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        rt_hw_spin_lock(&_clk_lock.lock);

        if (clk_np->ops->set_rate)
        {
            rt_ubase_t rate = clk_np->rate;
            rt_ubase_t old_min = clk_np->min_rate;
            rt_ubase_t old_max = clk_np->max_rate;

            clk_np->min_rate = min;
            clk_np->max_rate = max;

            rate = rt_clamp(rate, min, max);
            err = clk_np->ops->set_rate(clk, rate,
                    rt_clk_get_rate(clk_np->parent ? clk_np->parent->clk : RT_NULL));

            if (err)
            {
                clk_np->min_rate = old_min;
                clk_np->max_rate = old_max;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}

/**
 * @brief Set minimum clock rate
 *
 * @param clk point to clock
 * @param rate minimum clock rate
 *
 * @return rt_err_t RT_EOK on set minimum clock rate successfully, and other value is failed.
 */
rt_err_t rt_clk_set_min_rate(struct rt_clk *clk, rt_ubase_t rate)
{
    rt_err_t err = RT_EOK;

    if (clk && clk->clk_np)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        err = rt_clk_set_rate_range(clk, rate, clk_np->max_rate);
    }

    return err;
}

/**
 * @brief Set maximum clock rate
 *
 * @param clk point to clock
 * @param rate maximum clock rate
 *
 * @return rt_err_t RT_EOK on set maximum clock rate successfully, and other value is failed.
 */
rt_err_t rt_clk_set_max_rate(struct rt_clk *clk, rt_ubase_t rate)
{
    rt_err_t err = RT_EOK;

    if (clk && clk->clk_np)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        err = rt_clk_set_rate_range(clk, clk_np->min_rate, rate);
    }

    return err;
}

/**
 * @brief Set clock rate
 *
 * @param clk point to clock
 * @param rate target rate
 *
 * @return rt_err_t RT_EOK on set clock rate successfully, and other value is failed.
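 *
 * A minimal sketch (the 100 MHz target is an arbitrary example value):
 * @code{.c}
 * if (rt_clk_set_rate(clk, 100000000) == RT_EOK)
 * {
 *     LOG_I("rate is now %u", (rt_uint32_t)rt_clk_get_rate(clk));
 * }
 * @endcode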
 */
rt_err_t rt_clk_set_rate(struct rt_clk *clk, rt_ubase_t rate)
{
    rt_err_t err = RT_EOK;

    rate = rt_clk_round_rate(clk, rate);

    if (clk && clk->clk_np && rate > 0)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        rt_hw_spin_lock(&_clk_lock.lock);

        if (clk_np->min_rate && rate < clk_np->min_rate)
        {
            err = -RT_EINVAL;
        }

        if (clk_np->max_rate && rate > clk_np->max_rate)
        {
            err = -RT_EINVAL;
        }

        if (!err)
        {
            if (clk_np->ops->set_rate)
            {
                rt_ubase_t old_rate = clk_np->rate;

                err = clk_np->ops->set_rate(clk, rate,
                        rt_clk_get_rate(clk_np->parent ? clk_np->parent->clk : RT_NULL));

                if (clk_np->rate != old_rate)
                {
                    /* The rate has already changed here, so send the post-change message */
                    clk_notify(clk_np, RT_CLK_MSG_POST_RATE_CHANGE, old_rate, clk_np->rate);
                }
            }
            else
            {
                err = -RT_ENOSYS;
            }
        }

        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}

/**
 * @brief Get clock rate
 *
 * @param clk point to clock
 *
 * @return rt_ubase_t the clock rate, or 0 if it is unknown
 */
rt_ubase_t rt_clk_get_rate(struct rt_clk *clk)
{
    rt_ubase_t rate = 0;

    if (clk)
    {
        if (clk->rate)
        {
            rate = clk->rate;
        }
        else if (clk->clk_np)
        {
            rate = clk->clk_np->rate;
        }
    }

    return rate;
}

/**
 * @brief Set clock phase
 *
 * @param clk point to clock
 * @param degrees target phase in degrees
 *
 * @return rt_err_t RT_EOK on set clock phase successfully, and other value is failed.
 */
rt_err_t rt_clk_set_phase(struct rt_clk *clk, int degrees)
{
    rt_err_t err = RT_EOK;

    if (clk && clk->clk_np && clk->clk_np->ops->set_phase)
    {
        rt_hw_spin_lock(&_clk_lock.lock);

        err = clk->clk_np->ops->set_phase(clk, degrees);

        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}

/**
 * @brief Get clock phase
 *
 * @param clk point to clock
 *
 * @return rt_base_t clock phase or error code
 */
rt_base_t rt_clk_get_phase(struct rt_clk *clk)
{
    rt_base_t res = RT_EOK;

    if (clk && clk->clk_np && clk->clk_np->ops->get_phase)
    {
        rt_hw_spin_lock(&_clk_lock.lock);

        res = clk->clk_np->ops->get_phase(clk);

        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return res;
}

/**
 * @brief Check that a clock rate is within the minimum/maximum range and round it
 *
 * @param clk point to clock
 * @param rate rate that will be checked
 *
 * @return rt_base_t the rounded rate
 * @note If the rate is less than the minimum or greater than the maximum, the
 *       returned rate is clamped to the minimum or maximum value.
 */
rt_base_t rt_clk_round_rate(struct rt_clk *clk, rt_ubase_t rate)
{
    rt_base_t res = -RT_EINVAL;

    if (clk && clk->clk_np)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        if (clk_np->ops->round_rate)
        {
            rt_ubase_t best_parent_rate;

            rt_hw_spin_lock(&_clk_lock.lock);

            if (clk_np->min_rate && clk_np->max_rate)
            {
                rate = rt_clamp(rate, clk_np->min_rate, clk_np->max_rate);
            }

            res = clk_np->ops->round_rate(clk, rate, &best_parent_rate);
            (void)best_parent_rate;

            rt_hw_spin_unlock(&_clk_lock.lock);
        }
        else
        {
            if (rate < clk_np->min_rate)
            {
                res = clk_np->min_rate;
            }
            else if (rate > clk_np->max_rate)
            {
                res = clk_np->max_rate;
            }
            else
            {
                res = rate;
            }
        }
    }

    return res;
}

/**
 * @brief Set clock parent object
 *
 * @param clk point to clock
 * @param clk_parent point to parent clock
 *
 * @return rt_err_t RT_EOK on set clock parent successfully, and other value is failed.
 */
rt_err_t rt_clk_set_parent(struct rt_clk *clk, struct rt_clk *clk_parent)
{
    rt_err_t err = RT_EOK;

    if (clk && clk->clk_np && clk->clk_np->ops->set_parent)
    {
        rt_hw_spin_lock(&_clk_lock.lock);

        err = clk->clk_np->ops->set_parent(clk, clk_parent);

        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}

/**
 * @brief Get parent clock pointer
 *
 * @param clk child clock
 *
 * @return struct rt_clk* pointer to the parent clock object, or RT_NULL if the
 *         child clock node has no parent
 */
struct rt_clk *rt_clk_get_parent(struct rt_clk *clk)
{
    struct rt_clk *parent = RT_NULL;

    if (clk && clk->clk_np)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        rt_hw_spin_lock(&_clk_lock.lock);

        parent = clk_np->parent ? clk_np->parent->clk : RT_NULL;

        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return parent;
}

/**
 * @brief Get clock array pointer from ofw device node
 *
 * @param dev point to dev
 *
 * @return struct rt_clk_array* clock array pointer under normal circumstances when
 *         OFW is used, otherwise RT_NULL
 */
struct rt_clk_array *rt_clk_get_array(struct rt_device *dev)
{
    struct rt_clk_array *clk_arr = RT_NULL;

#ifdef RT_USING_OFW
    clk_arr = rt_ofw_get_clk_array(dev->ofw_node);
#endif

    return clk_arr;
}

/**
 * @brief Get clock pointer from ofw device node by index
 *
 * @param dev point to dev
 * @param index index of clock object
 *
 * @return struct rt_clk* clock pointer under normal circumstances when OFW is
 *         used, otherwise RT_NULL
 */
struct rt_clk *rt_clk_get_by_index(struct rt_device *dev, int index)
{
    struct rt_clk *clk = RT_NULL;

#ifdef RT_USING_OFW
    clk = rt_ofw_get_clk(dev->ofw_node, index);
#endif

    return clk;
}

/**
 * @brief Get clock pointer from ofw device node by name
 *
 * @param dev point to dev
 * @param name name of clock object
 *
 * @return struct rt_clk* clock pointer under normal circumstances when OFW is
 *         used, otherwise RT_NULL
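 *
 * A lookup sketch (assuming the device tree assigns this device a clock named
 * "baudclk"):
 * @code{.c}
 * struct rt_clk *clk = rt_clk_get_by_name(dev, "baudclk");
 *
 * if (clk && !rt_is_err(clk))
 * {
 *     LOG_I("baudclk = %u Hz", (rt_uint32_t)rt_clk_get_rate(clk));
 * }
 * @endcode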
 */
struct rt_clk *rt_clk_get_by_name(struct rt_device *dev, const char *name)
{
    struct rt_clk *clk = RT_NULL;

#ifdef RT_USING_OFW
    clk = rt_ofw_get_clk_by_name(dev->ofw_node, name);
#endif

    return clk;
}

/**
 * @brief Put reference count of all clocks in the clock array
 *
 * @param clk_arr point to clock array
 *
 */
void rt_clk_array_put(struct rt_clk_array *clk_arr)
{
    if (clk_arr)
    {
        for (int i = 0; i < clk_arr->count; ++i)
        {
            if (clk_arr->clks[i])
            {
                rt_clk_put(clk_arr->clks[i]);
            }
            else
            {
                break;
            }
        }

        rt_free(clk_arr);
    }
}

/**
 * @brief Put reference count of clock
 *
 * @param clk point to clock
 *
 */
void rt_clk_put(struct rt_clk *clk)
{
    if (clk)
    {
        clk_put(clk->clk_np);
        clk_free(clk);
    }
}

#ifdef RT_USING_OFW
/**
 * @brief Get a clock object from a device tree node without acquiring a lock
 *
 * @param np point to ofw node
 * @param index index of clock in ofw
 * @param name connection identifier for the clock
 * @param locked flag indicating whether the caller holds the lock
 *
 * @return struct rt_clk* point to the newly created clock object, or an error pointer
 */
static struct rt_clk *ofw_get_clk_no_lock(struct rt_ofw_node *np, int index, const char *name, rt_bool_t locked)
{
    struct rt_clk *clk = RT_NULL;
    struct rt_ofw_cell_args clk_args;

    if (!rt_ofw_parse_phandle_cells(np, "clocks", "#clock-cells", index, &clk_args))
    {
        int count;
        struct rt_object *obj;
        struct rt_clk_node *clk_np = RT_NULL;
        struct rt_ofw_node *clk_ofw_np = clk_args.data;

        if (!rt_ofw_data(clk_ofw_np))
        {
            if (locked)
            {
                rt_hw_spin_unlock(&_clk_lock.lock);
            }

            rt_platform_ofw_request(clk_ofw_np);

            if (locked)
            {
                rt_hw_spin_lock(&_clk_lock.lock);
            }
        }

        if (rt_ofw_data(clk_ofw_np) && (obj = rt_ofw_parse_object(clk_ofw_np,
                RT_CLK_NODE_OBJ_NAME, "#clock-cells")))
        {
            clk_np = rt_container_of(obj, struct rt_clk_node, rt_parent);

            count = rt_ofw_count_of_clk(clk_ofw_np);
        }

        rt_ofw_node_put(clk_ofw_np);

        if (clk_np)
        {
            if (count > 1)
            {
                /* args[0] must be the index of CLK */
                clk_np = &clk_np[clk_args.args[0]];
            }

            clk = clk_create(clk_np, np->full_name, name, &clk_args, np);
        }
        else
        {
            clk = rt_err_ptr(-RT_ERROR);
        }
    }

    return clk;
}

/**
 * @brief Get clock from ofw while holding the spin lock
 *
 * @param np point to ofw node
 * @param index index of clock in ofw
 * @param name connection identifier for the clock
 *
 * @return struct rt_clk* point to the newly created clock object, or an error pointer
 */
static struct rt_clk *ofw_get_clk(struct rt_ofw_node *np, int index, const char *name)
{
    struct rt_clk *clk;

    rt_hw_spin_lock(&_clk_lock.lock);

    clk = ofw_get_clk_no_lock(np, index, name, RT_TRUE);

    rt_hw_spin_unlock(&_clk_lock.lock);

    return clk;
}

/**
 * @brief Get clock array from ofw
 *
 * @param np point to ofw node
 *
 * @return struct rt_clk_array* point to the newly created clock array, or an error pointer
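 *
 * A usage sketch (np is assumed to be a device node whose "clocks" property
 * lists several clocks):
 * @code{.c}
 * struct rt_clk_array *clk_arr = rt_ofw_get_clk_array(np);
 *
 * if (clk_arr && !rt_is_err(clk_arr))
 * {
 *     rt_clk_array_prepare_enable(clk_arr);
 * }
 * @endcode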
 */
struct rt_clk_array *rt_ofw_get_clk_array(struct rt_ofw_node *np)
{
    int count;
    struct rt_clk_array *clk_arr = RT_NULL;

    if (!np)
    {
        return rt_err_ptr(-RT_EINVAL);
    }

    if ((count = rt_ofw_count_phandle_cells(np, "clocks", "#clock-cells")) > 0)
    {
        clk_arr = rt_calloc(1, sizeof(*clk_arr) + sizeof(clk_arr->clks[0]) * count);

        if (clk_arr)
        {
            int i;
            rt_err_t err = RT_EOK;
            rt_bool_t has_name = rt_ofw_prop_read_bool(np, "clock-names");

            clk_arr->count = count;

            rt_hw_spin_lock(&_clk_lock.lock);

            for (i = 0; i < count; ++i)
            {
                const char *name = RT_NULL;

                if (has_name)
                {
                    rt_ofw_prop_read_string_index(np, "clock-names", i, &name);
                }

                clk_arr->clks[i] = ofw_get_clk_no_lock(np, i, name, RT_FALSE);

                if (rt_is_err(clk_arr->clks[i]))
                {
                    err = rt_ptr_err(clk_arr->clks[i]);
                    /* Drop the error pointer so rt_clk_array_put() stops here */
                    clk_arr->clks[i] = RT_NULL;

                    break;
                }
            }

            rt_hw_spin_unlock(&_clk_lock.lock);

            if (i < count)
            {
                rt_clk_array_put(clk_arr);
                clk_arr = rt_err_ptr(err);
            }
        }
    }

    return clk_arr;
}

/**
 * @brief Get clock from ofw by index and node pointer, taking the spin lock
 *
 * @param np point to ofw node
 * @param index index of clock in ofw
 *
 * @return struct rt_clk* point to the newly created clock object, or an error pointer
 */
struct rt_clk *rt_ofw_get_clk(struct rt_ofw_node *np, int index)
{
    struct rt_clk *clk = RT_NULL;

    if (np && index >= 0)
    {
        clk = ofw_get_clk(np, index, RT_NULL);
    }

    return clk;
}

/**
 * @brief Get clock from ofw by name, taking the spin lock
 *
 * @param np point to ofw node
 * @param name name of the clock that will be returned
 *
 * @return struct rt_clk* point to the newly created clock object, or an error pointer
 */
struct rt_clk *rt_ofw_get_clk_by_name(struct rt_ofw_node *np, const char *name)
{
    struct rt_clk *clk = RT_NULL;

    if (np && name)
    {
        int index = rt_ofw_prop_index_of_string(np, "clock-names", name);

        if (index >= 0)
        {
            clk = ofw_get_clk(np, index, name);
        }
    }

    return clk;
}

/**
 * @brief Count number of clocks in ofw
 *
 * @param clk_ofw_np point to ofw node
 *
 * @return rt_ssize_t number of clocks
 */
rt_ssize_t rt_ofw_count_of_clk(struct rt_ofw_node *clk_ofw_np)
{
    if (clk_ofw_np)
    {
        struct rt_clk_node *clk_np = rt_ofw_data(clk_ofw_np);

        if (clk_np && clk_np->multi_clk)
        {
            return clk_np->multi_clk;
        }
        else
        {
            const fdt32_t *cell;
            rt_uint32_t count = 0;
            struct rt_ofw_prop *prop;

            prop = rt_ofw_get_prop(clk_ofw_np, "clock-indices", RT_NULL);

            if (prop)
            {
                rt_uint32_t max_idx = 0, idx;

                for (cell = rt_ofw_prop_next_u32(prop, RT_NULL, &idx);
                     cell;
                     cell = rt_ofw_prop_next_u32(prop, cell, &idx))
                {
                    if (idx > max_idx)
                    {
                        max_idx = idx;
                    }
                }

                count = max_idx + 1;
            }
            else
            {
                rt_ssize_t len;

                if ((prop = rt_ofw_get_prop(clk_ofw_np, "clock-output-names", &len)))
                {
                    char *value = prop->value;

                    for (int i = 0; i < len; ++i, ++value)
                    {
                        if (*value == '\0')
                        {
                            ++count;
                        }
                    }
                }
                else
                {
                    count = 1;
                }
            }

            if (clk_np)
            {
                clk_np->multi_clk = count;
            }

            return count;
        }
    }

    return -RT_EINVAL;
}

#endif /* RT_USING_OFW */

/**@}*/