// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021, Bootlin
 * Copyright (c) 2023, STMicroelectronics
 */

#include <config.h>
#include <drivers/clk.h>
#include <kernel/boot.h>
#include <kernel/mutex_pm_aware.h>
#include <kernel/panic.h>
#include <kernel/thread.h>
#include <malloc.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Global clock tree access protection complying with power state transitions
 */
static struct mutex_pm_aware mu = MUTEX_PM_AWARE_INITIALIZER;

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
static SLIST_HEAD(, clk) clock_list = SLIST_HEAD_INITIALIZER(clock_list);
#endif

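/*
 * Helpers wrapping the PM-aware mutex that serializes all clock tree
 * modifications (enable refcounts, rates, parent changes)
 */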
static void lock_clk(void)
{
	mutex_pm_aware_lock(&mu);
}

static void unlock_clk(void)
{
	mutex_pm_aware_unlock(&mu);
}

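/*
 * Allocate a struct clk with storage for its parents pointer array appended
 * to the same allocation, then initialize name, ops and parent references
 */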
struct clk *clk_alloc(const char *name, const struct clk_ops *ops,
		      struct clk **parent_clks, size_t parent_count)
{
	struct clk *clk = NULL;
	size_t parent = 0;

	clk = calloc(1, sizeof(*clk) + parent_count * sizeof(clk));
	if (!clk)
		return NULL;

	clk->num_parents = parent_count;
	for (parent = 0; parent < parent_count; parent++)
		clk->parents[parent] = parent_clks[parent];

	clk->name = name;
	clk->ops = ops;
	refcount_set(&clk->enabled_count, 0);

	return clk;
}

void clk_free(struct clk *clk)
{
	free(clk);
}

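/*
 * Sanity check of the clock operation handlers consistency: a clock that can
 * switch parents or that has several possible parents must expose a
 * get_parent handler
 */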
static bool __maybe_unused clk_check(struct clk *clk)
{
	if (!clk || !clk->ops)
		return false;

	if (clk->ops->set_parent && !clk->ops->get_parent)
		return false;

	if (clk->num_parents > 1 && !clk->ops->get_parent)
		return false;

	return true;
}

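/*
 * Refresh the cached rate of a clock from its parent rate or from the
 * get_rate handler when one is provided
 */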
static void clk_compute_rate_no_lock(struct clk *clk)
{
	unsigned long parent_rate = 0;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->get_rate)
		clk->rate = clk->ops->get_rate(clk, parent_rate);
	else
		clk->rate = parent_rate;
}

struct clk *clk_get_parent_by_index(struct clk *clk, size_t pidx)
{
	if (pidx >= clk->num_parents)
		return NULL;

	return clk->parents[pidx];
}

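/*
 * Set the initial active parent: none, the single possible parent, or the
 * one reported by the get_parent handler when there are several candidates
 */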
static void clk_init_parent(struct clk *clk)
{
	size_t pidx = 0;

	switch (clk->num_parents) {
	case 0:
		break;
	case 1:
		clk->parent = clk->parents[0];
		break;
	default:
		pidx = clk->ops->get_parent(clk);
		assert(pidx < clk->num_parents);

		clk->parent = clk->parents[pidx];
		break;
	}
}

TEE_Result clk_register(struct clk *clk)
{
	assert(clk_check(clk));

	clk_init_parent(clk);
	clk_compute_rate_no_lock(clk);

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
	SLIST_INSERT_HEAD(&clock_list, clk, link);
#endif

	DMSG("Registered clock %s, freq %lu", clk->name, clk_get_rate(clk));

	return TEE_SUCCESS;
}
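
/*
 * Illustrative provider-side usage sketch (my_clk_ops and the clock name
 * below are hypothetical, not part of this driver):
 *
 *	struct clk *clk = clk_alloc("my-clk", &my_clk_ops, parents, count);
 *
 *	if (!clk || clk_register(clk))
 *		panic();
 *	if (clk_enable(clk))
 *		panic();
 */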

static bool clk_is_enabled_no_lock(struct clk *clk)
{
	return refcount_val(&clk->enabled_count) != 0;
}

bool clk_is_enabled(struct clk *clk)
{
	return clk_is_enabled_no_lock(clk);
}

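/*
 * Decrement the enable refcount and, when it reaches zero, call the
 * disable handler and release the reference held on the parent clock
 */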
static void clk_disable_no_lock(struct clk *clk)
{
	struct clk *parent = NULL;

	if (!refcount_dec(&clk->enabled_count))
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk);

	parent = clk_get_parent(clk);
	if (parent)
		clk_disable_no_lock(parent);
}

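/*
 * Increment the enable refcount. On the 0-to-1 transition, first enable the
 * parent chain, then the clock itself, rolling back the parent on failure
 */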
static TEE_Result clk_enable_no_lock(struct clk *clk)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	struct clk *parent = NULL;

	if (refcount_inc(&clk->enabled_count))
		return TEE_SUCCESS;

	parent = clk_get_parent(clk);
	if (parent) {
		res = clk_enable_no_lock(parent);
		if (res)
			return res;
	}

	if (clk->ops->enable) {
		res = clk->ops->enable(clk);
		if (res) {
			if (parent)
				clk_disable_no_lock(parent);

			return res;
		}
	}

	refcount_set(&clk->enabled_count, 1);

	return TEE_SUCCESS;
}

TEE_Result clk_enable(struct clk *clk)
{
	TEE_Result res = TEE_ERROR_GENERIC;

	lock_clk();
	res = clk_enable_no_lock(clk);
	unlock_clk();

	return res;
}

void clk_disable(struct clk *clk)
{
	lock_clk();
	clk_disable_no_lock(clk);
	unlock_clk();
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}

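/*
 * Apply a rate change: with CLK_SET_RATE_PARENT the request is first
 * forwarded to the parent, with CLK_SET_RATE_UNGATE the clock is transiently
 * enabled around the set_rate handler. The cached rate is refreshed on
 * success.
 */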
static TEE_Result clk_set_rate_no_lock(struct clk *clk, unsigned long rate)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	unsigned long parent_rate = 0;

	if (clk->parent)
		parent_rate = clk_get_rate(clk->parent);

	assert(!(clk->flags & CLK_SET_RATE_PARENT) || clk->parent);
	if (clk->flags & CLK_SET_RATE_PARENT) {
		res = clk_set_rate_no_lock(clk->parent, rate);
		if (res)
			return res;
		rate = clk_get_rate(clk->parent);
	}

	if (clk->ops->set_rate) {
		if (clk->flags & CLK_SET_RATE_UNGATE) {
			res = clk_enable_no_lock(clk);
			if (res)
				return res;
		}

		res = clk->ops->set_rate(clk, rate, parent_rate);

		if (clk->flags & CLK_SET_RATE_UNGATE)
			clk_disable_no_lock(clk);

		if (res)
			return res;
	}

	clk_compute_rate_no_lock(clk);

	return TEE_SUCCESS;
}

TEE_Result clk_set_rate(struct clk *clk, unsigned long rate)
{
	TEE_Result res = TEE_ERROR_GENERIC;

	lock_clk();

	if (clk->flags & CLK_SET_RATE_GATE && clk_is_enabled_no_lock(clk))
		res = TEE_ERROR_BAD_STATE;
	else
		res = clk_set_rate_no_lock(clk, rate);

	unlock_clk();

	return res;
}

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}

static TEE_Result clk_get_parent_idx(struct clk *clk, struct clk *parent,
				     size_t *pidx)
{
	size_t i = 0;

	for (i = 0; i < clk_get_num_parents(clk); i++) {
		if (clk_get_parent_by_index(clk, i) == parent) {
			*pidx = i;
			return TEE_SUCCESS;
		}
	}
	EMSG("Clock %s is not a parent of clock %s", parent->name, clk->name);

	return TEE_ERROR_BAD_PARAMETERS;
}

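/*
 * Switch the active parent of a clock. When the clock is enabled, its
 * reference on the old parent tree is dropped before the switch and a
 * reference on the new parent tree is taken after it, optionally keeping
 * the new parent running during the transition (CLK_SET_PARENT_PRE_ENABLE)
 */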
static TEE_Result clk_set_parent_no_lock(struct clk *clk, struct clk *parent,
					 size_t pidx)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	bool was_enabled = false;

	/* Requested parent is already the one set */
	if (clk->parent == parent)
		return TEE_SUCCESS;

	was_enabled = clk_is_enabled_no_lock(clk);
	/* Call is needed to decrement refcount on current parent tree */
	if (was_enabled) {
		if (clk->flags & CLK_SET_PARENT_PRE_ENABLE) {
			res = clk_enable_no_lock(parent);
			if (res)
				return res;
		}

		clk_disable_no_lock(clk);
	}

	res = clk->ops->set_parent(clk, pidx);
	if (res)
		goto out;

	clk->parent = parent;

	/* The parent changed and the rate might also have changed */
	clk_compute_rate_no_lock(clk);

out:
	/* Call is needed to increment refcount on the new parent tree */
	if (was_enabled) {
		res = clk_enable_no_lock(clk);
		if (res)
			panic("Failed to re-enable clock after setting parent");

		if (clk->flags & CLK_SET_PARENT_PRE_ENABLE) {
			/* Balance refcount when new parent was pre-enabled */
			clk_disable_no_lock(parent);
		}
	}

	return res;
}

TEE_Result clk_set_parent(struct clk *clk, struct clk *parent)
{
	size_t pidx = 0;
	TEE_Result res = TEE_ERROR_GENERIC;

	if (clk_get_parent_idx(clk, parent, &pidx) || !clk->ops->set_parent)
		return TEE_ERROR_BAD_PARAMETERS;

	lock_clk();
	if (clk->flags & CLK_SET_PARENT_GATE && clk_is_enabled_no_lock(clk)) {
		res = TEE_ERROR_BAD_STATE;
		goto out;
	}

	res = clk_set_parent_no_lock(clk, parent, pidx);
out:
	unlock_clk();

	return res;
}

TEE_Result clk_get_rates_array(struct clk *clk, size_t start_index,
			       unsigned long *rates, size_t *nb_elts)
{
	if (!clk->ops->get_rates_array)
		return TEE_ERROR_NOT_SUPPORTED;

	return clk->ops->get_rates_array(clk, start_index, rates, nb_elts);
}

TEE_Result clk_get_rates_steps(struct clk *clk, unsigned long *min,
			       unsigned long *max, unsigned long *step)
{
	if (!clk->ops->get_rates_steps)
		return TEE_ERROR_NOT_SUPPORTED;

	return clk->ops->get_rates_steps(clk, min, max, step);
}

TEE_Result clk_get_duty_cycle(struct clk *clk,
			      struct clk_duty_cycle *duty_cycle)
{
	if (clk->ops->get_duty_cycle)
		return clk->ops->get_duty_cycle(clk, duty_cycle);

	if (clk->parent && (clk->flags & CLK_DUTY_CYCLE_PARENT))
		return clk_get_duty_cycle(clk->parent, duty_cycle);

	/* Default to a 50% duty cycle */
	duty_cycle->num = 1;
	duty_cycle->den = 2;

	return TEE_SUCCESS;
}

/* Return updated message buffer position or NULL on failure */
static __printf(3, 4) char *add_msg(char *cur, char *end, const char *fmt, ...)
{
	va_list ap = { };
	int max_len = end - cur;
	int ret = 0;

	va_start(ap, fmt);
	ret = vsnprintf(cur, max_len, fmt, ap);
	va_end(ap);

	if (ret < 0 || ret >= max_len)
		return NULL;

	return cur + ret;
}

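/*
 * Return the next registered clock whose active parent is @parent, starting
 * after @sibling, or the first such clock when @sibling is NULL. Returns
 * NULL when the list is exhausted or the clock list is not built in.
 */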
static struct clk *find_next_clk(struct clk *parent __maybe_unused,
				 struct clk *sibling __maybe_unused)
{
	struct clk *clk = NULL;

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
	if (sibling)
		clk = SLIST_NEXT(sibling, link);
	else
		clk = SLIST_FIRST(&clock_list);

	while (clk && clk->parent != parent)
		clk = SLIST_NEXT(clk, link);
#endif

	return clk;
}

static bool clk_is_parent_last_child(struct clk *clk)
{
	return !find_next_clk(clk->parent, clk);
}

static bool indent_last_node_already_found(struct clk *node_clk,
					   int node_indent, int cur_indent)
{
	struct clk *clk = node_clk;
	int n = 0;

	/* Find parent clock at level @node_indent - @cur_indent - 1 */
	for (n = 0; n < node_indent - cur_indent - 1; n++)
		clk = clk->parent;

	return clk_is_parent_last_child(clk);
}

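/*
 * Print one clock as an ASCII tree node: indentation and branch characters
 * reflect its depth and position, followed by its name, on/off state, enable
 * refcount and rate scaled to a human readable unit
 */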
static void __maybe_unused print_clk(struct clk *clk, int indent)
{
	static const char * const rate_unit[] = { "Hz", "kHz", "MHz", "GHz" };
	int max_unit = ARRAY_SIZE(rate_unit);
	unsigned long rate = 0;
	char msg_buf[128] = { };
	char *msg_end = msg_buf + sizeof(msg_buf);
	char *msg = msg_buf;
	int n = 0;

	/*
	 * Currently prints the clock state based on the clock refcount.
	 * A future change could print the hardware clock state when the
	 * related clock driver provides a struct clk_ops::is_enabled handler.
	 */

	if (indent) {
		/* Indent for root clock level */
		msg = add_msg(msg, msg_end, "   ");
		if (!msg)
			goto out;

		/* Indent for root parent to clock parent levels */
		for (n = 0; n < indent - 1; n++) {
			if (indent_last_node_already_found(clk, indent, n))
				msg = add_msg(msg, msg_end, "    ");
			else
				msg = add_msg(msg, msg_end, "|   ");

			if (!msg)
				goto out;
		}

		/* Clock indentation */
		if (clk_is_parent_last_child(clk))
			msg = add_msg(msg, msg_end, "`-- ");
		else
			msg = add_msg(msg, msg_end, "|-- ");
	} else {
		/* Root clock indentation */
		msg = add_msg(msg, msg_end, "o- ");
	}
	if (!msg)
		goto out;

	rate = clk_get_rate(clk);
	for (n = 1; rate && !(rate % 1000) && n < max_unit; n++)
		rate /= 1000;

	msg = add_msg(msg, msg_end, "%s \t(%3s / refcnt %u / %ld %s)",
		      clk_get_name(clk),
		      refcount_val(&clk->enabled_count) ? "on " : "off",
		      refcount_val(&clk->enabled_count),
		      rate, rate_unit[n - 1]);
	if (!msg)
		goto out;

out:
	if (!msg)
		snprintf(msg_end - 4, 4, "...");

	IMSG("%s", msg_buf);
}

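/*
 * Walk the registered clocks depth first without recursion: descend into the
 * children of the clock just printed, and when a level is exhausted climb
 * back to the parent to resume with its next sibling
 */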
static void print_tree(void)
{
	struct clk *clk = NULL;
	struct clk *parent = NULL;
	struct clk *next = NULL;
	int indent = -1;

#ifdef CFG_DRIVERS_CLK_PRINT_TREE
	if (SLIST_EMPTY(&clock_list)) {
		IMSG("-- No registered clock");
		return;
	}
#endif

	while (true) {
		next = find_next_clk(parent, clk);
		if (next) {
			print_clk(next, indent + 1);
			/* Enter the subtree of the next clock */
			parent = next;
			indent++;
			clk = NULL;
		} else {
			/*
			 * We've processed all children at this level.
			 * If parent is NULL we're at the top and are done.
			 */
			if (!parent)
				break;
			/*
			 * Move up one level to resume with the next
			 * child clock of the parent.
			 */
			clk = parent;
			parent = clk->parent;
			indent--;
		}
	}
}

void clk_print_tree(void)
{
	if (IS_ENABLED(CFG_DRIVERS_CLK_PRINT_TREE) &&
	    TRACE_LEVEL >= TRACE_INFO) {
		IMSG("Clock tree summary (informative):");
		print_tree();
	}
}