1 /*
2  * Copyright (c) 2024, Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/debug/cpu_load.h>
8 #include <zephyr/kernel.h>
9 #include <zephyr/drivers/counter.h>
10 #include <zephyr/logging/log.h>
11 LOG_MODULE_REGISTER(cpu_load, CONFIG_CPU_LOAD_LOG_LEVEL);
12 
13 BUILD_ASSERT(!IS_ENABLED(CONFIG_CPU_LOAD_USE_COUNTER) || DT_HAS_CHOSEN(zephyr_cpu_load_counter));
14 
15 #ifndef CONFIG_CPU_LOAD_LOG_PERIODICALLY
16 #define CONFIG_CPU_LOAD_LOG_PERIODICALLY 0
17 #endif
18 
19 static const struct device *counter = COND_CODE_1(CONFIG_CPU_LOAD_USE_COUNTER,
20 				(DEVICE_DT_GET(DT_CHOSEN(zephyr_cpu_load_counter))), (NULL));
21 static uint32_t enter_ts;
22 static uint64_t cyc_start;
23 static uint64_t ticks_idle;
24 
25 static cpu_load_cb_t load_cb;
26 static uint8_t cpu_load_threshold_percent;
27 
cpu_load_log_fn(struct k_timer * dummy)28 static void cpu_load_log_fn(struct k_timer *dummy)
29 {
30 	int load = cpu_load_get(true);
31 	uint32_t percent = load / 10;
32 	uint32_t fraction = load % 10;
33 
34 	LOG_INF("Load:%d.%03d%%", percent, fraction);
35 	if (load_cb != NULL && percent >= cpu_load_threshold_percent) {
36 		load_cb(percent);
37 	}
38 }
39 
40 K_TIMER_DEFINE(cpu_load_timer, cpu_load_log_fn, NULL);
41 
/* Start or stop periodic CPU load logging.
 *
 * No-op when CONFIG_CPU_LOAD_LOG_PERIODICALLY is 0 (feature disabled).
 *
 * @param enable true to (re)start the periodic log timer, false to stop it.
 */
void cpu_load_log_control(bool enable)
{
	if (CONFIG_CPU_LOAD_LOG_PERIODICALLY == 0) {
		return;
	}

	if (!enable) {
		k_timer_stop(&cpu_load_timer);
		return;
	}

	k_timer_start(&cpu_load_timer, K_MSEC(CONFIG_CPU_LOAD_LOG_PERIODICALLY),
		      K_MSEC(CONFIG_CPU_LOAD_LOG_PERIODICALLY));
}
54 
/* Register a callback invoked from the periodic log handler whenever the
 * measured load reaches @p threshold_percent. Pass cb == NULL to unregister.
 *
 * @param threshold_percent Threshold in whole percent (0..100).
 *
 * @retval 0 on success.
 * @retval -EINVAL if @p threshold_percent exceeds 100.
 */
int cpu_load_cb_reg(cpu_load_cb_t cb, uint8_t threshold_percent)
{
	if (threshold_percent > 100U) {
		return -EINVAL;
	}

	load_cb = cb;
	cpu_load_threshold_percent = threshold_percent;

	return 0;
}
65 
66 #if CONFIG_CPU_LOAD_USE_COUNTER || CONFIG_CPU_LOAD_LOG_PERIODICALLY
67 
cpu_load_init(void)68 static int cpu_load_init(void)
69 {
70 	if (IS_ENABLED(CONFIG_CPU_LOAD_USE_COUNTER)) {
71 		int err = counter_start(counter);
72 
73 		(void)err;
74 		__ASSERT_NO_MSG(err == 0);
75 	}
76 
77 	if (CONFIG_CPU_LOAD_LOG_PERIODICALLY > 0) {
78 		k_timer_start(&cpu_load_timer, K_MSEC(CONFIG_CPU_LOAD_LOG_PERIODICALLY),
79 			      K_MSEC(CONFIG_CPU_LOAD_LOG_PERIODICALLY));
80 	}
81 
82 	return 0;
83 }
84 
85 SYS_INIT(cpu_load_init, POST_KERNEL, 0);
86 #endif
87 
cpu_load_on_enter_idle(void)88 void cpu_load_on_enter_idle(void)
89 {
90 	if (IS_ENABLED(CONFIG_CPU_LOAD_USE_COUNTER)) {
91 		counter_get_value(counter, &enter_ts);
92 		return;
93 	}
94 
95 	enter_ts = k_cycle_get_32();
96 }
97 
cpu_load_on_exit_idle(void)98 void cpu_load_on_exit_idle(void)
99 {
100 	uint32_t now;
101 
102 	if (IS_ENABLED(CONFIG_CPU_LOAD_USE_COUNTER)) {
103 		counter_get_value(counter, &now);
104 	} else {
105 		now = k_cycle_get_32();
106 	}
107 
108 	ticks_idle += now - enter_ts;
109 }
110 
/* Get the CPU load measured since the last reset.
 *
 * @param reset When true, restart the measurement window (zero the idle
 *              accumulator and move the window start to now).
 *
 * @return Load in per-mille (0..1000) on success, or -ERANGE when the
 *         accumulated idle tick count no longer fits the counter conversion.
 */
int cpu_load_get(bool reset)
{
	uint64_t idle_us;
	uint64_t now = IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER) ?
		k_cycle_get_64() : k_cycle_get_32();
	uint64_t total = now - cyc_start;
	uint64_t total_us = k_cyc_to_us_floor64(total);
	uint32_t res;
	uint64_t active_us;

	if (IS_ENABLED(CONFIG_CPU_LOAD_USE_COUNTER)) {
		/* counter_ticks_to_us() takes a 32-bit tick count. */
		if (ticks_idle > (uint64_t)UINT32_MAX) {
			return -ERANGE;
		}
		idle_us = counter_ticks_to_us(counter, (uint32_t)ticks_idle);
	} else {
		idle_us = k_cyc_to_us_floor64(ticks_idle);
	}

	/* Guard the division below: if less than a full microsecond has
	 * elapsed since the last reset, total_us is 0 and dividing by it
	 * would be undefined behavior. Report 0% load for an empty window.
	 */
	if (total_us == 0U) {
		if (reset) {
			cyc_start = now;
			ticks_idle = 0;
		}
		return 0;
	}

	/* Idle time measured on a different time base may slightly exceed
	 * the total; clamp so active_us cannot underflow.
	 */
	idle_us = MIN(idle_us, total_us);
	active_us = total_us - idle_us;

	res = (uint32_t)((1000 * active_us) / total_us);

	if (reset) {
		cyc_start = now;
		ticks_idle = 0;
	}

	return res;
}
142