/*
 * Copyright (c) 2024, Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/debug/cpu_load.h>
#include <zephyr/ztest.h>
#include <zephyr/logging/log_ctrl.h>
#include <zephyr/logging/log_backend.h>

#include <zephyr/drivers/counter.h>

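/* Allowed deviation, in per mille, for the CPU load assertions below. */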
#define DELTA 30

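/*
 * Busy-waiting keeps the CPU fully loaded while sleeping keeps it idle:
 * expect ~100 % load after a busy-wait, ~50 % after an additional sleep of
 * equal length without a reset, and ~0 % after a reset followed by a sleep.
 */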
ZTEST(cpu_load, test_load)
{
	int load;
	uint32_t t_ms = 100;

	if (CONFIG_CPU_LOAD_LOG_PERIODICALLY > 0) {
		cpu_load_log_control(false);
	}

	/* Reset the measurement */
	(void)cpu_load_get(true);
	k_busy_wait(t_ms * USEC_PER_MSEC);

	/* Measurement is not reset. */
	load = cpu_load_get(false);
	/* Result in per mille */
	zassert_within(load, 1000, DELTA);

	k_msleep(t_ms);
	load = cpu_load_get(false);
	zassert_within(load, 500, DELTA);

	/* Reset the measurement */
	(void)cpu_load_get(true);
	k_msleep(t_ms);
	load = cpu_load_get(false);
	zassert_within(load, 0, DELTA);
}

#if CONFIG_CPU_LOAD_LOG_PERIODICALLY > 0
static int cpu_load_src_id;
static atomic_t log_cnt;

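/* Mock log backend that counts messages coming from the cpu_load log source. */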
static void process(const struct log_backend *const backend,
		union log_msg_generic *msg)
{
	ARG_UNUSED(backend);
	const void *source = msg->log.hdr.source;
	int source_id = log_const_source_id((const struct log_source_const_data *)source);

	if (source_id == cpu_load_src_id) {
		atomic_inc(&log_cnt);
	}
}

static void init(const struct log_backend *const backend)
{
	ARG_UNUSED(backend);
}

const struct log_backend_api mock_log_backend_api = {
	.process = process,
	.init = init
};

LOG_BACKEND_DEFINE(dummy, mock_log_backend_api, false, NULL);

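/*
 * With periodic reporting enabled, roughly one message per
 * CONFIG_CPU_LOAD_LOG_PERIODICALLY interval should reach the backend;
 * with it disabled, none should.
 */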
ZTEST(cpu_load, test_periodic_report)
{
	log_backend_enable(&dummy, NULL, LOG_LEVEL_INF);
	cpu_load_log_control(true);

	cpu_load_src_id = log_source_id_get(STRINGIFY(cpu_load));
	atomic_set(&log_cnt, 0);
	k_msleep(3 * CONFIG_CPU_LOAD_LOG_PERIODICALLY);
	zassert_within(log_cnt, 3, 1);

	cpu_load_log_control(false);
	k_msleep(1);
	atomic_set(&log_cnt, 0);
	k_msleep(3 * CONFIG_CPU_LOAD_LOG_PERIODICALLY);
	zassert_equal(log_cnt, 0);

	cpu_load_log_control(true);
	k_msleep(3 * CONFIG_CPU_LOAD_LOG_PERIODICALLY);
	zassert_within(log_cnt, 3, 1);

	cpu_load_log_control(false);
	log_backend_disable(&dummy);
}

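/*
 * Registered with a 99 % threshold in test_callback_load_low; the test only
 * sleeps, so this callback must never fire.
 */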
void low_load_cb(uint8_t percent)
{
	/* Should never be called */
	zassert_true(false, NULL);
}

static uint32_t num_load_callbacks;
static uint8_t last_cpu_load_percent;

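/* Records the last reported load and counts how often the callback fires. */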
void high_load_cb(uint8_t percent)
{
	last_cpu_load_percent = percent;
	num_load_callbacks++;
}

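/* Sleeping keeps the load well below 99 %, so the threshold callback is never invoked. */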
ZTEST(cpu_load, test_callback_load_low)
{
	int ret = cpu_load_cb_reg(low_load_cb, 99);

	zassert_equal(ret, 0);
	k_msleep(CONFIG_CPU_LOAD_LOG_PERIODICALLY * 4);
	zassert_equal(num_load_callbacks, 0);
}

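/*
 * Busy-waiting keeps the load at or above the 99 % threshold, so the callback
 * fires repeatedly until it is deregistered by passing NULL.
 */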
ZTEST(cpu_load, test_callback_load_high)
{
	int ret = cpu_load_cb_reg(high_load_cb, 99);

	zassert_equal(ret, 0);
	k_busy_wait(CONFIG_CPU_LOAD_LOG_PERIODICALLY * 4 * 1000);
	zassert_between_inclusive(last_cpu_load_percent, 99, 100);
	zassert_between_inclusive(num_load_callbacks, 2, 7);

	/* Reset the callback */
	ret = cpu_load_cb_reg(NULL, 99);
	num_load_callbacks = 0;
	zassert_equal(ret, 0);
	k_busy_wait(CONFIG_CPU_LOAD_LOG_PERIODICALLY * 4 * 1000);
	zassert_equal(num_load_callbacks, 0);
}

#endif /* CONFIG_CPU_LOAD_LOG_PERIODICALLY > 0 */

ZTEST_SUITE(cpu_load, NULL, NULL, NULL, NULL, NULL);