// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#if defined(UNIT_TESTS)

#include <assert.h>
#include <hyptypes.h>

#include <atomic.h>
#include <compiler.h>
#include <cpulocal.h>
#include <hyp_aspace.h>
#include <log.h>
#include <object.h>
#include <panic.h>
#include <partition.h>
#include <partition_alloc.h>
#include <preempt.h>
#include <scheduler.h>
#include <thread.h>
#include <timer_queue.h>
#include <trace.h>
#include <util.h>

#include <events/object.h>

#include <asm/event.h>

#include "event_handlers.h"

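// Number of affinity changes exercised by the migrate-running-thread test.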
#define NUM_AFFINITY_SWITCH 20U

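// Size of the virtual address range reserved for test thread stacks (4MiB).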
#define SCHED_TEST_STACK_AREA (4U << 20)

static uintptr_t	 sched_test_stack_base;
static uintptr_t	 sched_test_stack_end;
static _Atomic uintptr_t sched_test_stack_alloc;

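// Count of cores that have finished their single-CPU tests; used as a barrier
// before the thread migration test.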
static _Atomic count_t sync_flag;

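// Per-CPU test state: wait_flag handshakes with the test threads, test_thread
// holds the target thread for directed yields, test_passed_count tracks
// completed tests, and affinity_count tracks progress of the migration test.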
CPULOCAL_DECLARE_STATIC(_Atomic uint8_t, wait_flag);
CPULOCAL_DECLARE_STATIC(thread_t *, test_thread);
CPULOCAL_DECLARE_STATIC(count_t, test_passed_count);
CPULOCAL_DECLARE_STATIC(_Atomic count_t, affinity_count);

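// Allocate and activate a scheduler test thread with the given priority, CPU
// affinity and test operation. The calling CPU is recorded as the thread's
// parent.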
static thread_ptr_result_t
create_thread(priority_t prio, cpu_index_t cpu, sched_test_op_t op)
	REQUIRE_PREEMPT_DISABLED
{
	thread_ptr_result_t ret;

	sched_test_param_t param = sched_test_param_default();
	sched_test_param_set_parent(&param, cpulocal_get_index());
	sched_test_param_set_op(&param, op);

	thread_create_t params = {
		.scheduler_affinity	  = cpu,
		.scheduler_affinity_valid = true,
		.scheduler_priority	  = prio,
		.scheduler_priority_valid = true,
		.kind			  = THREAD_KIND_SCHED_TEST,
		.params			  = sched_test_param_raw(param),
	};

	ret = partition_allocate_thread(partition_get_private(), params);
	if (ret.e != OK) {
		goto out;
	}

	error_t err = object_activate_thread(ret.r);
	if (err != OK) {
		object_put_thread(ret.r);
		ret = thread_ptr_result_error(err);
	}

out:
	return ret;
}

static void
destroy_thread(thread_t *thread)
{
	// Wait for the thread to exit so subsequent tests do not race with it.
	while (atomic_load_relaxed(&thread->state) != THREAD_STATE_EXITED) {
		scheduler_yield_to(thread);
	}

	object_put_thread(thread);
}

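// Run the scheduler and assert that a context switch away from the current
// thread did or did not occur, as indicated by switch_expected.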
static void
schedule_check_switched(thread_t *thread, bool switch_expected)
{
	thread_t *current = thread_get_self();

	preempt_disable();
	if (scheduler_schedule()) {
		// We must have expected a switch.
		assert(switch_expected);
	} else if (switch_expected) {
		// If we didn't switch, then current must have already been
		// preempted. For current to run again, the other thread must
		// have either exited or be yielding to us.
		assert((thread->scheduler_yield_to == current) ||
		       (atomic_load_relaxed(&thread->state) ==
			THREAD_STATE_EXITED));
	} else {
		// Nothing to check.
	}
	preempt_enable();
}

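// Reserve a virtual address range from which test thread stacks are handed
// out by sched_test_get_stack_base().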
void
tests_scheduler_init(void)
{
	virt_range_result_t range = hyp_aspace_allocate(SCHED_TEST_STACK_AREA);
	assert(range.e == OK);

	sched_test_stack_base =
		util_balign_up(range.r.base + 1U, THREAD_STACK_MAP_ALIGN);
	sched_test_stack_end = range.r.base + (range.r.size - 1U);

	atomic_init(&sched_test_stack_alloc, sched_test_stack_base);
}

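// Per-CPU scheduler tests; each test that passes increments this CPU's
// test_passed_count.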
bool
tests_scheduler_start(void)
{
	thread_ptr_result_t ret;
	uint8_t		    old;

	// Test 1: priorities
	// priority > default: switch on schedule
	ret = create_thread(SCHEDULER_MAX_PRIORITY, cpulocal_get_index(),
			    SCHED_TEST_OP_INCREMENT);
	assert(ret.e == OK);

	schedule_check_switched(ret.r, true);

	old = atomic_load_relaxed(&CPULOCAL(wait_flag));
	assert(old == 1U);
	atomic_store_relaxed(&CPULOCAL(wait_flag), 0U);
	destroy_thread(ret.r);
	CPULOCAL(test_passed_count)++;

	// priority == default: switch on yield
	ret = create_thread(SCHEDULER_DEFAULT_PRIORITY, cpulocal_get_index(),
			    SCHED_TEST_OP_INCREMENT);
	assert(ret.e == OK);

	while (atomic_load_relaxed(&CPULOCAL(wait_flag)) == 0U) {
		scheduler_yield();
	}
	atomic_store_relaxed(&CPULOCAL(wait_flag), 0U);
	destroy_thread(ret.r);
	CPULOCAL(test_passed_count)++;

	// priority < default: switch on directed yield
	ret = create_thread(SCHEDULER_MIN_PRIORITY, cpulocal_get_index(),
			    SCHED_TEST_OP_INCREMENT);
	assert(ret.e == OK);

	schedule_check_switched(ret.r, false);

	while (atomic_load_relaxed(&CPULOCAL(wait_flag)) == 0U) {
		scheduler_yield_to(ret.r);
	}
	atomic_store_relaxed(&CPULOCAL(wait_flag), 0U);
	destroy_thread(ret.r);
	CPULOCAL(test_passed_count)++;

	// Test 2: wait for timeslice expiry
	ret = create_thread(SCHEDULER_DEFAULT_PRIORITY, cpulocal_get_index(),
			    SCHED_TEST_OP_WAKE);
	assert(ret.e == OK);

	// Yield to reset the current thread's timeslice, then wait for the
	// other thread to run and update the wait flag.
	scheduler_yield();
	_Atomic uint8_t *wait_flag = &CPULOCAL(wait_flag);
	atomic_store_relaxed(wait_flag, 1U);
	preempt_enable();
	while (asm_event_load_before_wait(wait_flag) == 1U) {
		asm_event_wait(wait_flag);
	}
	preempt_disable();

	assert(atomic_load_relaxed(&CPULOCAL(wait_flag)) == 0U);
	destroy_thread(ret.r);
	CPULOCAL(test_passed_count)++;

	// Test 3: double directed yield
	ret = create_thread(SCHEDULER_MIN_PRIORITY, CPU_INDEX_INVALID,
			    SCHED_TEST_OP_INCREMENT);
	assert(ret.e == OK);
	CPULOCAL(test_thread) = ret.r;

	ret = create_thread(SCHEDULER_MIN_PRIORITY + 1U, cpulocal_get_index(),
			    SCHED_TEST_OP_YIELDTO);
	assert(ret.e == OK);

	schedule_check_switched(ret.r, false);

	atomic_store_relaxed(&CPULOCAL(wait_flag), 1U);
	while (atomic_load_relaxed(&CPULOCAL(wait_flag)) == 1U) {
		scheduler_yield_to(ret.r);
	}
	atomic_store_relaxed(&CPULOCAL(wait_flag), 0U);

	destroy_thread(ret.r);
	destroy_thread(CPULOCAL(test_thread));
	CPULOCAL(test_passed_count)++;

#if SCHEDULER_CAN_MIGRATE
	error_t err;

	// Test 4: set affinity & yield to
	ret = create_thread(SCHEDULER_MAX_PRIORITY, CPU_INDEX_INVALID,
			    SCHED_TEST_OP_YIELDTO);
	assert(ret.e == OK);

	schedule_check_switched(ret.r, false);

	CPULOCAL(test_thread) = thread_get_self();
	scheduler_lock_nopreempt(ret.r);
	err = scheduler_set_affinity(ret.r, cpulocal_get_index());
	scheduler_unlock_nopreempt(ret.r);
	assert(err == OK);

	schedule_check_switched(ret.r, true);

	scheduler_yield_to(ret.r);
	destroy_thread(ret.r);
	CPULOCAL(test_passed_count)++;

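	// Barrier: wait until every core has finished its single-CPU tests
	// before starting the cross-CPU migration test.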
	(void)atomic_fetch_add_explicit(&sync_flag, 1U, memory_order_relaxed);
	while (asm_event_load_before_wait(&sync_flag) < PLATFORM_MAX_CORES) {
		asm_event_wait(&sync_flag);
	}

	// Test 5: migrate running thread
	ret = create_thread(SCHEDULER_DEFAULT_PRIORITY, cpulocal_get_index(),
			    SCHED_TEST_OP_AFFINITY);
	assert(ret.e == OK);

	while (atomic_load_relaxed(&CPULOCAL(affinity_count)) <
	       NUM_AFFINITY_SWITCH) {
		scheduler_yield();
		scheduler_lock_nopreempt(ret.r);
		cpu_index_t affinity = (scheduler_get_affinity(ret.r) + 1U) %
				       PLATFORM_MAX_CORES;
		err = scheduler_set_affinity(ret.r, affinity);
		scheduler_unlock_nopreempt(ret.r);
		assert((err == OK) || (err == ERROR_RETRY));
	}

	// Move the thread back to the current CPU so we can yield to it and
	// wait for it to exit.
	do {
		scheduler_lock_nopreempt(ret.r);
		err = scheduler_set_affinity(ret.r, cpulocal_get_index());
		scheduler_unlock_nopreempt(ret.r);
		assert((err == OK) || (err == ERROR_RETRY));
	} while (err == ERROR_RETRY);

	destroy_thread(ret.r);
	CPULOCAL(test_passed_count)++;
#endif

	return false;
}

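// Entry point for scheduler test threads. The operation encoded in the
// thread's creation parameter selects the behaviour exercised by the test.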
static void
sched_test_thread_entry(uintptr_t param)
{
	cpulocal_begin();

	sched_test_param_t test_param = sched_test_param_cast((uint32_t)param);
	sched_test_op_t	   op	      = sched_test_param_get_op(&test_param);

	switch (op) {
	case SCHED_TEST_OP_INCREMENT:
		(void)atomic_fetch_add_explicit(&CPULOCAL(wait_flag), 1U,
						memory_order_relaxed);
		break;
	case SCHED_TEST_OP_WAKE: {
		_Atomic uint8_t *wait_flag = &CPULOCAL(wait_flag);
		cpulocal_end();
		while (asm_event_load_before_wait(wait_flag) == 0U) {
			asm_event_wait(wait_flag);
		}
		asm_event_store_and_wake(wait_flag, 0U);
		cpulocal_begin();
		break;
	}
	case SCHED_TEST_OP_YIELDTO:
		while (atomic_load_relaxed(&CPULOCAL(wait_flag)) == 1U) {
			scheduler_yield_to(CPULOCAL(test_thread));
		}
		break;
	case SCHED_TEST_OP_AFFINITY: {
		cpu_index_t parent = sched_test_param_get_parent(&test_param);
		_Atomic count_t *aff_count =
			&CPULOCAL_BY_INDEX(affinity_count, parent);
		while (atomic_load_relaxed(aff_count) < NUM_AFFINITY_SWITCH) {
			(void)atomic_fetch_add_explicit(aff_count, 1U,
							memory_order_relaxed);
			scheduler_yield();
		}
		break;
	}
	default:
		panic("Invalid param for sched test thread!");
	}

	cpulocal_end();
}

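// Return the entry function used for scheduler test threads.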
thread_func_t
sched_test_get_entry_fn(thread_kind_t kind)
{
	assert(kind == THREAD_KIND_SCHED_TEST);

	return sched_test_thread_entry;
}

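// Hand out stack bases for test threads from the range reserved in
// tests_scheduler_init(), using a simple bump allocator; the asserts check
// that the allocation stays within the reserved range.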
uintptr_t
sched_test_get_stack_base(thread_kind_t kind, thread_t *thread)
{
	assert(kind == THREAD_KIND_SCHED_TEST);
	assert(thread != NULL);

	size_t	  stack_area = THREAD_STACK_MAP_ALIGN;
	uintptr_t stack_base = atomic_fetch_add_explicit(
		&sched_test_stack_alloc, stack_area, memory_order_relaxed);

	assert(stack_base >= sched_test_stack_base);
	assert((stack_base + (stack_area - 1U)) <= sched_test_stack_end);

	return stack_base;
}
#else

extern char unused;

#endif