// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#include <assert.h>
#include <hyptypes.h>
#include <limits.h>

#include <atomic.h>
#include <compiler.h>
#include <cpulocal.h>
#include <idle.h>
#include <ipi.h>
#include <platform_ipi.h>
#include <platform_timer.h>
#include <preempt.h>
#include <scheduler.h>
#include <thread.h>
#include <util.h>

#include <events/ipi.h>
#include <events/scheduler.h>

#include <asm/barrier.h>
#include <asm/event.h>
#include <asm/interrupt.h>
#include <asm/prefetch.h>

#include "event_handlers.h"

// Set this to 1 to disable the fast idle optimisation for debugging
#define IPI_DEBUG_NO_FAST_IDLE 0

#define REGISTER_BITS (sizeof(register_t) * (size_t)CHAR_BIT)

// We enable fast wakeup support by default if asm_event_wait() can sleep (it
// busy-waits otherwise) and preemption is enabled. We could possibly do it
// without preemption if asm_event_wait() were woken by pending disabled
// interrupts, but that is not the case on ARMv8.
//
// If interrupts are handled by a VM, we need to be able to ask the VM to send
// an IPI for us. This is not currently implemented, so we force fast wakeups in
// such configurations even though they will block pending interrupts.
// FIXME:
#if (!ASM_EVENT_WAIT_IS_NOOP && !defined(PREEMPT_NULL) && \
     !IPI_DEBUG_NO_FAST_IDLE) ||                          \
	defined(IPI_FORCE_FAST_WAKEUP_HACK)
#define IPI_FAST_WAKEUP 1
#else
#define IPI_FAST_WAKEUP 0
#endif

#if IPI_FAST_WAKEUP
#define IPI_WAITING_IN_IDLE util_bit(REGISTER_BITS - 1U)
static_assert(((size_t)IPI_REASON__MAX + 1U) < (REGISTER_BITS - 1U),
	      "IPI reasons must fit in one word, with a free bit");
#else
static_assert(((size_t)IPI_REASON__MAX + 1U) < REGISTER_BITS,
	      "IPI reasons must fit in one word");
#endif

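// Per-CPU bitmap of pending IPI reasons: bit N is set while IPI_REASON N is
// pending on that CPU. With IPI_FAST_WAKEUP, the top bit of the word is
// reused to mark a CPU that is waiting in the fast idle loop below.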
CPULOCAL_DECLARE_STATIC(ipi_pending_t, ipi_pending);

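// Mark an IPI reason pending on every other CPU without raising a hardware
// IPI. Targets pick it up the next time they check their pending bits, e.g.
// on exit to userspace, in the idle handler, or on a fast-wakeup event.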
void
ipi_others_relaxed(ipi_reason_t ipi)
{
	assert(ipi <= IPI_REASON__MAX);
	const register_t ipi_bit = util_bit(ipi);
	const cpu_index_t this_cpu = cpulocal_get_index();

	for (cpu_index_t i = 0U; cpulocal_index_valid(i); i++) {
		if (i == this_cpu) {
			continue;
		}
		(void)atomic_fetch_or_explicit(
			&CPULOCAL_BY_INDEX(ipi_pending, i).bits, ipi_bit,
			memory_order_relaxed);
	}
	atomic_thread_fence(memory_order_release);
	asm_event_wake_updated();
}

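// Mark an IPI reason pending on every other CPU and raise a hardware IPI so
// the targets handle it promptly.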
void
ipi_others(ipi_reason_t ipi)
{
	ipi_others_relaxed(ipi);
#if PLATFORM_IPI_LINES > ENUM_IPI_REASON_MAX_VALUE
	platform_ipi_others(ipi);
#else
	platform_ipi_others();
#endif
}

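// As ipi_others(), but for IPIs that only need to wake idle CPUs; when fast
// wakeup is available, setting the pending bits and signalling the event is
// enough, so the hardware IPI is skipped.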
void
ipi_others_idle(ipi_reason_t ipi)
{
#if IPI_FAST_WAKEUP
	ipi_others_relaxed(ipi);
#else
	ipi_others(ipi);
#endif
}

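// Mark an IPI reason pending on a single CPU. Returns true if a hardware IPI
// is still needed, i.e. the target was not already waiting in fast idle.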
static bool
ipi_one_and_check_wakeup_needed(ipi_reason_t ipi, cpu_index_t cpu)
{
	assert(ipi <= IPI_REASON__MAX);
	const register_t ipi_bit = util_bit(ipi);

	assert(cpulocal_index_valid(cpu));

	register_t old_val = atomic_fetch_or_explicit(
		&CPULOCAL_BY_INDEX(ipi_pending, cpu).bits, ipi_bit,
		memory_order_release);
	asm_event_wake_updated();

#if IPI_FAST_WAKEUP
	return (old_val & IPI_WAITING_IN_IDLE) == 0U;
#else
	(void)old_val;
	return true;
#endif
}

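// Raise an IPI on one CPU, sending a hardware IPI unless the target is
// already waiting in fast idle and will see the pending bit by itself.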
void
ipi_one(ipi_reason_t ipi, cpu_index_t cpu)
{
	if (ipi_one_and_check_wakeup_needed(ipi, cpu)) {
#if PLATFORM_IPI_LINES > ENUM_IPI_REASON_MAX_VALUE
		platform_ipi_one(ipi, cpu);
#else
		platform_ipi_one(cpu);
#endif
	}
}

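// Mark an IPI reason pending on one CPU without sending a hardware IPI.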
void
ipi_one_relaxed(ipi_reason_t ipi, cpu_index_t cpu)
{
	(void)ipi_one_and_check_wakeup_needed(ipi, cpu);
}

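// As ipi_one(), but for IPIs that only need to wake an idle target; the
// hardware IPI is skipped when fast wakeup is available.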
void
ipi_one_idle(ipi_reason_t ipi, cpu_index_t cpu)
{
#if IPI_FAST_WAKEUP
	ipi_one_relaxed(ipi, cpu);
#else
	ipi_one(ipi, cpu);
#endif
}

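// Clear a pending IPI reason on the local CPU without touching any hardware
// IPI line. Returns true if the reason was pending.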
bool
ipi_clear_relaxed(ipi_reason_t ipi)
{
	assert(ipi <= IPI_REASON__MAX);

	const register_t ipi_bit = util_bit(ipi);

	register_t old_val = atomic_fetch_and_explicit(
		&CPULOCAL(ipi_pending).bits, ~ipi_bit, memory_order_acquire);

	return ((old_val & ipi_bit) != 0U);
}

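// Clear a pending IPI reason on the local CPU, also clearing the matching
// hardware IPI line on platforms that have one line per reason. Returns true
// if the reason was pending.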
bool
ipi_clear(ipi_reason_t ipi)
{
#if PLATFORM_IPI_LINES > ENUM_IPI_REASON_MAX_VALUE
	platform_ipi_clear(ipi);
#endif
	return ipi_clear_relaxed(ipi);
}

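// Dispatch received events for every reason bit set in the given pending
// word, highest bit first. Returns true if any handler requested a
// reschedule.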
#if IPI_FAST_WAKEUP || (PLATFORM_IPI_LINES <= ENUM_IPI_REASON_MAX_VALUE)
static bool
ipi_handle_pending(register_t pending) REQUIRE_PREEMPT_DISABLED
{
	bool reschedule = false;

	while (pending != 0U) {
		index_t bit = REGISTER_BITS - 1U - compiler_clz(pending);
		pending &= ~util_bit(bit);
		if (bit <= (index_t)IPI_REASON__MAX) {
			ipi_reason_t ipi = (ipi_reason_t)bit;
			if (trigger_ipi_received_event(ipi)) {
				reschedule = true;
			}
		}
	}

	return reschedule;
}
#endif

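// Handler for the platform's hardware IPI. If the platform has a separate IPI
// line per reason, only the given reason is handled; otherwise the whole
// per-CPU pending word is drained and dispatched.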
#if PLATFORM_IPI_LINES > ENUM_IPI_REASON_MAX_VALUE
bool
ipi_handle_platform_ipi(ipi_reason_t ipi)
{
	if (ipi_clear_relaxed(ipi) && trigger_ipi_received_event(ipi)) {
		// We can't reschedule immediately as that might leave other
		// IRQs unhandled, so defer the reschedule.
		//
		// This may trigger a local reschedule relaxed IPI, even if that
		// is the IPI we just tried to handle. That is OK; since it is
		// relaxed, we will pick it up before returning to userspace or
		// going idle.
		scheduler_trigger();
	}

	return true;
}
#else
bool
ipi_handle_platform_ipi(void)
{
	register_t pending = atomic_exchange_explicit(
		&CPULOCAL(ipi_pending).bits, 0U, memory_order_acquire);
	if (ipi_handle_pending(pending)) {
		scheduler_trigger();
	}

	return true;
}
#endif

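// Handle all relaxed IPIs pending on the local CPU; called with preemption
// disabled. Returns true if any handler requested a reschedule.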
bool
ipi_handle_relaxed(void)
{
	assert_preempt_disabled();
	bool reschedule = false;

	_Atomic register_t *local_pending = &CPULOCAL(ipi_pending).bits;
	prefetch_store_keep(local_pending);
	register_t pending = atomic_load_relaxed(local_pending);
	while (compiler_unexpected(pending != 0U)) {
		ipi_reason_t ipi =
			(ipi_reason_t)((register_t)(REGISTER_BITS - 1U -
						    compiler_clz(pending)));
		if (ipi_clear_relaxed(ipi) && trigger_ipi_received_event(ipi)) {
			reschedule = true;
		}
		pending = atomic_load_relaxed(local_pending);
	}

	return reschedule;
}

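// Catch up on relaxed IPIs before returning to userspace, rescheduling first
// if any handler asks for it.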
void
ipi_handle_thread_exit_to_user(thread_entry_reason_t reason)
{
	// Relaxed IPIs are handled directly by the IRQ module for interrupts.
	if (reason != THREAD_ENTRY_REASON_INTERRUPT) {
		if (ipi_handle_relaxed()) {
			(void)scheduler_schedule();
		}
	}
}

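// Idle-yield handler. With fast wakeup it waits on the per-CPU pending word
// instead of letting the caller enter a deeper low-power wait, handling IPIs
// inline until a reschedule is needed or the wait is interrupted. Otherwise
// it just drains relaxed IPIs and lets the caller decide whether to idle.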
idle_state_t
ipi_handle_idle_yield(bool in_idle_thread)
{
	_Atomic register_t *local_pending = &CPULOCAL(ipi_pending).bits;

	prefetch_store_keep(local_pending);
#if IPI_FAST_WAKEUP
	bool must_schedule;
	register_t pending;
	do {
		// Mark ourselves as waiting in idle.
		(void)atomic_fetch_or_explicit(local_pending,
			IPI_WAITING_IN_IDLE, memory_order_relaxed);

		// Sleep until there is at least one event to handle or a
		// preemption clears IPI_WAITING_IN_IDLE.
		//
		// We must enable interrupts while waiting, because there is no
		// guarantee that asm_event_wait() will be woken by pending
		// interrupts; the ARM implementation, a WFE instruction, is
		// not. This means that preempt_interrupt_dispatch needs to
		// check the preempt disable count, and avoid context switching
		// if it is nonzero!
		asm_interrupt_enable_release(&local_pending);
		pending = asm_event_load_before_wait(local_pending);
		while (pending == IPI_WAITING_IN_IDLE) {
			asm_event_wait(local_pending);
			pending = asm_event_load_before_wait(local_pending);
		}
		asm_interrupt_disable_acquire(&local_pending);

		// Fetch and clear the events to handle; also clear the
		// IPI_WAITING_IN_IDLE bit if it is still set.
		pending = atomic_exchange_explicit(local_pending, 0U,
			memory_order_acquire);

		// Handle the pending events, checking if a reschedule is
		// required.
		must_schedule =
			ipi_handle_pending(pending & ~IPI_WAITING_IN_IDLE);

		// Exit the loop if we must reschedule, we were preempted
		// (IPI_WAITING_IN_IDLE was cleared), or we weren't called from
		// the idle thread.
	} while (in_idle_thread && !must_schedule &&
		 ((pending & IPI_WAITING_IN_IDLE) != 0U));

	// Return and ensure we don't continue to WFI.
	return must_schedule ? IDLE_STATE_RESCHEDULE : IDLE_STATE_WAKEUP;
#else
	(void)in_idle_thread;
	return ipi_handle_relaxed() ? IDLE_STATE_RESCHEDULE : IDLE_STATE_IDLE;
#endif
}

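// Drain relaxed IPIs before a CPU suspend; if any of them requires a
// reschedule, the suspend is aborted with ERROR_BUSY.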
error_t
ipi_handle_power_cpu_suspend(void)
{
	assert_preempt_disabled();

	bool reschedule = ipi_handle_relaxed();
	if (reschedule) {
		scheduler_trigger();
	}

	// Abort the suspend if we need to reschedule
	return reschedule ? ERROR_BUSY : OK;
}

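// Preemption-interrupt handler: it only needs to knock the CPU out of the
// fast idle wait loop above; the pending IPIs themselves are handled by the
// caller afterwards.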
#if !defined(PREEMPT_NULL)
bool
ipi_handle_preempt_interrupt(void)
{
#if IPI_FAST_WAKEUP
	// Clear the waiting-in-idle flag, to force idle_yield to exit.
	atomic_fetch_and_explicit(&CPULOCAL(ipi_pending).bits,
				  ~IPI_WAITING_IN_IDLE, memory_order_relaxed);
	// Note that IPIs are always handled by the caller after this event
	// completes, regardless of its result.
#endif
	return false;
}
#endif

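// Drain relaxed IPIs when the scheduler goes quiescent, deferring any
// requested reschedule via scheduler_trigger().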
void
ipi_handle_scheduler_quiescent(void)
{
	assert_preempt_disabled();

	bool reschedule = ipi_handle_relaxed();
	if (reschedule) {
		scheduler_trigger();
	}
}

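// On scheduler stop, tell all other CPUs to stop, then spin for roughly 1ms
// to give them time to save state; no acknowledgement is awaited because
// they may be unresponsive.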
void
ipi_handle_scheduler_stop(void)
{
	ipi_others(IPI_REASON_ABORT_STOP);

	// Delay approx 1ms to allow other cores to complete saving state.
	// We don't wait for acknowledgement since they may be unresponsive.
	uint32_t freq = platform_timer_get_frequency();

	uint64_t now = platform_timer_get_current_ticks();
	uint64_t end = now + ((uint64_t)freq / 1024U);

	while (now < end) {
		asm_yield();
		now = platform_timer_get_current_ticks();
	}
}