// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

// Run the scheduler and possibly switch to a different thread, with a hint
// that the current thread should continue running if it is permitted to do so
// (i.e. nothing is higher priority).
//
// The caller must not be holding any spinlocks (including the scheduler lock
// for any thread) and must not be in an RCU read-side critical section.
//
// Returns true if the scheduler switched threads (and has switched back).
bool
scheduler_schedule(void);

// Trigger a scheduler run to occur once it is safe.
//
// This is intended to be used instead of scheduler_schedule() when a thread is
// unblocked in a context that cannot easily run the scheduler, such as during a
// context switch.
//
// The scheduler run is not guaranteed to occur until the next return to
// userspace or to the idle thread.
void
scheduler_trigger(void) REQUIRE_PREEMPT_DISABLED;
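
// Illustrative usage (a sketch; SCHEDULER_BLOCK_EXAMPLE is a hypothetical
// scheduler_block_t value): from a context that has already disabled
// preemption and cannot run the scheduler directly, request a deferred run:
//
//	scheduler_lock_nopreempt(thread);
//	bool need_run = scheduler_unblock(thread, SCHEDULER_BLOCK_EXAMPLE);
//	scheduler_unlock_nopreempt(thread);
//	if (need_run) {
//		scheduler_trigger();
//	}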

// Run the scheduler and possibly switch to a different thread, with a hint
// that the current thread wants to yield the CPU even if it is still
// permitted to run.
//
// The caller must not be holding any spinlocks (including the scheduler lock
// for any thread) and must not be in an RCU read-side critical section.
void
scheduler_yield(void);

// Run the scheduler and possibly switch to a different thread, with a hint
// that the current thread wants to donate its current time allocation and
// priority to the specified thread.
//
// The caller must not be holding any spinlocks (including the scheduler lock
// for any thread) and must not be in an RCU read-side critical section. The
// caller must hold a reference to the specified thread, and must not be the
// specified thread.
//
// Note that this is not guaranteed to switch to the specified thread, which
// may be blocked, only runnable on a remote CPU, or of lower priority than
// another runnable thread even after priority donation.
void
scheduler_yield_to(thread_t *target);
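
// Illustrative usage (a sketch; try_acquire() and res->holder are
// hypothetical names): a thread that cannot make progress until another
// thread releases a resource may donate its time and priority to that
// thread instead of busy-waiting.
//
//	while (!try_acquire(res)) {
//		// A reference to res->holder must be held across this call,
//		// and it must not be the current thread.
//		scheduler_yield_to(res->holder);
//	}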

// Lock a thread's scheduler state.
//
// Calling this function acquires a spinlock that protects the specified thread
// from concurrent changes to its scheduling state. Calls to this function must
// be balanced by calls to scheduler_unlock().
//
// A caller must not attempt to acquire scheduling locks for multiple threads
// concurrently.
void
scheduler_lock(thread_t *thread) ACQUIRE_SCHEDULER_LOCK(thread);

// Lock a thread's scheduler state, when preemption is known to be disabled.
void
scheduler_lock_nopreempt(thread_t *thread) ACQUIRE_SCHEDULER_LOCK_NP(thread);

// Unlock a thread's scheduler state.
//
// Calling this function releases the spinlock that was acquired by calling
// scheduler_lock(). Calls to this function must exactly balance calls to
// scheduler_lock().
void
scheduler_unlock(thread_t *thread) RELEASE_SCHEDULER_LOCK(thread);

// Unlock a thread's scheduler state, without enabling preemption.
void
scheduler_unlock_nopreempt(thread_t *thread) RELEASE_SCHEDULER_LOCK_NP(thread);
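
// Illustrative usage (a sketch): every lock must be balanced by the matching
// unlock variant, and the _nopreempt forms are only for callers that have
// already disabled preemption.
//
//	scheduler_lock(thread);
//	// ... inspect or modify the thread's scheduling state ...
//	scheduler_unlock(thread);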

// Block a thread for a specified reason.
//
// Calling this function prevents the specified thread from being chosen by the
// scheduler until scheduler_unblock() is called on the thread for the same
// reason. Multiple blocks with the same reason do not nest, and can be
// cleared by a single unblock.
//
// The caller must either be the specified thread, or hold a reference to the
// specified thread, or be in an RCU read-side critical section. The caller must
// also hold the scheduling lock for the thread (see scheduler_lock()).
//
// Calling this function on the current thread does not immediately switch
// contexts; the caller must subsequently call scheduler_schedule() or
// scheduler_yield*() in that case (not scheduler_trigger()!). If this is done
// with preemption enabled, any preemption that occurs before the scheduler
// call will not return until the thread has been unblocked by another thread,
// so a planned call to scheduler_yield*() may not happen until after that
// unblock; it is therefore preferable either to call scheduler_schedule(), or
// to disable preemption before blocking.
//
// Calling this function on a thread that is currently running on a remote CPU
// will not immediately interrupt that thread. Call scheduler_sync(thread) if
// it is necessary to wait until the target thread is not running.
void
scheduler_block(thread_t *thread, scheduler_block_t block)
	REQUIRE_SCHEDULER_LOCK(thread);
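
// Illustrative usage (a sketch; thread_get_self() and
// SCHEDULER_BLOCK_EXAMPLE are hypothetical names): a thread blocking itself
// and then running the scheduler, following the rules above.
//
//	thread_t *self = thread_get_self();
//	scheduler_lock(self);
//	scheduler_block(self, SCHEDULER_BLOCK_EXAMPLE);
//	scheduler_unlock(self);
//	(void)scheduler_schedule();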

// Block a thread for a specified reason during creation.
//
// This function has the same functionality as scheduler_block(), but the caller
// is not required to hold the scheduling lock for the thread. However, this
// function can only be used by object_create_thread handlers on a newly created
// thread.
void
scheduler_block_init(thread_t *thread, scheduler_block_t block);

// Remove a block reason from a thread, possibly allowing it to run.
//
// The caller must either be the specified thread, or hold a reference to the
// specified thread, or be in an RCU read-side critical section. The caller must
// also hold the scheduling lock for the thread (see scheduler_lock()).
//
// Calling this function on a thread that is immediately runnable on some CPU
// does not necessarily cause it to actually run. If this function returns true,
// the caller should call scheduler_schedule(), scheduler_trigger() or
// scheduler_yield*() afterwards to avoid delaying execution of the unblocked
// thread.
//
// Returns true if a scheduler run is needed as a consequence of this call.
bool
scheduler_unblock(thread_t *thread, scheduler_block_t block)
	REQUIRE_SCHEDULER_LOCK(thread);
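
// Illustrative usage (a sketch, using the hypothetical
// SCHEDULER_BLOCK_EXAMPLE reason again): unblock under the lock, then run
// the scheduler once the lock has been dropped if a run was requested.
//
//	scheduler_lock(thread);
//	bool need_run = scheduler_unblock(thread, SCHEDULER_BLOCK_EXAMPLE);
//	scheduler_unlock(thread);
//	if (need_run) {
//		(void)scheduler_schedule();
//	}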

// Return true if a thread is blocked for a specified reason.
//
// The caller must either be the specified thread, or hold a reference to the
// specified thread, or be in an RCU read-side critical section.
//
// Note that this function is inherently racy: if the specified thread might
// be blocked or unblocked with the specified reason by a third party, then it
// may return an incorrect value. It is the caller's responsibility to
// guarantee that such races do not occur, typically by calling
// scheduler_lock().
bool
scheduler_is_blocked(const thread_t *thread, scheduler_block_t block);
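
// For example (a sketch, using the hypothetical SCHEDULER_BLOCK_EXAMPLE
// reason): holding the scheduler lock across the check and any dependent
// action prevents a third party from changing the block state in between.
//
//	scheduler_lock(thread);
//	if (!scheduler_is_blocked(thread, SCHEDULER_BLOCK_EXAMPLE)) {
//		scheduler_block(thread, SCHEDULER_BLOCK_EXAMPLE);
//	}
//	scheduler_unlock(thread);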

// Return true if a thread is available for scheduling.
//
// The caller must either be the specified thread, or hold a reference to the
// specified thread, or be in an RCU read-side critical section. The caller must
// also hold the scheduling lock for the thread (see scheduler_lock()).
//
// This function may ignore some block flags if the thread has been killed.
bool
scheduler_is_runnable(const thread_t *thread) REQUIRE_SCHEDULER_LOCK(thread);

// Return true if a thread is currently scheduled and running.
//
// The caller must either be the specified thread, or hold a reference to the
// specified thread, or be in an RCU read-side critical section. The caller must
// also hold the scheduling lock for the thread (see scheduler_lock()).
bool
scheduler_is_running(const thread_t *thread) REQUIRE_SCHEDULER_LOCK(thread);

// Wait until a specified thread is not running.
//
// The caller must not be holding any spinlocks and must not be in an RCU
// read-side critical section. The caller must hold a reference to the
// specified thread.
//
// If the specified thread is not blocked, or may be unblocked by some other
// thread, this function may block indefinitely. There is no guarantee that
// the thread will not be running when this function returns; there is only a
// guarantee that the thread was not running at some time after the function
// was called.
//
// This function implies an acquire barrier that synchronises with a release
// barrier performed by the specified thread when it stopped running.
void
scheduler_sync(thread_t *thread);
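
// Illustrative usage (a sketch, again with the hypothetical
// SCHEDULER_BLOCK_EXAMPLE reason): after blocking a possibly-running remote
// thread, wait for it to stop running before tearing down state it might
// still be using.
//
//	scheduler_lock(target);
//	scheduler_block(target, SCHEDULER_BLOCK_EXAMPLE);
//	scheduler_unlock(target);
//	scheduler_sync(target);
//	// The target was observed not running at some point after the call,
//	// and its prior memory accesses are now visible to this thread.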

// Pin a thread to its current physical CPU, preventing it from migrating to
// other physical CPUs.
//
// The caller must either be the specified thread, or hold a reference to the
// specified thread, or be in an RCU read-side critical section. The caller must
// also hold the scheduling lock for the thread (see scheduler_lock()).
//
// Multiple calls to this function nest; the same number of calls to
// scheduler_unpin() are required before a thread becomes migratable again.
//
// This function is a no-op for schedulers that do not support migration.
void
scheduler_pin(thread_t *thread) REQUIRE_SCHEDULER_LOCK(thread);

// Unpin a thread from its current physical CPU.
//
// The caller must either be the specified thread, or hold a reference to the
// specified thread, or be in an RCU read-side critical section. The caller must
// also hold the scheduling lock for the thread (see scheduler_lock()).
//
// This function is a no-op for schedulers that do not support migration.
void
scheduler_unpin(thread_t *thread) REQUIRE_SCHEDULER_LOCK(thread);
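
// Illustrative usage (a sketch): pins nest, so each call must be balanced
// before the thread becomes migratable again.
//
//	scheduler_lock(thread);
//	scheduler_pin(thread);		// pin count 1: migration prevented
//	scheduler_pin(thread);		// pin count 2
//	scheduler_unpin(thread);	// pin count 1: still pinned
//	scheduler_unpin(thread);	// pin count 0: migration permitted again
//	scheduler_unlock(thread);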

// Get the primary VCPU for a specific physical CPU.
//
// This function returns a pointer to a VCPU on the specified physical CPU that
// belongs to the primary HLOS VM, which is responsible for interrupt handling
// and hosts the primary scheduler. This is only defined in configurations that
// defer most decisions and all interrupt handling to the primary HLOS.
//
// This function does not take a reference to the returned thread, so it must be
// called from an RCU read-side critical section.
thread_t *
scheduler_get_primary_vcpu(cpu_index_t cpu) REQUIRE_RCU_READ;

// Returns the configured affinity of a thread.
//
// The caller must either be the specified thread, or hold a reference to the
// specified thread, or be in an RCU read-side critical section. The caller must
// also hold the scheduling lock for the thread (see scheduler_lock()).
//
// In most cases it is not correct for a thread to call this function on itself,
// because the result is the CPU that the scheduler _wants_ to schedule the
// thread on, not the CPU it _has_ scheduled the thread on. If the current
// thread wants to know which CPU it is running on, it can use
// cpulocal_get_index(); for threads that may be running remotely,
// scheduler_get_active_affinity() should be used instead.
cpu_index_t
scheduler_get_affinity(thread_t *thread) REQUIRE_SCHEDULER_LOCK(thread);

// Returns the active affinity of a thread.
//
// The caller must either be the specified thread, or hold a reference to the
// specified thread, or be in an RCU read-side critical section. The caller must
// also hold the scheduling lock for the thread (see scheduler_lock()).
//
// This function can be used to determine which CPU a thread is actively running
// on, which may not reflect the configured affinity if it has been recently
// changed. If the thread is not currently running, this function will return
// the same result as scheduler_get_affinity().
cpu_index_t
scheduler_get_active_affinity(thread_t *thread) REQUIRE_SCHEDULER_LOCK(thread);
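
// Illustrative comparison (a sketch; `other` is some thread whose scheduler
// lock the caller may take): the configured affinity, the active affinity,
// and the current thread's own CPU are three distinct queries.
//
//	cpu_index_t here = cpulocal_get_index(); // CPU of the current thread
//	scheduler_lock(other);
//	cpu_index_t wanted = scheduler_get_affinity(other);
//	cpu_index_t actual = scheduler_get_active_affinity(other);
//	scheduler_unlock(other);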

// Set the affinity of a thread.
//
// The caller must either be the specified thread, or hold a reference to the
// specified thread, or be in an RCU read-side critical section. The caller must
// also hold the scheduling lock for the thread (see scheduler_lock()).
//
// For schedulers that do not support migration, this function must only be
// called for threads that have not yet been activated. Threads that are pinned
// to a CPU cannot have their affinity changed.
error_t
scheduler_set_affinity(thread_t *thread, cpu_index_t target_cpu)
	REQUIRE_SCHEDULER_LOCK(thread);

// Set the scheduling priority of a thread.
error_t
scheduler_set_priority(thread_t *thread, priority_t priority)
	REQUIRE_SCHEDULER_LOCK(thread);

// Set the scheduling timeslice of a thread.
error_t
scheduler_set_timeslice(thread_t *thread, nanoseconds_t timeslice)
	REQUIRE_SCHEDULER_LOCK(thread);

// Returns true if the specified thread has sufficient priority to immediately
// preempt the currently running thread.
//
// This function assumes that the specified thread is able to run on the calling
// CPU, regardless of the current block flags, affinity, timeslice, etc.
//
// The scheduler lock for the specified thread must be held, and it is assumed
// not to be the current thread.
bool
scheduler_will_preempt_current(thread_t *thread) REQUIRE_SCHEDULER_LOCK(thread);
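
// Illustrative usage (a sketch, with the hypothetical
// SCHEDULER_BLOCK_EXAMPLE reason): deciding whether a newly unblocked
// thread justifies an immediate scheduler run on this CPU, assuming it is
// able to run here.
//
//	scheduler_lock(thread);
//	bool need_run = scheduler_unblock(thread, SCHEDULER_BLOCK_EXAMPLE);
//	bool urgent = need_run && scheduler_will_preempt_current(thread);
//	scheduler_unlock(thread);
//	if (urgent) {
//		(void)scheduler_schedule();
//	}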