1 /**
2 * \file
3 * Scheduler object functions.
4 */
5 /*
6 * (c) 2008-2009 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
7 * Alexander Warg <warg@os.inf.tu-dresden.de>
8 * economic rights: Technische Universität Dresden (Germany)
9 *
10 * This file is part of TUD:OS and distributed under the terms of the
11 * GNU General Public License 2.
12 * Please see the COPYING-GPL-2 file for details.
13 *
14 * As a special exception, you may use this file as part of a free software
15 * library without restriction. Specifically, if other files instantiate
16 * templates or use macros or inline functions from this file, or you compile
17 * this file and link it with other files to produce an executable, this
18 * file does not by itself cause the resulting executable to be covered by
19 * the GNU General Public License. This exception does not however
20 * invalidate any other reasons why the executable file might be covered by
21 * the GNU General Public License.
22 */
23 #pragma once
24
25 #include <l4/sys/kernel_object.h>
26 #include <l4/sys/ipc.h>
27
28 /**
29 * \defgroup l4_scheduler_api Scheduler
30 * \ingroup l4_kernel_object_api
31 * C interface of the Scheduler kernel object.
32 *
33 * The Scheduler interface allows a client to manage CPU resources. The API
34 * provides functions to query scheduler information, check the online state
35 * of CPUs, query CPU idle time and to start threads on defined CPU sets.
36 *
37 * The scheduler offers a virtual device IRQ which triggers when the number of
38 * online cores changes, e.g. due to hotplug events. In contrast to hardware
 * IRQs, this IRQ provides only limited functionality:
40 * - Only IRQ line 0 is supported, no MSI vectors.
41 * - The IRQ is edge-triggered and the IRQ mode cannot be changed.
42 * - As the IRQ is edge-triggered, it does not have to be explicitly unmasked.
43 *
44 * \includefile{l4/sys/scheduler.h}
45 */
46
47 /**
48 * CPU sets.
49 * \ingroup l4_scheduler_api
50 */
51 typedef struct l4_sched_cpu_set_t
52 {
53 /**
54 * Combination of granularity and offset.
55 *
56 * The granularity defines how many CPUs each bit in map describes. And the
57 * offset is the number of the first CPU described by the first bit in the
58 * bitmap.
59 * \pre offset must be a multiple of 2^granularity.
60 *
61 * | MSB | LSB |
62 * |:-----------------|--------------------:|
63 * | 8bit granularity | 24bit offset .. |
64 */
65 l4_umword_t gran_offset;
66
67 /**
68 * Bitmap of CPUs.
69 */
70 l4_umword_t map;
71
72 #ifdef __cplusplus
73 /// \return Get granularity value
granularityl4_sched_cpu_set_t74 unsigned char granularity() const { return gran_offset >> 24; }
75 /// \return Get offset value
offsetl4_sched_cpu_set_t76 unsigned offset() const { return gran_offset & 0x00ffffff; }
77 /// Set offset and granularity
setl4_sched_cpu_set_t78 void set(unsigned char granularity, unsigned offset)
79 { gran_offset = ((l4_umword_t)granularity << 24) | (offset & 0x00ffffff); }
80 #endif
81 } l4_sched_cpu_set_t;
82
/**
 * Construct a CPU set.
 *
 * \ingroup l4_scheduler_api
 *
 * \param offset       Offset, i.e. the number of the first CPU described by
 *                     the first bit in the bitmap.
 *                     \pre Must be a multiple of 2^granularity
 *                     (see l4_sched_cpu_set_t).
 * \param granularity  Granularity in log2 notation.
 * \param map          Bitmap of CPUs, defaults to 1 in C++.
 *
 * \return CPU set.
 */
L4_INLINE l4_sched_cpu_set_t
l4_sched_cpu_set(l4_umword_t offset, unsigned char granularity,
                 l4_umword_t map L4_DEFAULT_PARAM(1)) L4_NOTHROW;
96
/**
 * \ingroup l4_scheduler_api
 * \copybrief L4::Scheduler::info
 *
 * \param scheduler      Scheduler object.
 * \param[out] cpu_max   Maximum number of CPUs ever available.
 * \param[in,out] cpus   \a cpus.offset is first CPU of interest.
 *                       \a cpus.granularity (see l4_sched_cpu_set_t).
 *                       \a cpus.map Bitmap of online CPUs.
 *
 * \retval 0           Success.
 * \retval -L4_EINVAL  The given CPU offset is larger than the maximum number
 *                     of CPUs.
 *
 * \note Uses the UTCB of the current thread for the IPC; see
 *       l4_scheduler_info_u() to pass the UTCB explicitly.
 */
L4_INLINE l4_msgtag_t
l4_scheduler_info(l4_cap_idx_t scheduler, l4_umword_t *cpu_max,
                  l4_sched_cpu_set_t *cpus) L4_NOTHROW;
114
/**
 * \internal
 * UTCB variant of l4_scheduler_info(); \a utcb is the UTCB to use for the
 * IPC to the scheduler.
 */
L4_INLINE l4_msgtag_t
l4_scheduler_info_u(l4_cap_idx_t scheduler, l4_umword_t *cpu_max,
                    l4_sched_cpu_set_t *cpus, l4_utcb_t *utcb) L4_NOTHROW;
121
122
/**
 * Scheduler parameter set.
 *
 * Passed to l4_scheduler_run_thread() to place a thread on a CPU set with a
 * given priority and timeslice.
 * \ingroup l4_scheduler_api
 */
typedef struct l4_sched_param_t
{
  l4_sched_cpu_set_t affinity; ///< CPU affinity (set of CPUs the thread may run on).
  l4_umword_t prio;            ///< Priority for scheduling.
  l4_umword_t quantum;         ///< Timeslice in micro seconds.
} l4_sched_param_t;
133
/**
 * Construct scheduler parameter.
 * \ingroup l4_scheduler_api
 *
 * \param prio     Priority for scheduling.
 * \param quantum  Timeslice in micro seconds, defaults to 0 in C++.
 *
 * \return Scheduler parameter set with the given priority and quantum; the
 *         affinity selects all CPUs (offset 0, maximum granularity).
 */
L4_INLINE l4_sched_param_t
l4_sched_param(unsigned prio,
               l4_cpu_time_t quantum L4_DEFAULT_PARAM(0)) L4_NOTHROW;
141
/**
 * \ingroup l4_scheduler_api
 * \copybrief L4::Scheduler::run_thread
 *
 * \param scheduler  Scheduler object.
 * \copydetails L4::Scheduler::run_thread
 *
 * \note Uses the UTCB of the current thread for the IPC; see
 *       l4_scheduler_run_thread_u() to pass the UTCB explicitly.
 */
L4_INLINE l4_msgtag_t
l4_scheduler_run_thread(l4_cap_idx_t scheduler,
                        l4_cap_idx_t thread, l4_sched_param_t const *sp) L4_NOTHROW;
152
/**
 * \internal
 * UTCB variant of l4_scheduler_run_thread(); \a utcb is the UTCB to use for
 * the IPC to the scheduler.
 */
L4_INLINE l4_msgtag_t
l4_scheduler_run_thread_u(l4_cap_idx_t scheduler, l4_cap_idx_t thread,
                          l4_sched_param_t const *sp, l4_utcb_t *utcb) L4_NOTHROW;
159
/**
 * \ingroup l4_scheduler_api
 * \copybrief L4::Scheduler::idle_time
 *
 * \param scheduler  Scheduler object.
 * \copydetails L4::Scheduler::idle_time
 *
 * \note Uses the UTCB of the current thread for the IPC; see
 *       l4_scheduler_idle_time_u() to pass the UTCB explicitly.
 */
L4_INLINE l4_msgtag_t
l4_scheduler_idle_time(l4_cap_idx_t scheduler, l4_sched_cpu_set_t const *cpus,
                       l4_kernel_clock_t *us) L4_NOTHROW;
170
/**
 * \internal
 * UTCB variant of l4_scheduler_idle_time(); \a utcb is the UTCB to use for
 * the IPC to the scheduler.
 */
L4_INLINE l4_msgtag_t
l4_scheduler_idle_time_u(l4_cap_idx_t scheduler, l4_sched_cpu_set_t const *cpus,
                         l4_kernel_clock_t *us, l4_utcb_t *utcb) L4_NOTHROW;
177
178
179
/**
 * \ingroup l4_scheduler_api
 * \copybrief L4::Scheduler::is_online
 *
 * \param scheduler  Scheduler object.
 * \param cpu        CPU number whose online status should be queried.
 *
 * \retval true   The CPU is online.
 * \retval false  The CPU is offline, or an error occurred querying the
 *                scheduler (the two cases are not distinguishable here).
 */
L4_INLINE int
l4_scheduler_is_online(l4_cap_idx_t scheduler, l4_umword_t cpu) L4_NOTHROW;
192
/**
 * \internal
 * UTCB variant of l4_scheduler_is_online(); \a utcb is the UTCB to use for
 * the IPC to the scheduler.
 */
L4_INLINE int
l4_scheduler_is_online_u(l4_cap_idx_t scheduler, l4_umword_t cpu,
                         l4_utcb_t *utcb) L4_NOTHROW;
199
200
201
/**
 * Operations on the Scheduler object.
 * \ingroup l4_scheduler_api
 * \hideinitializer
 * \internal
 */
enum L4_scheduler_ops
{
  L4_SCHEDULER_INFO_OP       = 0UL, /**< Query information about the scheduler */
  L4_SCHEDULER_RUN_THREAD_OP = 1UL, /**< Run a thread on this scheduler */
  L4_SCHEDULER_IDLE_TIME_OP  = 2UL, /**< Query idle time for the scheduler */
};
214
215 /*************** Implementations *******************/
216
217 L4_INLINE l4_sched_cpu_set_t
l4_sched_cpu_set(l4_umword_t offset,unsigned char granularity,l4_umword_t map)218 l4_sched_cpu_set(l4_umword_t offset, unsigned char granularity,
219 l4_umword_t map) L4_NOTHROW
220 {
221 l4_sched_cpu_set_t cs;
222 cs.gran_offset = ((l4_umword_t)granularity << 24) | offset;
223 cs.map = map;
224 return cs;
225 }
226
227 L4_INLINE l4_sched_param_t
l4_sched_param(unsigned prio,l4_cpu_time_t quantum)228 l4_sched_param(unsigned prio, l4_cpu_time_t quantum) L4_NOTHROW
229 {
230 l4_sched_param_t sp;
231 sp.prio = prio;
232 sp.quantum = quantum;
233 sp.affinity = l4_sched_cpu_set(0, ~0, 1);
234 return sp;
235 }
236
237
238 L4_INLINE l4_msgtag_t
l4_scheduler_info_u(l4_cap_idx_t scheduler,l4_umword_t * cpu_max,l4_sched_cpu_set_t * cpus,l4_utcb_t * utcb)239 l4_scheduler_info_u(l4_cap_idx_t scheduler, l4_umword_t *cpu_max,
240 l4_sched_cpu_set_t *cpus, l4_utcb_t *utcb) L4_NOTHROW
241 {
242 l4_msg_regs_t *m = l4_utcb_mr_u(utcb);
243 l4_msgtag_t res;
244
245 m->mr[0] = L4_SCHEDULER_INFO_OP;
246 m->mr[1] = cpus->gran_offset;
247
248 res = l4_ipc_call(scheduler, utcb, l4_msgtag(L4_PROTO_SCHEDULER, 2, 0, 0), L4_IPC_NEVER);
249
250 if (l4_msgtag_has_error(res))
251 return res;
252
253 cpus->map = m->mr[0];
254
255 if (cpu_max)
256 *cpu_max = m->mr[1];
257
258 return res;
259 }
260
261 L4_INLINE l4_msgtag_t
l4_scheduler_run_thread_u(l4_cap_idx_t scheduler,l4_cap_idx_t thread,l4_sched_param_t const * sp,l4_utcb_t * utcb)262 l4_scheduler_run_thread_u(l4_cap_idx_t scheduler, l4_cap_idx_t thread,
263 l4_sched_param_t const *sp, l4_utcb_t *utcb) L4_NOTHROW
264 {
265 l4_msg_regs_t *m = l4_utcb_mr_u(utcb);
266 m->mr[0] = L4_SCHEDULER_RUN_THREAD_OP;
267 m->mr[1] = sp->affinity.gran_offset;
268 m->mr[2] = sp->affinity.map;
269 m->mr[3] = sp->prio;
270 m->mr[4] = sp->quantum;
271 m->mr[5] = l4_map_obj_control(0, 0);
272 m->mr[6] = l4_obj_fpage(thread, 0, L4_CAP_FPAGE_RWS).raw;
273
274 return l4_ipc_call(scheduler, utcb, l4_msgtag(L4_PROTO_SCHEDULER, 5, 1, 0), L4_IPC_NEVER);
275 }
276
277 L4_INLINE l4_msgtag_t
l4_scheduler_idle_time_u(l4_cap_idx_t scheduler,l4_sched_cpu_set_t const * cpus,l4_kernel_clock_t * us,l4_utcb_t * utcb)278 l4_scheduler_idle_time_u(l4_cap_idx_t scheduler, l4_sched_cpu_set_t const *cpus,
279 l4_kernel_clock_t *us, l4_utcb_t *utcb) L4_NOTHROW
280 {
281 l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
282 l4_msgtag_t res;
283
284 v->mr[0] = L4_SCHEDULER_IDLE_TIME_OP;
285 v->mr[1] = cpus->gran_offset;
286 v->mr[2] = cpus->map;
287
288 res = l4_ipc_call(scheduler, utcb,
289 l4_msgtag(L4_PROTO_SCHEDULER, 3, 0, 0), L4_IPC_NEVER);
290
291 if (l4_msgtag_has_error(res))
292 return res;
293
294 *us = v->mr64[l4_utcb_mr64_idx(0)];
295
296 return res;
297 }
298
299
300 L4_INLINE int
l4_scheduler_is_online_u(l4_cap_idx_t scheduler,l4_umword_t cpu,l4_utcb_t * utcb)301 l4_scheduler_is_online_u(l4_cap_idx_t scheduler, l4_umword_t cpu,
302 l4_utcb_t *utcb) L4_NOTHROW
303 {
304 l4_sched_cpu_set_t s;
305 l4_msgtag_t r;
306 s.gran_offset = cpu;
307 r = l4_scheduler_info_u(scheduler, NULL, &s, utcb);
308 if (l4_msgtag_has_error(r) || l4_msgtag_label(r) < 0)
309 return 0;
310
311 return s.map & 1;
312 }
313
314
315 L4_INLINE l4_msgtag_t
l4_scheduler_info(l4_cap_idx_t scheduler,l4_umword_t * cpu_max,l4_sched_cpu_set_t * cpus)316 l4_scheduler_info(l4_cap_idx_t scheduler, l4_umword_t *cpu_max,
317 l4_sched_cpu_set_t *cpus) L4_NOTHROW
318 {
319 return l4_scheduler_info_u(scheduler, cpu_max, cpus, l4_utcb());
320 }
321
322 L4_INLINE l4_msgtag_t
l4_scheduler_run_thread(l4_cap_idx_t scheduler,l4_cap_idx_t thread,l4_sched_param_t const * sp)323 l4_scheduler_run_thread(l4_cap_idx_t scheduler,
324 l4_cap_idx_t thread, l4_sched_param_t const *sp) L4_NOTHROW
325 {
326 return l4_scheduler_run_thread_u(scheduler, thread, sp, l4_utcb());
327 }
328
329 L4_INLINE l4_msgtag_t
l4_scheduler_idle_time(l4_cap_idx_t scheduler,l4_sched_cpu_set_t const * cpus,l4_kernel_clock_t * us)330 l4_scheduler_idle_time(l4_cap_idx_t scheduler, l4_sched_cpu_set_t const *cpus,
331 l4_kernel_clock_t *us) L4_NOTHROW
332 {
333 return l4_scheduler_idle_time_u(scheduler, cpus, us, l4_utcb());
334 }
335
336 L4_INLINE int
l4_scheduler_is_online(l4_cap_idx_t scheduler,l4_umword_t cpu)337 l4_scheduler_is_online(l4_cap_idx_t scheduler, l4_umword_t cpu) L4_NOTHROW
338 {
339 return l4_scheduler_is_online_u(scheduler, cpu, l4_utcb());
340 }
341