1 /**
2 * \file
3 * Common thread related definitions.
4 */
5 /*
6 * (c) 2008-2009 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
7 * Alexander Warg <warg@os.inf.tu-dresden.de>,
8 * Björn Döbel <doebel@os.inf.tu-dresden.de>,
9 * Torsten Frenzel <frenzel@os.inf.tu-dresden.de>
10 * economic rights: Technische Universität Dresden (Germany)
11 *
12 * This file is part of TUD:OS and distributed under the terms of the
13 * GNU General Public License 2.
14 * Please see the COPYING-GPL-2 file for details.
15 *
16 * As a special exception, you may use this file as part of a free software
17 * library without restriction. Specifically, if other files instantiate
18 * templates or use macros or inline functions from this file, or you compile
19 * this file and link it with other files to produce an executable, this
20 * file does not by itself cause the resulting executable to be covered by
21 * the GNU General Public License. This exception does not however
22 * invalidate any other reasons why the executable file might be covered by
23 * the GNU General Public License.
24 */
25 #pragma once
26
27 #include <l4/sys/types.h>
28 #include <l4/sys/utcb.h>
29 #include <l4/sys/ipc.h>
30
31 /**
32 * \defgroup l4_thread_api Thread
33 * \ingroup l4_kernel_object_api
34 * Thread object.
35 *
36 * An L4 thread is a thread of execution in the L4 context.
37 * Usually user-level and kernel threads are mapped 1:1 to each other.
38 * Thread kernel objects are created using a factory, see \ref l4_factory_api
39 * (l4_factory_create_thread()).
40 *
41 * Amongst other things an L4 thread encapsulates:
42 * - CPU state
43 * - General-purpose registers
44 * - Program counter
45 * - Stack pointer
46 * - FPU state
47 * - Scheduling parameters, see the \ref l4_scheduler_api API
48 * - Execution state
49 * - Blocked, Runnable, Running
50 *
51 * Thread objects provide an API for
52 * - Thread configuration and manipulation
53 * - Thread switching.
54 *
55 * The thread control functions are used to control various aspects of a
56 * thread. See l4_thread_control_start() for more information.
57 *
58 * \includefile{l4/sys/thread.h}
59 *
60 * For the C++ interface refer to L4::Thread.
61 */
62
63
64 /**
65 * Exchange basic thread registers.
66 * \ingroup l4_thread_api
67 *
68 * \param thread Capability selector of the thread to manipulate.
69 * \param ip New instruction pointer, use ~0UL to leave the
70 * instruction pointer unchanged.
71 * \param sp New stack pointer, use ~0UL to leave the stack
72 * pointer unchanged.
73 * \param flags Ex-regs flags, see #L4_thread_ex_regs_flags.
74 *
75 * \return System call return tag
76 *
77 * This method allows to manipulate a thread. The basic functionality is to set
78 * the instruction pointer and the stack pointer of a thread. Additionally,
79 * this method allows also to cancel ongoing IPC operations and to force the
80 * thread to raise an artificial exception (see `flags`).
81 *
82 * The thread is started using l4_scheduler_run_thread(). However, if at the
83 * time l4_scheduler_run_thread() is called, the instruction pointer of the
84 * thread is invalid, a later call to l4_thread_ex_regs() with a valid
85 * instruction pointer might start the thread.
86 */
87 L4_INLINE l4_msgtag_t
88 l4_thread_ex_regs(l4_cap_idx_t thread, l4_addr_t ip, l4_addr_t sp,
89 l4_umword_t flags) L4_NOTHROW;
90
91 /**
92 * \ingroup l4_thread_api
93 * \copybrief L4::Thread::ex_regs
94 * \param thread Capability selector of the thread to manipulate.
95 * \copydetails L4::Thread::ex_regs
96 */
97 L4_INLINE l4_msgtag_t
98 l4_thread_ex_regs_u(l4_cap_idx_t thread, l4_addr_t ip, l4_addr_t sp,
99 l4_umword_t flags, l4_utcb_t *utcb) L4_NOTHROW;
100
101 /**
102 * Exchange basic thread registers and return previous values.
103 * \ingroup l4_thread_api
104 *
105 * \param thread Capability selector of the thread to manipulate.
106 * \param[in,out] ip New instruction pointer, use ~0UL to leave the
107 * instruction pointer unchanged, return previous
108 * instruction pointer.
109 * \param[in,out] sp New stack pointer, use ~0UL to leave the stack
110 * pointer unchanged, returns previous stack pointer.
111 * \param[in,out] flags Ex-regs flags, see #L4_thread_ex_regs_flags, return
112 * previous CPU flags of the thread.
113 *
114 * \return System call return tag
115 *
116 * This method allows to manipulate and start a thread. The basic
117 * functionality is to set the instruction pointer and the stack pointer of a
118 * thread. Additionally, this method allows also to cancel ongoing IPC
119 * operations and to force the thread to raise an artificial exception (see
120 * `flags`).
121 *
122 * Returned values are valid only if function returns successfully.
123 */
124 L4_INLINE l4_msgtag_t
125 l4_thread_ex_regs_ret(l4_cap_idx_t thread, l4_addr_t *ip, l4_addr_t *sp,
126 l4_umword_t *flags) L4_NOTHROW;
127
128 /**
129 * \ingroup l4_thread_api
130 * \copybrief L4::Thread::ex_regs(l4_addr_t*,l4_addr_t*,l4_umword_t*,l4_utcb_t*)
131 * \param thread Capability selector of the thread to manipulate.
132 * \copydetails L4::Thread::ex_regs(l4_addr_t*,l4_addr_t*,l4_umword_t*,l4_utcb_t*)
133 */
134 L4_INLINE l4_msgtag_t
135 l4_thread_ex_regs_ret_u(l4_cap_idx_t thread, l4_addr_t *ip, l4_addr_t *sp,
136 l4_umword_t *flags, l4_utcb_t *utcb) L4_NOTHROW;
137
138
139
140 /**
141 * \defgroup l4_thread_control_api Thread control
142 * \ingroup l4_thread_api
143 *
144 * API for Thread Control method.
145 *
146 *
147 * The thread control API provides access to almost any parameter of a thread
148 * object. The API is based on a single invocation of the thread object.
149 * However, because of the huge amount of parameters, the API provides a set
150 * of functions to set specific parameters of a thread and a commit function
151 * to commit the thread control call (see l4_thread_control_commit()).
152 *
153 * A thread control operation must always start with l4_thread_control_start()
154 * and be committed with l4_thread_control_commit(). All other thread control
155 * parameter setter functions must be called between these two functions.
156 *
157 * An example for a sequence of thread control API calls can be found below.
158 *
159 * l4_utcb_t *u = l4_utcb(); <br>
160 * \link l4_thread_control_start() l4_thread_control_start(u)\endlink; <br>
161 * \link l4_thread_control_pager() l4_thread_control_pager(u, pager_cap)\endlink; <br>
162 * \link l4_thread_control_bind() l4_thread_control_bind (u, thread_utcb, task)\endlink; <br>
163 * \link l4_thread_control_commit() l4_thread_control_commit(u, thread_cap)\endlink; <br>
164 *
165 */
166
167 /**
168 * Start a thread control API sequence.
169 * \ingroup l4_thread_control_api
170 *
171 * This function starts a sequence of thread control API functions.
 * After this function, any of the following functions may be called in any
 * order.
173 * - l4_thread_control_pager()
174 * - l4_thread_control_exc_handler()
175 * - l4_thread_control_bind()
176 * - l4_thread_control_alien()
177 * - l4_thread_control_ux_host_syscall() (Fiasco-UX only)
178 *
179 * To commit the changes to the thread l4_thread_control_commit() must be
180 * called in the end.
181 *
182 * \note The thread control API calls store the parameters for the thread in
183 * the UTCB of the caller, this means between l4_thread_control_start()
184 * and l4_thread_control_commit() no functions that modify the UTCB
185 * contents must be called.
186 */
187 L4_INLINE void
188 l4_thread_control_start(void) L4_NOTHROW;
189
190 /**
191 * \internal
192 * \ingroup l4_thread_control_api
193 */
194 L4_INLINE void
195 l4_thread_control_start_u(l4_utcb_t *utcb) L4_NOTHROW;
196
197 /**
198 * Set the pager.
199 * \ingroup l4_thread_control_api
200 *
201 * \param pager Capability selector invoked to send a page-fault IPC.
202 *
203 * \note The pager capability selector is interpreted in the task the thread
204 * is bound to (executes in).
205 */
206 L4_INLINE void
207 l4_thread_control_pager(l4_cap_idx_t pager) L4_NOTHROW;
208
209 /**
210 * \internal
211 * \ingroup l4_thread_control_api
212 */
213 L4_INLINE void
214 l4_thread_control_pager_u(l4_cap_idx_t pager, l4_utcb_t *utcb) L4_NOTHROW;
215
216 /**
217 * Set the exception handler.
218 * \ingroup l4_thread_control_api
219 *
220 * \param exc_handler Capability selector invoked to send an exception IPC.
221 *
222 * \note The exception-handler capability selector is interpreted in the task
223 * the thread is bound to (executes in).
224 */
225 L4_INLINE void
226 l4_thread_control_exc_handler(l4_cap_idx_t exc_handler) L4_NOTHROW;
227
228 /**
229 * \internal
230 * \ingroup l4_thread_control_api
231 */
232 L4_INLINE void
233 l4_thread_control_exc_handler_u(l4_cap_idx_t exc_handler,
234 l4_utcb_t *utcb) L4_NOTHROW;
235
236 /**
237 * Bind the thread to a task.
238 * \ingroup l4_thread_control_api
239 *
240 * \param thread_utcb The thread’s UTCB address within the task it shall
241 * be bound to. The address must be aligned
242 * (architecture dependent; at least word aligned) and
243 * it must point to at least #L4_UTCB_OFFSET bytes of
244 * kernel-user memory.
245 * \param task The task the thread shall be bound to.
246 *
247 * A thread may execute code in the context of a task if and only if the
248 * thread is bound to the task. To actually start execution,
249 * l4_thread_ex_regs() needs to be used. Execution in the context of the
250 * task means that the code has access to all the task’s resources (and
251 * only those). The executed code itself must be one of those resources. A
252 * thread can be bound at most once to a task.
253 *
254 * \note The UTCBs of different threads in the same task should not overlap
255 * in order to prevent data corruption.
256 */
257 L4_INLINE void
258 l4_thread_control_bind(l4_utcb_t *thread_utcb,
259 l4_cap_idx_t task) L4_NOTHROW;
260
261 /**
262 * \internal
263 * \ingroup l4_thread_control_api
264 */
265 L4_INLINE void
266 l4_thread_control_bind_u(l4_utcb_t *thread_utcb,
267 l4_cap_idx_t task, l4_utcb_t *utcb) L4_NOTHROW;
268
269 /**
270 * Enable alien mode.
271 * \ingroup l4_thread_control_api
272 * \param on Boolean value defining the state of the feature.
273 *
274 * Alien mode means the thread is not allowed to invoke L4 kernel objects
275 * directly and it is also not allowed to allocate FPU state. All those
276 * operations result in an exception IPC that gets sent through the pager
277 * capability. The responsible pager can then selectively allow an object
278 * invocation or allocate FPU state for the thread.
279 *
280 * This feature can be used to attach a debugger to a thread and trace all
281 * object invocations.
282 */
283 L4_INLINE void
284 l4_thread_control_alien(int on) L4_NOTHROW;
285
286 /**
287 * \internal
288 * \ingroup l4_thread_control_api
289 */
290 L4_INLINE void
291 l4_thread_control_alien_u(l4_utcb_t *utcb, int on) L4_NOTHROW;
292
293 /**
294 * Enable pass through of native host (Linux) system calls.
295 * \ingroup l4_thread_control_api
296 * \param on Boolean value defining the state of the feature.
297 *
298 * \pre Running on Fiasco-UX
299 *
300 * This enables the thread to do host system calls. This feature is only
301 * available in Fiasco-UX and ignored in other environments.
302 */
303 L4_INLINE void
304 l4_thread_control_ux_host_syscall(int on) L4_NOTHROW;
305
306 /**
307 * \internal
308 * \ingroup l4_thread_control_api
309 */
310 L4_INLINE void
311 l4_thread_control_ux_host_syscall_u(l4_utcb_t *utcb, int on) L4_NOTHROW;
312
313
314
315 /**
316 * Commit the thread control parameters.
317 * \ingroup l4_thread_control_api
318 *
319 * \param thread Capability selector of target thread to commit to.
320 * \return system call return tag
321 */
322 L4_INLINE l4_msgtag_t
323 l4_thread_control_commit(l4_cap_idx_t thread) L4_NOTHROW;
324
325 /**
326 * \internal
327 * \ingroup l4_thread_control_api
328 */
329 L4_INLINE l4_msgtag_t
330 l4_thread_control_commit_u(l4_cap_idx_t thread, l4_utcb_t *utcb) L4_NOTHROW;
331
332 /**
333 * Yield current time slice.
334 * \ingroup l4_thread_api
335 *
336 * \return system call return tag
337 */
338 L4_INLINE l4_msgtag_t
339 l4_thread_yield(void) L4_NOTHROW;
340
341 /**
342 * Switch to another thread (and donate the remaining time slice).
343 * \ingroup l4_thread_api
344 *
345 * \param to_thread The thread to switch to.
346 *
347 * \return system call return tag
348 */
349 L4_INLINE l4_msgtag_t
350 l4_thread_switch(l4_cap_idx_t to_thread) L4_NOTHROW;
351
352 /**
353 * \internal
354 * \ingroup l4_thread_api
355 */
356 L4_INLINE l4_msgtag_t
357 l4_thread_switch_u(l4_cap_idx_t to_thread, l4_utcb_t *utcb) L4_NOTHROW;
358
359
360
361 /**
362 * Get consumed time of thread in µs.
363 * \ingroup l4_thread_api
364 *
365 * \param thread Thread to get the consumed time from.
366 * \param[out] us Consumed time in µs.
367 *
368 * \return system call return tag
369 */
370 L4_INLINE l4_msgtag_t
371 l4_thread_stats_time(l4_cap_idx_t thread, l4_kernel_clock_t *us) L4_NOTHROW;
372
373 /**
374 * \internal
375 * \ingroup l4_thread_api
376 */
377 L4_INLINE l4_msgtag_t
378 l4_thread_stats_time_u(l4_cap_idx_t thread, l4_kernel_clock_t *us,
379 l4_utcb_t *utcb) L4_NOTHROW;
380
381
382 /**
383 * vCPU return from event handler.
384 * \ingroup l4_thread_api
385 *
386 * \return Message tag to be used for l4_sndfpage_add() and
387 * l4_thread_vcpu_resume_commit()
388 *
389 * The vCPU resume functionality is split in multiple functions to allow the
390 * specification of additional send-flex-pages using l4_sndfpage_add().
391 */
392 L4_INLINE l4_msgtag_t
393 l4_thread_vcpu_resume_start(void) L4_NOTHROW;
394
395 /**
396 * \internal
397 * \ingroup l4_thread_api
398 */
399 L4_INLINE l4_msgtag_t
400 l4_thread_vcpu_resume_start_u(l4_utcb_t *utcb) L4_NOTHROW;
401
402 /**
403 * Commit vCPU resume.
404 * \ingroup l4_thread_api
405 *
406 * \param thread Thread to be resumed, the invalid cap can be used
407 * for the current thread.
408 * \param tag Tag to use, returned by l4_thread_vcpu_resume_start()
409 *
410 * \return System call result message tag. In extended vCPU mode and when
411 * the virtual interrupts are cleared, the return code 1 flags an incoming
412 * IPC message, whereas 0 indicates a VM exit. An error is returned upon:
413 * - Insufficient rights on the given task capability (-L4_EPERM).
414 * - Given task capability is invalid (-L4_ENOENT).
415 * - A supplied mapping failed.
416 *
417 * To resume into another address space the capability to the target task
418 * must be set in the vCPU-state, with all lower bits in the task
419 * capability cleared (see #L4_CAP_MASK). The kernel adds the
420 * #L4_SYSF_SEND flag to this field to indicate that the capability has been
421 * referenced in the kernel. Consecutive resumes will not reference the task
422 * capability again until all bits are cleared again. To release a task use the
423 * different task capability or use an invalid capability with the
424 * #L4_SYSF_REPLY flag set.
425 *
426 * \see l4_vcpu_state_t
427 */
428 L4_INLINE l4_msgtag_t
429 l4_thread_vcpu_resume_commit(l4_cap_idx_t thread,
430 l4_msgtag_t tag) L4_NOTHROW;
431
432 /**
433 * \internal
434 * \ingroup l4_thread_api
435 */
436 L4_INLINE l4_msgtag_t
437 l4_thread_vcpu_resume_commit_u(l4_cap_idx_t thread,
438 l4_msgtag_t tag, l4_utcb_t *utcb) L4_NOTHROW;
439
440
441 /**
442 * Enable or disable the vCPU feature for the thread.
443 * \ingroup l4_thread_api
444 *
445 * \param thread Capability selector of the thread for which the vCPU
446 * feature shall be enabled or disabled.
447 * \param vcpu_state The virtual address where the kernel shall store the vCPU
448 * state in case of vCPU exits. The address must be a valid
449 * kernel-user-memory address (see l4_task_add_ku_mem()).
450 *
451 * \return Syscall return tag.
452 *
453 * This function enables the vCPU feature of the `thread` if `vcpu_state`
454 * is set to a valid kernel-user-memory address, or disables the vCPU feature
455 * if `vcpu_state` is 0. (Disable: optional, currently unsupported.)
456 */
457 L4_INLINE l4_msgtag_t
458 l4_thread_vcpu_control(l4_cap_idx_t thread, l4_addr_t vcpu_state) L4_NOTHROW;
459
460 /**
461 * \ingroup l4_thread_api
462 * \copybrief L4::Thread::vcpu_control
463 * \param thread Capability selector of the thread for which the vCPU feature
464 * shall be enabled or disabled.
465 * \copydetails L4::Thread::vcpu_control
466 */
467 L4_INLINE l4_msgtag_t
468 l4_thread_vcpu_control_u(l4_cap_idx_t thread, l4_addr_t vcpu_state,
469 l4_utcb_t *utcb) L4_NOTHROW;
470
471 /**
472 * Enable or disable the extended vCPU feature for the thread.
473 * \ingroup l4_thread_api
474 *
475 * \param thread Capability selector of the thread for which the
476 * extended vCPU feature shall be enabled or disabled.
477 * \param ext_vcpu_state The virtual address where the kernel shall store the
478 * vCPU state in case of vCPU exits. The address must be
479 * a valid kernel-user-memory address (see
480 * l4_task_add_ku_mem()).
481 *
482 * \return Systemcall result message tag.
483 *
484 * The extended vCPU feature allows the use of hardware-virtualization
485 * features such as Intel's VT or AMD's SVM.
486 *
487 * This function enables the extended vCPU feature of the `thread`
488 * if `ext_vcpu_state` is set to a valid kernel-user-memory address, or disables
489 * the vCPU feature if `ext_vcpu_state` is 0.
490 *
491 * \note The extended vCPU mode includes the normal vCPU mode.
492 */
493 L4_INLINE l4_msgtag_t
494 l4_thread_vcpu_control_ext(l4_cap_idx_t thread, l4_addr_t ext_vcpu_state) L4_NOTHROW;
495
496 /**
497 * \ingroup l4_thread_api
498 * \copybrief L4::Thread::vcpu_control_ext
499 * \param thread Capability selector of the thread for which the extended vCPU
500 * feature shall be enabled or disabled.
501 * \copydetails L4::Thread::vcpu_control_ext
502 */
503 L4_INLINE l4_msgtag_t
504 l4_thread_vcpu_control_ext_u(l4_cap_idx_t thread, l4_addr_t ext_vcpu_state,
505 l4_utcb_t *utcb) L4_NOTHROW;
506
507
508 /**
509 * \copybrief L4::Thread::register_del_irq
510 * \ingroup l4_thread_api
511 *
512 * \param thread Thread to register IRQ for.
513 * \param irq Capability selector for the IRQ object to be triggered.
514 *
515 * List of deletion events:
516 * * deletion of an IPC gate bound to this thread.
517 *
518 * \return System call return tag containing the return code.
519 */
520 L4_INLINE l4_msgtag_t
521 l4_thread_register_del_irq(l4_cap_idx_t thread, l4_cap_idx_t irq) L4_NOTHROW;
522
523 /**
524 * \internal
525 * \ingroup l4_thread_api
526 */
527 L4_INLINE l4_msgtag_t
528 l4_thread_register_del_irq_u(l4_cap_idx_t thread, l4_cap_idx_t irq,
529 l4_utcb_t *utcb) L4_NOTHROW;
530
531 /**
532 * Start a thread sender modification sequence.
533 * \ingroup l4_thread_api
534 *
535 * Add modification rules with l4_thread_modify_sender_add() and commit with
536 * l4_thread_modify_sender_commit(). Do not touch the UTCB between
537 * l4_thread_modify_sender_start() and l4_thread_modify_sender_commit().
538 *
539 * \see l4_thread_modify_sender_add
540 * \see l4_thread_modify_sender_commit
541 */
542 L4_INLINE l4_msgtag_t
543 l4_thread_modify_sender_start(void) L4_NOTHROW;
544
545 /**
546 * \internal
547 * \ingroup l4_thread_api
548 */
549 L4_INLINE l4_msgtag_t
550 l4_thread_modify_sender_start_u(l4_utcb_t *u) L4_NOTHROW;
551
552 /**
553 * Add a modification pattern to a sender modification sequence.
554 * \ingroup l4_thread_api
555 *
556 * \param tag Tag received from l4_thread_modify_sender_start() or
557 * previous l4_thread_modify_sender_add() calls from
558 * the same sequence.
559 * \param match_mask Bitmask of bits to match the label.
560 * \param match Bitmask that must be equal to the label after applying
561 * match_mask.
562 * \param del_bits Bits to be deleted from the label.
563 * \param add_bits Bits to be added to the label.
564 *
 * \return 0 on success, <0 on error
566 *
567 * In pseudo code:
568 * if ((sender_label & match_mask) == match)
569 * { sender_label = (sender_label & ~del_bits) | add_bits; }
570 *
571 * Only the first match is applied.
572 *
573 * \see l4_thread_modify_sender_start
574 * \see l4_thread_modify_sender_commit
575 */
576 L4_INLINE int
577 l4_thread_modify_sender_add(l4_umword_t match_mask,
578 l4_umword_t match,
579 l4_umword_t del_bits,
580 l4_umword_t add_bits,
581 l4_msgtag_t *tag) L4_NOTHROW;
582
583 /**
584 * \internal
585 * \ingroup l4_thread_api
586 */
587 L4_INLINE int
588 l4_thread_modify_sender_add_u(l4_umword_t match_mask,
589 l4_umword_t match,
590 l4_umword_t del_bits,
591 l4_umword_t add_bits,
592 l4_msgtag_t *tag, l4_utcb_t *u) L4_NOTHROW;
593
594 /**
595 * Apply (commit) a sender modification sequence.
596 * \ingroup l4_thread_api
597 *
598 * The modification rules are applied to all IPCs to the thread (whether
599 * directly or by IPC gate) that are already in flight, that is that the sender
600 * is already blocking on.
601 *
602 * \see l4_thread_modify_sender_start
603 * \see l4_thread_modify_sender_add
604 */
605 L4_INLINE l4_msgtag_t
606 l4_thread_modify_sender_commit(l4_cap_idx_t thread, l4_msgtag_t tag) L4_NOTHROW;
607
608 /**
609 * \internal
610 * \ingroup l4_thread_api
611 */
612 L4_INLINE l4_msgtag_t
613 l4_thread_modify_sender_commit_u(l4_cap_idx_t thread, l4_msgtag_t tag,
614 l4_utcb_t *u) L4_NOTHROW;
615
616 /**
617 * Operations on thread objects.
618 * \ingroup l4_protocol_ops
619 * \hideinitializer
620 * \internal
621 */
enum L4_thread_ops
{
  L4_THREAD_CONTROL_OP                = 0UL,    /**< Control operation */
  L4_THREAD_EX_REGS_OP                = 1UL,    /**< Exchange registers operation */
  L4_THREAD_SWITCH_OP                 = 2UL,    /**< Do a thread switch */
  L4_THREAD_STATS_OP                  = 3UL,    /**< Thread statistics */
  L4_THREAD_VCPU_RESUME_OP            = 4UL,    /**< VCPU resume */
  L4_THREAD_REGISTER_DELETE_IRQ_OP    = 5UL,    /**< Register an IPC-gate deletion IRQ */
  L4_THREAD_MODIFY_SENDER_OP          = 6UL,    /**< Modify all senders IDs that match the given pattern */
  L4_THREAD_VCPU_CONTROL_OP           = 7UL,    /**< Enable / disable VCPU feature */
  /** Enable / disable extended VCPU feature; shares the opcode of
   *  #L4_THREAD_VCPU_CONTROL_OP with an extra bit above the opcode mask. */
  L4_THREAD_VCPU_CONTROL_EXT_OP       = L4_THREAD_VCPU_CONTROL_OP | 0x10000,
  L4_THREAD_X86_GDT_OP                = 0x10UL, /**< x86: GDT operation */
  L4_THREAD_ARM_TPIDRURO_OP           = 0x10UL, /**< ARM: set TPIDRURO register */
  L4_THREAD_AMD64_SET_SEGMENT_BASE_OP = 0x12UL, /**< amd64: set segment base */
  L4_THREAD_AMD64_GET_SEGMENT_INFO_OP = 0x13UL, /**< amd64: get segment information */
  L4_THREAD_OPCODE_MASK               = 0xffff, /**< Mask for opcodes */
};
639
640 /**
641 * Flags for the thread control operation.
642 * \ingroup l4_thread_api
643 * \hideinitializer
644 * \internal
645 *
646 * Values for the enabled flags need to be given in their appropriate field
647 * in the UTCB,
648 * \see l4_thread_control
649 */
enum L4_thread_control_flags
{
  /* All flags live above #L4_THREAD_OPCODE_MASK so they can be OR'ed into
   * the same message-register word as the opcode. */
  /** The pager will be given. */
  L4_THREAD_CONTROL_SET_PAGER       = 0x0010000,
  /** The task to bind the thread to will be given. */
  L4_THREAD_CONTROL_BIND_TASK       = 0x0200000,
  /** Alien state of the thread is set. */
  L4_THREAD_CONTROL_ALIEN           = 0x0400000,
  /** Fiasco-UX only: pass-through of host system calls is set. */
  L4_THREAD_CONTROL_UX_NATIVE       = 0x0800000,
  /** The exception handler of the thread will be given. */
  L4_THREAD_CONTROL_SET_EXC_HANDLER = 0x1000000,
};
663
664 /**
665 * Indices for the values in the message register for thread control.
666 * \ingroup l4_thread_api
667 * \hideinitializer
668 * \internal
669 *
670 * The values indicate the index in the message registers during
671 * thread-control operation.
672 */
enum L4_thread_control_mr_indices
{
  L4_THREAD_CONTROL_MR_IDX_FLAGS       = 0, /**< Opcode and flags, \see #L4_thread_control_flags. */
  L4_THREAD_CONTROL_MR_IDX_PAGER       = 1, /**< Index for pager cap */
  L4_THREAD_CONTROL_MR_IDX_EXC_HANDLER = 2, /**< Index for exception handler */
  L4_THREAD_CONTROL_MR_IDX_FLAG_VALS   = 4, /**< Index for feature values (e.g. alien, UX native) */
  L4_THREAD_CONTROL_MR_IDX_BIND_UTCB   = 5, /**< Index for UTCB address for bind */
  L4_THREAD_CONTROL_MR_IDX_BIND_TASK   = 6, /**< Index for task flex-page for bind (two words) */
};
682
683 /**
684 * Flags for the thread ex-regs operation.
685 * \ingroup l4_thread_api
686 * \hideinitializer
687 */
enum L4_thread_ex_regs_flags
{
  /* Flags start above #L4_THREAD_OPCODE_MASK; they are OR'ed with
   * #L4_THREAD_EX_REGS_OP in the first message register. */
  L4_THREAD_EX_REGS_CANCEL            = 0x10000UL, /**< Cancel ongoing IPC in the thread. */
  L4_THREAD_EX_REGS_TRIGGER_EXCEPTION = 0x20000UL, /**< Trigger artificial exception in thread. */
};
693
694
695 /* IMPLEMENTATION -----------------------------------------------------------*/
696
697 #include <l4/sys/ipc.h>
698 #include <l4/sys/types.h>
699
700 L4_INLINE l4_msgtag_t
l4_thread_ex_regs_u(l4_cap_idx_t thread,l4_addr_t ip,l4_addr_t sp,l4_umword_t flags,l4_utcb_t * utcb)701 l4_thread_ex_regs_u(l4_cap_idx_t thread, l4_addr_t ip, l4_addr_t sp,
702 l4_umword_t flags, l4_utcb_t *utcb) L4_NOTHROW
703 {
704 l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
705 v->mr[0] = L4_THREAD_EX_REGS_OP | flags;
706 v->mr[1] = ip;
707 v->mr[2] = sp;
708 return l4_ipc_call(thread, utcb, l4_msgtag(L4_PROTO_THREAD, 3, 0, 0), L4_IPC_NEVER);
709 }
710
711 L4_INLINE l4_msgtag_t
l4_thread_ex_regs_ret_u(l4_cap_idx_t thread,l4_addr_t * ip,l4_addr_t * sp,l4_umword_t * flags,l4_utcb_t * utcb)712 l4_thread_ex_regs_ret_u(l4_cap_idx_t thread, l4_addr_t *ip, l4_addr_t *sp,
713 l4_umword_t *flags, l4_utcb_t *utcb) L4_NOTHROW
714 {
715 l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
716 l4_msgtag_t ret = l4_thread_ex_regs_u(thread, *ip, *sp, *flags, utcb);
717 if (l4_error_u(ret, utcb))
718 return ret;
719
720 *flags = v->mr[0];
721 *ip = v->mr[1];
722 *sp = v->mr[2];
723 return ret;
724 }
725
726 L4_INLINE void
l4_thread_control_start_u(l4_utcb_t * utcb)727 l4_thread_control_start_u(l4_utcb_t *utcb) L4_NOTHROW
728 {
729 l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
730 v->mr[L4_THREAD_CONTROL_MR_IDX_FLAGS] = L4_THREAD_CONTROL_OP;
731 }
732
733 L4_INLINE void
l4_thread_control_pager_u(l4_cap_idx_t pager,l4_utcb_t * utcb)734 l4_thread_control_pager_u(l4_cap_idx_t pager, l4_utcb_t *utcb) L4_NOTHROW
735 {
736 l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
737 v->mr[L4_THREAD_CONTROL_MR_IDX_FLAGS] |= L4_THREAD_CONTROL_SET_PAGER;
738 v->mr[L4_THREAD_CONTROL_MR_IDX_PAGER] = pager;
739 }
740
741 L4_INLINE void
l4_thread_control_exc_handler_u(l4_cap_idx_t exc_handler,l4_utcb_t * utcb)742 l4_thread_control_exc_handler_u(l4_cap_idx_t exc_handler,
743 l4_utcb_t *utcb) L4_NOTHROW
744 {
745 l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
746 v->mr[L4_THREAD_CONTROL_MR_IDX_FLAGS] |= L4_THREAD_CONTROL_SET_EXC_HANDLER;
747 v->mr[L4_THREAD_CONTROL_MR_IDX_EXC_HANDLER] = exc_handler;
748 }
749
750 L4_INLINE void
l4_thread_control_bind_u(l4_utcb_t * thread_utcb,l4_cap_idx_t task,l4_utcb_t * utcb)751 l4_thread_control_bind_u(l4_utcb_t *thread_utcb, l4_cap_idx_t task,
752 l4_utcb_t *utcb) L4_NOTHROW
753 {
754 l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
755 v->mr[L4_THREAD_CONTROL_MR_IDX_FLAGS] |= L4_THREAD_CONTROL_BIND_TASK;
756 v->mr[L4_THREAD_CONTROL_MR_IDX_BIND_UTCB] = (l4_addr_t)thread_utcb;
757 v->mr[L4_THREAD_CONTROL_MR_IDX_BIND_TASK] = L4_ITEM_MAP;
758 v->mr[L4_THREAD_CONTROL_MR_IDX_BIND_TASK + 1] = l4_obj_fpage(task, 0, L4_CAP_FPAGE_RWS).raw;
759 }
760
761 L4_INLINE void
l4_thread_control_alien_u(l4_utcb_t * utcb,int on)762 l4_thread_control_alien_u(l4_utcb_t *utcb, int on) L4_NOTHROW
763 {
764 l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
765 v->mr[L4_THREAD_CONTROL_MR_IDX_FLAGS] |= L4_THREAD_CONTROL_ALIEN;
766 v->mr[L4_THREAD_CONTROL_MR_IDX_FLAG_VALS] |= on ? L4_THREAD_CONTROL_ALIEN : 0;
767 }
768
769 L4_INLINE void
l4_thread_control_ux_host_syscall_u(l4_utcb_t * utcb,int on)770 l4_thread_control_ux_host_syscall_u(l4_utcb_t *utcb, int on) L4_NOTHROW
771 {
772 l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
773 v->mr[L4_THREAD_CONTROL_MR_IDX_FLAGS] |= L4_THREAD_CONTROL_UX_NATIVE;
774 v->mr[L4_THREAD_CONTROL_MR_IDX_FLAG_VALS] |= on ? L4_THREAD_CONTROL_UX_NATIVE : 0;
775 }
776
777 L4_INLINE l4_msgtag_t
l4_thread_control_commit_u(l4_cap_idx_t thread,l4_utcb_t * utcb)778 l4_thread_control_commit_u(l4_cap_idx_t thread, l4_utcb_t *utcb) L4_NOTHROW
779 {
780 int items = 0;
781 if (l4_utcb_mr_u(utcb)->mr[L4_THREAD_CONTROL_MR_IDX_FLAGS] & L4_THREAD_CONTROL_BIND_TASK)
782 items = 1;
783 return l4_ipc_call(thread, utcb, l4_msgtag(L4_PROTO_THREAD, 6, items, 0), L4_IPC_NEVER);
784 }
785
786
L4_INLINE l4_msgtag_t
l4_thread_yield(void) L4_NOTHROW
{
  /* Yield is realized as a receive on the invalid capability with a zero
   * send/receive timeout: no message is transferred and the call returns
   * immediately. The return value of the receive is deliberately ignored;
   * a dummy zero tag is reported to the caller. */
  l4_ipc_receive(L4_INVALID_CAP, NULL, L4_IPC_BOTH_TIMEOUT_0);
  return l4_msgtag(0, 0, 0, 0);
}
793
794 /* Preliminary, to be changed */
795 L4_INLINE l4_msgtag_t
l4_thread_switch_u(l4_cap_idx_t to_thread,l4_utcb_t * utcb)796 l4_thread_switch_u(l4_cap_idx_t to_thread, l4_utcb_t *utcb) L4_NOTHROW
797 {
798 l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
799 v->mr[0] = L4_THREAD_SWITCH_OP;
800 return l4_ipc_call(to_thread, utcb, l4_msgtag(L4_PROTO_THREAD, 1, 0, 0), L4_IPC_NEVER);
801 }
802
803
804 L4_INLINE l4_msgtag_t
l4_thread_stats_time_u(l4_cap_idx_t thread,l4_kernel_clock_t * us,l4_utcb_t * utcb)805 l4_thread_stats_time_u(l4_cap_idx_t thread, l4_kernel_clock_t *us,
806 l4_utcb_t *utcb) L4_NOTHROW
807 {
808 l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
809 l4_msgtag_t res;
810
811 v->mr[0] = L4_THREAD_STATS_OP;
812
813 res = l4_ipc_call(thread, utcb, l4_msgtag(L4_PROTO_THREAD, 1, 0, 0), L4_IPC_NEVER);
814
815 if (l4_msgtag_has_error(res))
816 return res;
817
818 *us = v->mr64[l4_utcb_mr64_idx(0)];
819
820 return res;
821 }
822
823 L4_INLINE l4_msgtag_t
l4_thread_vcpu_resume_start_u(l4_utcb_t * utcb)824 l4_thread_vcpu_resume_start_u(l4_utcb_t *utcb) L4_NOTHROW
825 {
826 l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
827 v->mr[0] = L4_THREAD_VCPU_RESUME_OP;
828 return l4_msgtag(L4_PROTO_THREAD, 1, 0, 0);
829 }
830
L4_INLINE l4_msgtag_t
l4_thread_vcpu_resume_commit_u(l4_cap_idx_t thread,
                               l4_msgtag_t tag, l4_utcb_t *utcb) L4_NOTHROW
{
  /* The message was already marshalled by l4_thread_vcpu_resume_start_u()
   * (and possibly extended with send items); just perform the call using
   * the tag built up so far. */
  return l4_ipc_call(thread, utcb, tag, L4_IPC_NEVER);
}
837
L4_INLINE l4_msgtag_t
l4_thread_ex_regs(l4_cap_idx_t thread, l4_addr_t ip, l4_addr_t sp,
                  l4_umword_t flags) L4_NOTHROW
{
  /* Convenience wrapper using the current thread's UTCB. */
  return l4_thread_ex_regs_u(thread, ip, sp, flags, l4_utcb());
}

L4_INLINE l4_msgtag_t
l4_thread_ex_regs_ret(l4_cap_idx_t thread, l4_addr_t *ip, l4_addr_t *sp,
                      l4_umword_t *flags) L4_NOTHROW
{
  /* Convenience wrapper using the current thread's UTCB. */
  return l4_thread_ex_regs_ret_u(thread, ip, sp, flags, l4_utcb());
}

L4_INLINE void
l4_thread_control_start(void) L4_NOTHROW
{
  /* Convenience wrapper using the current thread's UTCB. */
  l4_thread_control_start_u(l4_utcb());
}

L4_INLINE void
l4_thread_control_pager(l4_cap_idx_t pager) L4_NOTHROW
{
  /* Convenience wrapper using the current thread's UTCB. */
  l4_thread_control_pager_u(pager, l4_utcb());
}

L4_INLINE void
l4_thread_control_exc_handler(l4_cap_idx_t exc_handler) L4_NOTHROW
{
  /* Convenience wrapper using the current thread's UTCB. */
  l4_thread_control_exc_handler_u(exc_handler, l4_utcb());
}


L4_INLINE void
l4_thread_control_bind(l4_utcb_t *thread_utcb, l4_cap_idx_t task) L4_NOTHROW
{
  /* Convenience wrapper using the current thread's UTCB. */
  l4_thread_control_bind_u(thread_utcb, task, l4_utcb());
}

L4_INLINE void
l4_thread_control_alien(int on) L4_NOTHROW
{
  /* Convenience wrapper using the current thread's UTCB. */
  l4_thread_control_alien_u(l4_utcb(), on);
}

L4_INLINE void
l4_thread_control_ux_host_syscall(int on) L4_NOTHROW
{
  /* Convenience wrapper using the current thread's UTCB. */
  l4_thread_control_ux_host_syscall_u(l4_utcb(), on);
}

L4_INLINE l4_msgtag_t
l4_thread_control_commit(l4_cap_idx_t thread) L4_NOTHROW
{
  /* Convenience wrapper using the current thread's UTCB. */
  return l4_thread_control_commit_u(thread, l4_utcb());
}
894
895
896
897
L4_INLINE l4_msgtag_t
l4_thread_switch(l4_cap_idx_t to_thread) L4_NOTHROW
{
  /* Convenience wrapper using the current thread's UTCB. */
  return l4_thread_switch_u(to_thread, l4_utcb());
}




L4_INLINE l4_msgtag_t
l4_thread_stats_time(l4_cap_idx_t thread, l4_kernel_clock_t *us) L4_NOTHROW
{
  /* Convenience wrapper using the current thread's UTCB. */
  return l4_thread_stats_time_u(thread, us, l4_utcb());
}

L4_INLINE l4_msgtag_t
l4_thread_vcpu_resume_start(void) L4_NOTHROW
{
  /* Convenience wrapper using the current thread's UTCB. */
  return l4_thread_vcpu_resume_start_u(l4_utcb());
}

L4_INLINE l4_msgtag_t
l4_thread_vcpu_resume_commit(l4_cap_idx_t thread,
                             l4_msgtag_t tag) L4_NOTHROW
{
  /* Convenience wrapper using the current thread's UTCB. */
  return l4_thread_vcpu_resume_commit_u(thread, tag, l4_utcb());
}
925
926
927 L4_INLINE l4_msgtag_t
l4_thread_register_del_irq_u(l4_cap_idx_t thread,l4_cap_idx_t irq,l4_utcb_t * u)928 l4_thread_register_del_irq_u(l4_cap_idx_t thread, l4_cap_idx_t irq,
929 l4_utcb_t *u) L4_NOTHROW
930 {
931 l4_msg_regs_t *m = l4_utcb_mr_u(u);
932 m->mr[0] = L4_THREAD_REGISTER_DELETE_IRQ_OP;
933 m->mr[1] = l4_map_obj_control(0,0);
934 m->mr[2] = l4_obj_fpage(irq, 0, L4_CAP_FPAGE_RWS).raw;
935 return l4_ipc_call(thread, u, l4_msgtag(L4_PROTO_THREAD, 1, 1, 0), L4_IPC_NEVER);
936
937 }
938
939 L4_INLINE l4_msgtag_t
l4_thread_register_del_irq(l4_cap_idx_t thread,l4_cap_idx_t irq)940 l4_thread_register_del_irq(l4_cap_idx_t thread, l4_cap_idx_t irq) L4_NOTHROW
941 {
942 return l4_thread_register_del_irq_u(thread, irq, l4_utcb());
943 }
944
945
946 L4_INLINE l4_msgtag_t
l4_thread_vcpu_control_u(l4_cap_idx_t thread,l4_addr_t vcpu_state,l4_utcb_t * utcb)947 l4_thread_vcpu_control_u(l4_cap_idx_t thread, l4_addr_t vcpu_state,
948 l4_utcb_t *utcb) L4_NOTHROW
949 {
950 l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
951 v->mr[0] = L4_THREAD_VCPU_CONTROL_OP;
952 v->mr[1] = vcpu_state;
953 return l4_ipc_call(thread, utcb, l4_msgtag(L4_PROTO_THREAD, 2, 0, 0), L4_IPC_NEVER);
954 }
955
956 L4_INLINE l4_msgtag_t
l4_thread_vcpu_control(l4_cap_idx_t thread,l4_addr_t vcpu_state)957 l4_thread_vcpu_control(l4_cap_idx_t thread, l4_addr_t vcpu_state) L4_NOTHROW
958 { return l4_thread_vcpu_control_u(thread, vcpu_state, l4_utcb()); }
959
960
961 L4_INLINE l4_msgtag_t
l4_thread_vcpu_control_ext_u(l4_cap_idx_t thread,l4_addr_t ext_vcpu_state,l4_utcb_t * utcb)962 l4_thread_vcpu_control_ext_u(l4_cap_idx_t thread, l4_addr_t ext_vcpu_state,
963 l4_utcb_t *utcb) L4_NOTHROW
964 {
965 l4_msg_regs_t *v = l4_utcb_mr_u(utcb);
966 v->mr[0] = L4_THREAD_VCPU_CONTROL_EXT_OP;
967 v->mr[1] = ext_vcpu_state;
968 return l4_ipc_call(thread, utcb, l4_msgtag(L4_PROTO_THREAD, 2, 0, 0), L4_IPC_NEVER);
969 }
970
971 L4_INLINE l4_msgtag_t
l4_thread_vcpu_control_ext(l4_cap_idx_t thread,l4_addr_t ext_vcpu_state)972 l4_thread_vcpu_control_ext(l4_cap_idx_t thread, l4_addr_t ext_vcpu_state) L4_NOTHROW
973 { return l4_thread_vcpu_control_ext_u(thread, ext_vcpu_state, l4_utcb()); }
974
975 L4_INLINE l4_msgtag_t
l4_thread_modify_sender_start_u(l4_utcb_t * u)976 l4_thread_modify_sender_start_u(l4_utcb_t *u) L4_NOTHROW
977 {
978 l4_msg_regs_t *m = l4_utcb_mr_u(u);
979 m->mr[0] = L4_THREAD_MODIFY_SENDER_OP;
980 return l4_msgtag(L4_PROTO_THREAD, 1, 0, 0);
981 }
982
983 L4_INLINE int
l4_thread_modify_sender_add_u(l4_umword_t match_mask,l4_umword_t match,l4_umword_t del_bits,l4_umword_t add_bits,l4_msgtag_t * tag,l4_utcb_t * u)984 l4_thread_modify_sender_add_u(l4_umword_t match_mask,
985 l4_umword_t match,
986 l4_umword_t del_bits,
987 l4_umword_t add_bits,
988 l4_msgtag_t *tag, l4_utcb_t *u) L4_NOTHROW
989 {
990 l4_msg_regs_t *m = l4_utcb_mr_u(u);
991 unsigned w = l4_msgtag_words(*tag);
992 if (w >= L4_UTCB_GENERIC_DATA_SIZE - 4)
993 return -L4_ENOMEM;
994
995 m->mr[w] = match_mask;
996 m->mr[w+1] = match;
997 m->mr[w+2] = del_bits;
998 m->mr[w+3] = add_bits;
999
1000 *tag = l4_msgtag(l4_msgtag_label(*tag), w + 4, 0, 0);
1001
1002 return 0;
1003 }
1004
1005 L4_INLINE l4_msgtag_t
l4_thread_modify_sender_commit_u(l4_cap_idx_t thread,l4_msgtag_t tag,l4_utcb_t * u)1006 l4_thread_modify_sender_commit_u(l4_cap_idx_t thread, l4_msgtag_t tag,
1007 l4_utcb_t *u) L4_NOTHROW
1008 {
1009 return l4_ipc_call(thread, u, tag, L4_IPC_NEVER);
1010 }
1011
1012 L4_INLINE l4_msgtag_t
l4_thread_modify_sender_start(void)1013 l4_thread_modify_sender_start(void) L4_NOTHROW
1014 {
1015 return l4_thread_modify_sender_start_u(l4_utcb());
1016 }
1017
1018 L4_INLINE int
l4_thread_modify_sender_add(l4_umword_t match_mask,l4_umword_t match,l4_umword_t del_bits,l4_umword_t add_bits,l4_msgtag_t * tag)1019 l4_thread_modify_sender_add(l4_umword_t match_mask,
1020 l4_umword_t match,
1021 l4_umword_t del_bits,
1022 l4_umword_t add_bits,
1023 l4_msgtag_t *tag) L4_NOTHROW
1024 {
1025 return l4_thread_modify_sender_add_u(match_mask, match,
1026 del_bits, add_bits, tag, l4_utcb());
1027 }
1028
1029 L4_INLINE l4_msgtag_t
l4_thread_modify_sender_commit(l4_cap_idx_t thread,l4_msgtag_t tag)1030 l4_thread_modify_sender_commit(l4_cap_idx_t thread, l4_msgtag_t tag) L4_NOTHROW
1031 {
1032 return l4_thread_modify_sender_commit_u(thread, tag, l4_utcb());
1033 }
1034