/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#pragma once

#include <config.h>
#include <api/types.h>
#include <stdint.h>
#include <arch/object/structures_gen.h>
#include <mode/types.h>
#include <sel4/macros.h>
#include <sel4/arch/constants.h>
#include <sel4/sel4_arch/constants.h>
#include <benchmark/benchmark_utilisation_.h>

enum irq_state {
    IRQInactive  = 0,
    IRQSignal    = 1,
    IRQTimer     = 2,
#ifdef ENABLE_SMP_SUPPORT
    IRQIPI       = 3,
#endif
    IRQReserved
};
typedef word_t irq_state_t;

typedef struct dschedule {
    dom_t domain;
    word_t length;
} dschedule_t;

enum asidSizeConstants {
    asidHighBits = seL4_NumASIDPoolsBits,
    asidLowBits = seL4_ASIDPoolIndexBits
};

/* Arch-independent object types */
enum endpoint_state {
    EPState_Idle = 0,
    EPState_Send = 1,
    EPState_Recv = 2
};
typedef word_t endpoint_state_t;

enum notification_state {
    NtfnState_Idle    = 0,
    NtfnState_Waiting = 1,
    NtfnState_Active  = 2
};
typedef word_t notification_state_t;

#define EP_PTR(r) ((endpoint_t *)(r))
#define EP_REF(p) ((word_t)(p))

#define NTFN_PTR(r) ((notification_t *)(r))
#define NTFN_REF(p) ((word_t)(p))

#define CTE_PTR(r) ((cte_t *)(r))
#define CTE_REF(p) ((word_t)(p))

#define CNODE_MIN_BITS 1
#define CNODE_PTR(r) (CTE_PTR(r))
#define CNODE_REF(p) (CTE_REF(p)>>CNODE_MIN_BITS)

// We would like the actual 'tcb' region (the portion that contains the tcb_t) of the tcb
// to be as large as possible, but it still needs to be aligned. As the TCB object contains
// two sub objects, the largest we can make either sub object whilst preserving size alignment
// is half the total size. To halve an object size defined in bits we just subtract 1.
//
// A diagram of a TCB kernel object that is created from untyped:
//  _______________________________________
// |     |             |                   |
// |     |             |                   |
// |cte_t| unused      |       tcb_t       |
// |     |(debug_tcb_t)|                   |
// |_____|_____________|___________________|
// 0     a             b                   c
// a = tcbCNodeEntries * sizeof(cte_t)
// b = BIT(TCB_SIZE_BITS)
// c = BIT(seL4_TCBBits)
//
#define TCB_SIZE_BITS (seL4_TCBBits - 1)

#define TCB_CNODE_SIZE_BITS (TCB_CNODE_RADIX + seL4_SlotBits)
#define TCB_CNODE_RADIX     4
#define TCB_OFFSET          BIT(TCB_SIZE_BITS)
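
/*
 * Worked example (illustrative only): on a configuration where seL4_TCBBits
 * is 11, the whole TCB object is 2048 bytes; TCB_SIZE_BITS is then 10, so the
 * tcb_t sub object is the upper 1024 bytes, starting at TCB_OFFSET == 1024.
 */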

/* Generate a tcb_t or cte_t pointer from a tcb block reference */
#define TCB_PTR(r)       ((tcb_t *)(r))
#define TCB_CTE_PTR(r,i) (((cte_t *)(r))+(i))
#define TCB_REF(p)       ((word_t)(p))

/* Generate a cte_t pointer from a tcb_t pointer */
#define TCB_PTR_CTE_PTR(p,i) \
    (((cte_t *)((word_t)(p)&~MASK(seL4_TCBBits)))+(i))
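
/*
 * This works because TCB objects are both BIT(seL4_TCBBits) bytes in size and
 * BIT(seL4_TCBBits) aligned: clearing the low seL4_TCBBits bits of any pointer
 * into the object (such as a tcb_t pointer) recovers the object base, which is
 * where the CTE array starts.
 */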

#define SC_REF(p) ((word_t) (p))
#define SC_PTR(r) ((sched_context_t *) (r))

#define REPLY_REF(p) ((word_t) (p))
#define REPLY_PTR(r) ((reply_t *) (r))

#define WORD_PTR(r) ((word_t *)(r))
#define WORD_REF(p) ((word_t)(p))

#define ZombieType_ZombieTCB        BIT(wordRadix)
#define ZombieType_ZombieCNode(n)   ((n) & MASK(wordRadix))

static inline cap_t CONST Zombie_new(word_t number, word_t type, word_t ptr)
{
    word_t mask;

    if (type == ZombieType_ZombieTCB) {
        mask = MASK(TCB_CNODE_RADIX + 1);
    } else {
        mask = MASK(type + 1);
    }

    return cap_zombie_cap_new((ptr & ~mask) | (number & mask), type);
}

static inline word_t CONST cap_zombie_cap_get_capZombieBits(cap_t cap)
{
    word_t type = cap_zombie_cap_get_capZombieType(cap);
    if (type == ZombieType_ZombieTCB) {
        return TCB_CNODE_RADIX;
    }
    return ZombieType_ZombieCNode(type); /* cnode radix */
}

static inline word_t CONST cap_zombie_cap_get_capZombieNumber(cap_t cap)
{
    word_t radix = cap_zombie_cap_get_capZombieBits(cap);
    return cap_zombie_cap_get_capZombieID(cap) & MASK(radix + 1);
}

static inline word_t CONST cap_zombie_cap_get_capZombiePtr(cap_t cap)
{
    word_t radix = cap_zombie_cap_get_capZombieBits(cap);
    return cap_zombie_cap_get_capZombieID(cap) & ~MASK(radix + 1);
}

static inline cap_t CONST cap_zombie_cap_set_capZombieNumber(cap_t cap, word_t n)
{
    word_t radix = cap_zombie_cap_get_capZombieBits(cap);
    word_t ptr = cap_zombie_cap_get_capZombieID(cap) & ~MASK(radix + 1);
    return cap_zombie_cap_set_capZombieID(cap, ptr | (n & MASK(radix + 1)));
}
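
/*
 * Worked example (illustrative only): a zombie recording 200 slots left to
 * clean up in a CNode of radix 8 at a suitably aligned address p round-trips
 * through the accessors above:
 *
 *     cap_t z = Zombie_new(200, ZombieType_ZombieCNode(8), p);
 *     cap_zombie_cap_get_capZombieBits(z)   == 8
 *     cap_zombie_cap_get_capZombieNumber(z) == 200
 *     cap_zombie_cap_get_capZombiePtr(z)    == p
 *
 * Both fields share one word because a CNode of radix n is at least
 * BIT(n + seL4_SlotBits) bytes aligned, leaving the low n + 1 bits of its
 * address free to hold the slot count.
 */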

/* Capability table entry (CTE) */
struct cte {
    cap_t cap;
    mdb_node_t cteMDBNode;
};
typedef struct cte cte_t;

#define nullMDBNode mdb_node_new(0, false, false, 0)

/* Thread state */
enum _thread_state {
    ThreadState_Inactive = 0,
    ThreadState_Running,
    ThreadState_Restart,
    ThreadState_BlockedOnReceive,
    ThreadState_BlockedOnSend,
    ThreadState_BlockedOnReply,
    ThreadState_BlockedOnNotification,
#ifdef CONFIG_VTX
    ThreadState_RunningVM,
#endif
    ThreadState_IdleThreadState
};
typedef word_t _thread_state_t;

/* A TCB CNode and a TCB are always allocated together, and adjacently.
 * The CNode comes first. */
enum tcb_cnode_index {
    /* CSpace root */
    tcbCTable = 0,

    /* VSpace root */
    tcbVTable = 1,

#ifdef CONFIG_KERNEL_MCS
    /* IPC buffer cap slot */
    tcbBuffer = 2,

    /* Fault endpoint slot */
    tcbFaultHandler = 3,

    /* Timeout endpoint slot */
    tcbTimeoutHandler = 4,
#else
    /* Reply cap slot */
    tcbReply = 2,

    /* TCB of most recent IPC sender */
    tcbCaller = 3,

    /* IPC buffer cap slot */
    tcbBuffer = 4,
#endif
    tcbCNodeEntries
};
typedef word_t tcb_cnode_index_t;
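
/*
 * Slot numbering differs between MCS and non-MCS configurations (e.g. tcbBuffer
 * is 2 under CONFIG_KERNEL_MCS but 4 otherwise), so TCB CNode slots are always
 * addressed through these symbolic names, e.g. (illustrative only):
 *
 *     cte_t *bufferSlot = TCB_PTR_CTE_PTR(tcb, tcbBuffer);
 */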

#include <arch/object/structures.h>

struct user_data {
    word_t words[BIT(seL4_PageBits) / sizeof(word_t)];
};
typedef struct user_data user_data_t;

struct user_data_device {
    word_t words[BIT(seL4_PageBits) / sizeof(word_t)];
};
typedef struct user_data_device user_data_device_t;

static inline word_t CONST wordFromVMRights(vm_rights_t vm_rights)
{
    return (word_t)vm_rights;
}

static inline vm_rights_t CONST vmRightsFromWord(word_t w)
{
    return (vm_rights_t)w;
}

static inline vm_attributes_t CONST vmAttributesFromWord(word_t w)
{
    vm_attributes_t attr;

    attr.words[0] = w;
    return attr;
}

#ifdef CONFIG_KERNEL_MCS
typedef struct sched_context sched_context_t;
typedef struct reply reply_t;
#endif

/* TCB: size >= 18 words + sizeof(arch_tcb_t) + 1 word on MCS (aligned to nearest power of 2) */
struct tcb {
    /* arch specific tcb state (including context) */
    arch_tcb_t tcbArch;

    /* Thread state, 3 words */
    thread_state_t tcbState;

    /* Notification that this TCB is bound to. If this is set, when this TCB waits on
     * any sync endpoint, it may receive a signal from a Notification object.
     * 1 word */
    notification_t *tcbBoundNotification;

    /* Current fault, 2 words */
    seL4_Fault_t tcbFault;

    /* Current lookup failure, 2 words */
    lookup_fault_t tcbLookupFailure;

    /* Domain, 1 byte (padded to 1 word) */
    dom_t tcbDomain;

    /* Maximum controlled priority, 1 byte (padded to 1 word) */
    prio_t tcbMCP;

    /* Priority, 1 byte (padded to 1 word) */
    prio_t tcbPriority;

#ifdef CONFIG_KERNEL_MCS
    /* Scheduling context that this TCB is running on; if it is NULL the TCB cannot
     * be in the scheduler queues, 1 word */
    sched_context_t *tcbSchedContext;

    /* Scheduling context that this TCB yielded to, 1 word */
    sched_context_t *tcbYieldTo;
#else
    /* Timeslice remaining, 1 word */
    word_t tcbTimeSlice;

    /* Capability pointer to thread fault handler, 1 word */
    cptr_t tcbFaultHandler;
#endif

    /* userland virtual address of thread IPC buffer, 1 word */
    word_t tcbIPCBuffer;

#ifdef ENABLE_SMP_SUPPORT
    /* cpu ID this thread is running on, 1 word */
    word_t tcbAffinity;
#endif /* ENABLE_SMP_SUPPORT */

    /* Previous and next pointers for scheduler queues, 2 words */
    struct tcb *tcbSchedNext;
    struct tcb *tcbSchedPrev;
    /* Previous and next pointers for endpoint and notification queues, 2 words */
    struct tcb *tcbEPNext;
    struct tcb *tcbEPPrev;

#ifdef CONFIG_KERNEL_MCS
    /* If the TCB is in a call, pointer to the reply object, 1 word */
    reply_t *tcbReply;
#endif
#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    /* 16 bytes (12 bytes aarch32) */
    benchmark_util_t benchmark;
#endif
};
typedef struct tcb tcb_t;

#ifdef CONFIG_DEBUG_BUILD
/* This debug_tcb object is inserted into the 'unused' region of a TCB object
 * for debug build configurations. */
struct debug_tcb {
    /* Pointers for the list of all TCBs that is maintained
     * when CONFIG_DEBUG_BUILD is enabled, 2 words */
    struct tcb *tcbDebugNext;
    struct tcb *tcbDebugPrev;
    /* Use any remaining space for a thread name */
    char tcbName[];
};
typedef struct debug_tcb debug_tcb_t;

#define TCB_PTR_DEBUG_PTR(p) ((debug_tcb_t *)TCB_PTR_CTE_PTR(p,tcbArchCNodeEntries))
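
/*
 * The debug_tcb_t occupies the 'unused' region a..b in the TCB object diagram
 * above, starting directly after the (arch-extended) CNode slots; the flexible
 * tcbName array may therefore use all remaining space up to TCB_OFFSET.
 * Illustrative only:
 *
 *     TCB_PTR_DEBUG_PTR(tcb)->tcbName holds at most
 *     TCB_OFFSET - tcbArchCNodeEntries * sizeof(cte_t) - sizeof(debug_tcb_t) bytes.
 */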
#endif /* CONFIG_DEBUG_BUILD */

#ifdef CONFIG_KERNEL_MCS
typedef struct refill {
    /* Absolute timestamp from when this refill can be used */
    ticks_t rTime;
    /* Amount of ticks that can be used from this refill */
    ticks_t rAmount;
} refill_t;

#define MIN_REFILLS 2u

struct sched_context {
    /* period for this sc -- controls rate at which budget is replenished */
    ticks_t scPeriod;

    /* amount of ticks this sc has been scheduled for since seL4_SchedContext_Consumed
     * was last called or a timeout exception fired */
    ticks_t scConsumed;

    /* core this scheduling context provides time for - 0 if uniprocessor */
    word_t scCore;

    /* thread that this scheduling context is bound to */
    tcb_t *scTcb;

    /* if this is not NULL, it points to the last reply object that was generated
     * when the scheduling context was passed over a Call */
    reply_t *scReply;

    /* notification this scheduling context is bound to */
    notification_t *scNotification;

    /* data word that is sent with timeout faults that occur on this scheduling context */
    word_t scBadge;

    /* thread that yielded to this scheduling context */
    tcb_t *scYieldFrom;

    /* Amount of refills this sc tracks */
    word_t scRefillMax;
    /* Index of the head of the refill circular buffer */
    word_t scRefillHead;
    /* Index of the tail of the refill circular buffer */
    word_t scRefillTail;

    /* Whether to apply constant-bandwidth/sliding-window constraints
     * rather than only sporadic server constraints */
    bool_t scSporadic;
};
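
/*
 * The refill circular buffer itself is not declared here: scheduling context
 * objects consist of a sched_context_t followed by a variable number of
 * refill_t entries in the remainder of the object (see the sc_core_size_sane
 * assertion below). A minimal indexing sketch, assuming that layout; the name
 * sc_refill_index_sketch is illustrative, not the kernel's own accessor:
 */
static inline refill_t *sc_refill_index_sketch(sched_context_t *sc, word_t index)
{
    /* refills start at the first byte past the sched_context_t header */
    return ((refill_t *)(SC_REF(sc) + sizeof(sched_context_t))) + index;
}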

struct reply {
    /* TCB pointed to by this reply object. This pointer reflects two possible relations,
     * depending on the thread state:
     *
     * ThreadState_BlockedOnReply: this TCB is the caller that is blocked on this reply object,
     * ThreadState_BlockedOnReceive: this TCB is the callee blocked on an endpoint with this reply object.
     *
     * The back pointer for this TCB is stored in the thread state. */
    tcb_t *replyTCB;

    /* 0 if this is the start of the call chain, or points to the
     * previous reply object in a call chain */
    call_stack_t replyPrev;

    /* Either a scheduling context if this reply object is the head of the call chain
     * (the last caller before the server) or another reply object. 0 if no scheduling
     * context was passed along the call chain */
    call_stack_t replyNext;
};
#endif

/* Ensure object sizes are sane */
compile_assert(cte_size_sane, sizeof(cte_t) <= BIT(seL4_SlotBits))
compile_assert(tcb_cte_size_sane, TCB_CNODE_SIZE_BITS <= TCB_SIZE_BITS)
compile_assert(tcb_size_sane,
               BIT(TCB_SIZE_BITS) >= sizeof(tcb_t))
compile_assert(tcb_size_not_excessive,
               BIT(TCB_SIZE_BITS - 1) < sizeof(tcb_t))
compile_assert(ep_size_sane, sizeof(endpoint_t) <= BIT(seL4_EndpointBits))
compile_assert(notification_size_sane, sizeof(notification_t) <= BIT(seL4_NotificationBits))

/* Check the IPC buffer is the right size */
compile_assert(ipc_buf_size_sane, sizeof(seL4_IPCBuffer) == BIT(seL4_IPCBufferSizeBits))
#ifdef CONFIG_KERNEL_MCS
compile_assert(sc_core_size_sane, (sizeof(sched_context_t) + MIN_REFILLS * sizeof(refill_t) <=
                                   seL4_CoreSchedContextBytes))
compile_assert(reply_size_sane, sizeof(reply_t) <= BIT(seL4_ReplyBits))
compile_assert(refill_size_sane, (sizeof(refill_t) == seL4_RefillSizeBytes))
#endif

/* helper functions */

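/* Arch-specific cap types are given odd capType values, so testing the low bit
 * of the type is enough to distinguish arch caps from generic ones. */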
static inline word_t CONST
isArchCap(cap_t cap)
{
    return (cap_get_capType(cap) % 2);
}