1 /*
2 * Copyright (c) 2017 Oticon A/S
3 * Copyright (c) 2023 Nordic Semiconductor ASA
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 /*
9 * Native simulator, CPU Thread emulation (nct)
10 */
11
12 /**
13 * Native simulator single CPU threading emulation,
14 * an *optional* module provided by the Native simulator
15 * the hosted embedded OS / SW can use to emulate the threading
 * context switching which would be handled by an OS CPU AL
17 *
18 * Principle of operation:
19 *
20 * The embedded OS threads are run as a set of native Linux pthreads.
 * The embedded OS only sees one of these threads executing at a time.
22 *
23 * The hosted OS (or its integration into the native simulator) shall call
24 * nct_init() to initialize the state of an instance of this module, and
25 * nct_clean_up() once it desires to destroy it.
26 *
27 * For SOCs with several micro-controllers (AMP) one instance of this module
28 * would be instantiated per simulated uC and embedded OS.
29 *
30 * To create a new embedded thread, the hosted OS shall call nct_new_thread().
31 * To swap to a thread nct_swap_threads(), and to terminate a thread
32 * nct_abort_thread().
33 * The hosted OS can optionally use nct_first_thread_start() to swap
34 * to the "first thread".
35 *
36 * Whenever a thread calls nct_swap_threads(next_thread_idx) it will be blocked,
37 * and the thread identified by next_thread_idx will continue executing.
38 *
39 *
40 * Internal design:
41 *
42 * Which thread is running is controlled using its own semaphore.
43 *
44 * The main part of the execution of each thread will occur in a fully
45 * synchronous and deterministic manner, and only when commanded by
46 * the embedded operating system kernel.
47 *
48 * The creation of a thread will spawn a new pthread whose start
49 * is asynchronous to the rest, until synchronized in nct_wait_until_allowed()
50 * below.
 * Similarly, aborting and canceling threads execute a tail in quite an
 * asynchronous manner.
53 *
54 * This implementation is meant to be portable in between fully compatible
55 * POSIX systems.
56 * A table (threads_table) is used to abstract the native pthreads.
57 * An index in this table is used to identify threads in the IF to the
58 * embedded OS.
59 */
60
61 #define NCT_DEBUG_PRINTS 0
62
63 /* For pthread_setname_np() */
64 #define _GNU_SOURCE
65 #include <stdbool.h>
66 #include <stdlib.h>
67 #include <string.h>
68 #include <stdint.h>
69 #include <pthread.h>
70 #include <semaphore.h>
71 #include <errno.h>
72 #include "nsi_utils.h"
73 #include "nct_if.h"
74 #include "nsi_internal.h"
75 #include "nsi_safe_call.h"
76
#if NCT_DEBUG_PRINTS
#define NCT_DEBUG(fmt, ...) nsi_print_trace(PREFIX fmt, __VA_ARGS__)
#else
#define NCT_DEBUG(...)
#endif

/* Note: fixed typo, this prefix appears in all trace/error messages ("Tread" -> "Thread") */
#define PREFIX "Thread Simulator: "
#define ERPREFIX PREFIX"error on "
#define NO_MEM_ERR PREFIX"Can't allocate memory\n"

#define NCT_ENABLE_CANCEL 1
#define NCT_ALLOC_CHUNK_SIZE 64 /* In how big chunks we grow the thread table */
#define NCT_REUSE_ABORTED_ENTRIES 0
/* For the Zephyr OS, tests/kernel/threads/scheduling/schedule_api fails when setting
 * NCT_REUSE_ABORTED_ENTRIES => don't set it by now
 */
93
/* Forward declaration: each table element points back to its owning instance's status */
struct nct_status_t;

/* One entry per hosted thread in the threads table */
struct threads_table_el {
	/* Pointer to the overall status of the threading emulator instance */
	struct nct_status_t *nct_status;
	struct threads_table_el *next; /* Pointer to the next element of the table */
	sem_t sema; /* Semaphore to hold this thread until allowed */
	pthread_t thread; /* Actual pthread_t as returned by the native kernel */

	int thread_idx; /* Index of this element in the threads_table */
	int thead_cnt; /* For debugging: Unique, consecutive, thread number */

	/* Lifecycle of this entry; NOTUSED entries (and, if enabled, ABORTED ones) are reused */
	enum {NOTUSED = 0, USED, ABORTING, ABORTED, FAILED} state;
	bool running; /* (For debugging purposes) Is this the currently running thread */

	/*
	 * Pointer to data from the hosted OS architecture.
	 * What that is, if anything, is up to the hosted OS
	 */
	void *payload;
};

/* Overall status of one instance of this threading emulator */
struct nct_status_t {
	struct threads_table_el *threads_table; /* Pointer to the threads table */
	int thread_create_count; /* (For debugging) Thread creation counter */
	int threads_table_size; /* Size of threads_table */
	/* Pointer to the hosted OS function to be called when a thread is started */
	void (*fptr)(void *payload);

	/* Index of the thread which is currently allowed to run now (-1 => none yet) */
	int currently_allowed_thread;

	bool terminate; /* Are we terminating the program == cleaning up */
	bool all_threads_released; /* During termination, have we released all hosted threads */
};
129
130 static struct threads_table_el *ttable_get_element(struct nct_status_t *this, int index);
131
132 /**
133 * Helper function, run by a thread which is being ended
134 */
static void nct_exit_this_thread(void)
{
	pthread_t self = pthread_self();

	/* Detach first, so nobody ever needs to join us to reclaim our resources */
	pthread_detach(self);
	pthread_exit(NULL);
}
141
142 /*
143 * Wait for the semaphore, retrying if we are interrupted by a signal
144 */
/* (static inline: file-local helper, equivalent to the project's NSI_INLINE hint) */
static inline int nct_sem_rewait(sem_t *semaphore)
{
	int ret;

	/*
	 * Per POSIX, sem_wait() does not *return* EINTR: on a signal interruption
	 * it returns -1 and sets errno to EINTR. The previous comparison of the
	 * return value against EINTR never matched, so an interrupted wait was
	 * reported as a failure instead of being retried.
	 */
	while ((ret = sem_wait(semaphore)) == -1 && errno == EINTR) {
		/* Restart the wait, we were interrupted by a signal */
	}
	return ret;
}
154
155 /**
156 * Helper function, run by a thread which is being aborted
157 */
static void abort_tail(struct threads_table_el *tt_el)
{
	NCT_DEBUG("Thread [%i] %i: %s: Aborting (exiting) (rel mut)\n",
		tt_el->thead_cnt, tt_el->thread_idx, __func__);

	/* Mark the table entry as done before ending the underlying pthread */
	tt_el->running = false;
	tt_el->state = ABORTED;
	/* Does not return: detaches and exits this pthread */
	nct_exit_this_thread();
}
167
168 /**
169 * Helper function to block this thread until it is allowed to run again
170 * (either when the hosted OS swaps to it, or aborts it)
171 */
static void nct_wait_until_allowed(struct threads_table_el *tt_el, int this_th_nbr)
{
	tt_el->running = false;

	NCT_DEBUG("Thread [%i] %i: %s: Waiting to be allowed to run\n",
		tt_el->thead_cnt, this_th_nbr, __func__);

	/* Block here until somebody posts our semaphore (nct_let_run() or nct_clean_up()) */
	NSI_SAFE_CALL(nct_sem_rewait(&tt_el->sema));

	/* We may have been released because the whole instance is being torn down */
	if (tt_el->nct_status->terminate) {
		nct_exit_this_thread();
	}

	/* Or because the hosted OS aborted this thread while it was swapped out */
	if (tt_el->state == ABORTING) {
		abort_tail(tt_el);
	}

	/* Otherwise we were genuinely swapped in: mark ourselves as the running thread */
	tt_el->running = true;

	NCT_DEBUG("Thread [%i] %i: %s(): I'm allowed to run!\n",
		tt_el->thead_cnt, this_th_nbr, __func__);
}
194
195 /**
196 * Helper function to let the thread <next_allowed_th> run
197 */
nct_let_run(struct nct_status_t * this,int next_allowed_th)198 static void nct_let_run(struct nct_status_t *this, int next_allowed_th)
199 {
200 struct threads_table_el *tt_el = ttable_get_element(this, next_allowed_th);
201
202 NCT_DEBUG("%s: We let thread [%i] %i run\n", __func__, tt_el->thead_cnt, next_allowed_th);
203
204 this->currently_allowed_thread = next_allowed_th;
205 NSI_SAFE_CALL(sem_post(&tt_el->sema));
206 }
207
208 /**
209 * Let the <next_allowed_thread_nbr> run and block this managed thread until it is allowed again
210 *
211 * The hosted OS shall call this when it has decided to swap in/out two of its threads,
212 * from the thread that is being swapped out.
213 *
214 * Note: If called without having ever let another managed thread run / from a thread not
215 * managed by this nct instance, it will behave like nct_first_thread_start(),
216 * and terminate the calling thread while letting the managed thread
217 * <next_allowed_thread_nbr> continue.
218 *
219 * inputs:
220 * this_arg: Pointer to this thread emulator instance as returned by nct_init()
221 * next_allowed_thread_nbr: Identifier of the thread the hosted OS wants to swap in
222 */
void nct_swap_threads(void *this_arg, int next_allowed_thread_nbr)
{
	struct nct_status_t *this = (struct nct_status_t *)this_arg;
	/* Capture who we are *before* nct_let_run() overwrites currently_allowed_thread */
	int this_th_nbr = this->currently_allowed_thread;
	/* Note: when this_th_nbr == -1 (no managed thread ran yet) this yields an
	 * out-of-range element pointer, which is never dereferenced on that path */
	struct threads_table_el *tt_el = ttable_get_element(this, this_th_nbr);

	nct_let_run(this, next_allowed_thread_nbr);

	if (this_th_nbr == -1) { /* This is the first time a thread was swapped in */
		NCT_DEBUG("%s: called from an unmanaged thread, terminating it\n", __func__);
		nct_exit_this_thread();
	}

	if (tt_el->state == ABORTING) { /* We had set ourself as aborted => let's exit now */
		NCT_DEBUG("Thread [%i] %i: %s: Aborting curr.\n",
			tt_el->thead_cnt, this_th_nbr, __func__);
		abort_tail(tt_el);
	} else {
		/* Block until we are swapped back in (or aborted/terminated) */
		nct_wait_until_allowed(tt_el, this_th_nbr);
	}
}
244
245 /**
246 * Let the very first hosted thread run, and exit the calling thread.
247 *
248 * The hosted OS shall call this when it has decided to swap into another
249 * thread, and wants to terminate the currently executing thread, which is not
250 * a thread managed by the thread emulator.
251 *
252 * This function allows to emulate a hosted OS doing its first swapping into one
253 * of its hosted threads from the init thread, abandoning/terminating that init
254 * thread.
255 */
void nct_first_thread_start(void *this_arg, int next_allowed_thread_nbr)
{
	/* Release the chosen hosted thread, then terminate this (unmanaged) caller */
	nct_let_run((struct nct_status_t *)this_arg, next_allowed_thread_nbr);
	NCT_DEBUG("%s: Init thread dying now (rel mut)\n", __func__);
	nct_exit_this_thread();
}
264
265 /**
266 * Helper function to start a hosted thread as a POSIX thread:
267 * It will block this new pthread until the embedded OS decides to "swap it in".
268 */
static void *nct_thread_starter(void *arg_el)
{
	struct threads_table_el *tt_el = (struct threads_table_el *)arg_el;
	const struct nct_status_t *this = tt_el->nct_status;

	int thread_idx = tt_el->thread_idx;

	NCT_DEBUG("Thread [%i] %i: %s: Starting\n", tt_el->thead_cnt, thread_idx, __func__);

	/*
	 * The program may have been finished before this thread ever got to run
	 */
	/* LCOV_EXCL_START */ /* See Note1 */
	if (!this->threads_table || this->terminate) {
		nct_exit_this_thread();
	}
	/* LCOV_EXCL_STOP */

	/* Let's wait until the thread is swapped in */
	nct_wait_until_allowed(tt_el, thread_idx);

	/* Hand control to the hosted OS entry point registered in nct_init() */
	this->fptr(tt_el->payload);

	/*
	 * We only reach this point if the thread actually returns which should
	 * not happen. But we handle it gracefully just in case
	 */
	/* LCOV_EXCL_START */
	nsi_print_trace(PREFIX"Thread [%i] %i [%lu] ended!?!\n",
		tt_el->thead_cnt,
		thread_idx,
		pthread_self());

	tt_el->running = false;
	tt_el->state = FAILED;

	nct_exit_this_thread();

	return NULL;
	/* LCOV_EXCL_STOP */
}
310
311 /*
312 * Helper function to link the elements in a chunk to each other and initialize (to 0)
313 * their thread semaphores
314 */
ttable_init_elements(struct threads_table_el * chunk,int size)315 static void ttable_init_elements(struct threads_table_el *chunk, int size)
316 {
317 for (int i = 0; i < size - 1; i++) {
318 chunk[i].next = &chunk[i+1];
319 NSI_SAFE_CALL(sem_init(&chunk[i].sema, 0, 0));
320 }
321 chunk[size - 1].next = NULL;
322 NSI_SAFE_CALL(sem_init(&chunk[size - 1].sema, 0, 0));
323 }
324
325 /*
326 * Get a given element in the threads table
327 */
ttable_get_element(struct nct_status_t * this,int index)328 static struct threads_table_el *ttable_get_element(struct nct_status_t *this, int index)
329 {
330 struct threads_table_el *threads_table = this->threads_table;
331
332 if (index >= this->threads_table_size) { /* LCOV_EXCL_BR_LINE */
333 nsi_print_error_and_exit("%s: Programming error, attempted out of bound access to "
334 "thread table (%i>=%i)\n",
335 index, this->threads_table_size); /* LCOV_EXCL_LINE */
336 }
337 while (index >= NCT_ALLOC_CHUNK_SIZE) {
338 index -= NCT_ALLOC_CHUNK_SIZE;
339 threads_table = threads_table[NCT_ALLOC_CHUNK_SIZE - 1].next;
340 }
341 return &threads_table[index];
342 }
343
344 /**
345 * Return the first free entry index in the threads table
346 */
ttable_get_empty_slot(struct nct_status_t * this)347 static int ttable_get_empty_slot(struct nct_status_t *this)
348 {
349 struct threads_table_el *tt_el = this->threads_table;
350
351 for (int i = 0; i < this->threads_table_size; i++, tt_el = tt_el->next) {
352 if ((tt_el->state == NOTUSED)
353 || (NCT_REUSE_ABORTED_ENTRIES
354 && (tt_el->state == ABORTED))) {
355 return i;
356 }
357 }
358
359 /*
360 * else, we run out of table without finding an index
361 * => we expand the table:
362 */
363
364 struct threads_table_el *new_chunk;
365
366 new_chunk = calloc(NCT_ALLOC_CHUNK_SIZE, sizeof(struct threads_table_el));
367 if (new_chunk == NULL) { /* LCOV_EXCL_BR_LINE */
368 nsi_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
369 }
370
371 /* Link new chunk to last element */
372 tt_el = ttable_get_element(this, this->threads_table_size-1);
373 tt_el->next = new_chunk;
374
375 this->threads_table_size += NCT_ALLOC_CHUNK_SIZE;
376
377 ttable_init_elements(new_chunk, NCT_ALLOC_CHUNK_SIZE);
378
379 /* The first newly created entry is good, we return it */
380 return this->threads_table_size - NCT_ALLOC_CHUNK_SIZE;
381 }
382
383 /**
384 * Create a new pthread for a new hosted OS thread and initialize its NCT status
385 *
386 * Returns a unique integer thread identifier/index, which should be used
387 * to refer to this thread in future calls to the thread emulator.
388 *
389 * It takes as parameter a pointer which will be passed to the
390 * function registered in nct_init when the thread is swapped in.
391 *
392 * Note that the thread is created but not swapped in.
393 * The new thread execution will be held until nct_swap_threads()
394 * (or nct_first_thread_start()) is called enabling this newly created
395 * thread number.
396 */
nct_new_thread(void * this_arg,void * payload)397 int nct_new_thread(void *this_arg, void *payload)
398 {
399 struct nct_status_t *this = (struct nct_status_t *)this_arg;
400 struct threads_table_el *tt_el;
401 int t_slot;
402
403 t_slot = ttable_get_empty_slot(this);
404 tt_el = ttable_get_element(this, t_slot);
405
406 tt_el->state = USED;
407 tt_el->running = false;
408 tt_el->thead_cnt = this->thread_create_count++;
409 tt_el->payload = payload;
410 tt_el->nct_status = this;
411 tt_el->thread_idx = t_slot;
412
413 NSI_SAFE_CALL(pthread_create(&tt_el->thread,
414 NULL,
415 nct_thread_starter,
416 (void *)tt_el));
417
418 NCT_DEBUG("%s created thread [%i] %i [%lu]\n",
419 __func__, tt_el->thead_cnt, t_slot, tt_el->thread);
420
421 return t_slot;
422 }
423
424 /**
425 * Initialize an instance of the threading emulator.
426 *
 * Returns a pointer to the initialized threading emulator instance.
428 * This pointer shall be passed to all subsequent calls of the
429 * threading emulator when interacting with this particular instance.
430 *
431 * The input fptr is a pointer to the hosted OS function
432 * to be called the first time a thread which is created on its request
433 * with nct_new_thread() is swapped in (from that thread context)
434 */
nct_init(void (* fptr)(void *))435 void *nct_init(void (*fptr)(void *))
436 {
437 struct nct_status_t *this;
438
439 /*
440 * Note: This (and the calloc below) won't be free'd by this code
441 * but left for the OS to clear at process end.
442 * This is a conscious choice, see nct_clean_up() for more info.
443 * If you got here due to valgrind's leak report, please use the
444 * provided valgrind suppression file valgrind.supp
445 */
446 this = calloc(1, sizeof(struct nct_status_t));
447 if (this == NULL) { /* LCOV_EXCL_BR_LINE */
448 nsi_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
449 }
450
451 this->fptr = fptr;
452 this->thread_create_count = 0;
453 this->currently_allowed_thread = -1;
454
455 this->threads_table = calloc(NCT_ALLOC_CHUNK_SIZE, sizeof(struct threads_table_el));
456 if (this->threads_table == NULL) { /* LCOV_EXCL_BR_LINE */
457 nsi_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
458 }
459
460 this->threads_table_size = NCT_ALLOC_CHUNK_SIZE;
461
462 ttable_init_elements(this->threads_table, NCT_ALLOC_CHUNK_SIZE);
463
464 return (void *)this;
465 }
466
467 /**
468 * Free allocated memory by the threading emulator and clean up ordering all managed
469 * threads to abort.
470 * Note that this function cannot be called from a SW thread
471 * (the CPU is assumed halted. Otherwise we would cancel ourselves)
472 *
473 * Note: This function cannot guarantee the threads will be cancelled before the HW
 * thread exits. The only way to do that would be to wait for each of them in
475 * a join without detaching them, but that could lead to locks in some
476 * convoluted cases; as a call to this function can come due to a hosted OS
477 * assert or other error termination, we better do not assume things are working fine.
478 * This also means we do not clean all memory used by this NCT instance, as those
479 * threads need to access it still.
480 * => we prefer the supposed memory leak report from valgrind, and ensure we
481 * will not hang.
482 */
void nct_clean_up(void *this_arg)
{
	struct nct_status_t *this = (struct nct_status_t *)this_arg;

	if (!this || !this->threads_table) { /* LCOV_EXCL_BR_LINE */
		return; /* LCOV_EXCL_LINE */
	}

	/* Checked by managed threads when they wake up => they will self-terminate */
	this->terminate = true;

#if NCT_ENABLE_CANCEL
	/* Only release the hosted threads once, even if we are called repeatedly */
	if (this->all_threads_released) {
		return;
	}
	this->all_threads_released = true;

	struct threads_table_el *tt_el = this->threads_table;

	for (int i = 0; i < this->threads_table_size; i++, tt_el = tt_el->next) {
		if (tt_el->state != USED) {
			continue;
		}
		/* Wake the thread; with <terminate> set it will exit on its own */
		NSI_SAFE_CALL(sem_post(&tt_el->sema));
	}
#endif

	/*
	 * This is the cleanup we do not do:
	 * for all threads
	 *   sem_destroy(&tt_el->sema);
	 *
	 * free(this->threads_table);
	 *   Including all chunks
	 * this->threads_table = NULL;
	 *
	 *
	 * free(this);
	 */
}
522
523
524 /*
525 * Mark a thread as being aborted. This will result in the underlying pthread
526 * being terminated some time later:
527 * If the thread is marking itself as aborting, as soon as it is swapped out
528 * by the hosted (embedded) OS
529 * If it is marking another thread, at some non-specific time soon in the future
530 * (But note that no embedded part of the aborted thread will execute anymore)
531 *
532 * * thread_idx : The thread identifier as provided during creation (return from nct_new_thread())
533 */
nct_abort_thread(void * this_arg,int thread_idx)534 void nct_abort_thread(void *this_arg, int thread_idx)
535 {
536 struct nct_status_t *this = (struct nct_status_t *)this_arg;
537 struct threads_table_el *tt_el = ttable_get_element(this, thread_idx);
538
539 if (thread_idx == this->currently_allowed_thread) {
540 NCT_DEBUG("Thread [%i] %i: %s Marked myself as aborting\n",
541 tt_el->thead_cnt, thread_idx, __func__);
542 tt_el->state = ABORTING;
543 } else {
544 if (tt_el->state != USED) { /* LCOV_EXCL_BR_LINE */
545 /* The thread may have been already aborted before */
546 return; /* LCOV_EXCL_LINE */
547 }
548
549 NCT_DEBUG("Aborting not scheduled thread [%i] %i\n", tt_el->thead_cnt, thread_idx);
550 tt_el->state = ABORTING;
551 NSI_SAFE_CALL(sem_post(&tt_el->sema));
552 }
553 }
554
555 /*
556 * Return a unique thread identifier for this thread for this
557 * run. This identifier is only meant for debug purposes
558 *
559 * thread_idx is the value returned by nct_new_thread()
560 */
nct_get_unique_thread_id(void * this_arg,int thread_idx)561 int nct_get_unique_thread_id(void *this_arg, int thread_idx)
562 {
563 struct nct_status_t *this = (struct nct_status_t *)this_arg;
564 struct threads_table_el *tt_el = ttable_get_element(this, thread_idx);
565
566 return tt_el->thead_cnt;
567 }
568
nct_thread_name_set(void * this_arg,int thread_idx,const char * str)569 int nct_thread_name_set(void *this_arg, int thread_idx, const char *str)
570 {
571 struct nct_status_t *this = (struct nct_status_t *)this_arg;
572 struct threads_table_el *tt_el = ttable_get_element(this, thread_idx);
573
574 return pthread_setname_np(tt_el->thread, str);
575 }
576
577 /*
578 * Notes about coverage:
579 *
580 * Note1:
581 *
582 * This condition will only be triggered in very unlikely cases
583 * (once every few full regression runs).
584 * It is therefore excluded from the coverage report to avoid confusing
585 * developers.
586 *
587 * Background: A pthread is created as soon as the hosted kernel creates
588 * a hosted thread. A pthread creation is an asynchronous process handled by the
589 * host kernel.
590 *
591 * This emulator normally keeps only 1 thread executing at a time.
592 * But part of the pre-initialization during creation of a new thread
593 * and some cleanup at the tail of the thread termination are executed
594 * in parallel to other threads.
595 * That is, the execution of those code paths is a bit indeterministic.
596 *
597 * Only when the hosted kernel attempts to swap to a new thread does this
598 * emulator need to wait until its pthread is ready and initialized
599 * (has reached nct_wait_until_allowed())
600 *
601 * In some cases (tests) hosted threads are created which are never actually needed
602 * (typically the idle thread). That means the test may finish before that
603 * thread's underlying pthread has reached nct_wait_until_allowed().
604 *
 * In these unlikely cases the initialization or cleanup of the thread follows
 * non-typical code paths.
 * These code paths are there to ensure things work always, no matter
608 * the load of the host. Without them, very rare & mysterious segfault crashes
609 * would occur.
610 * But as they are very atypical and only triggered with some host loads,
611 * they will be covered in the coverage reports only rarely.
612 *
613 * Note2:
614 *
615 * Some other code will never or only very rarely trigger and is therefore
616 * excluded with LCOV_EXCL_LINE
617 *
618 */
619