/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DISABLE_SYSCALL_TRACING

#include <zephyr/kernel.h>
#include <tracing_test.h>
#include <tracing_test_syscall.h>
#include <zephyr/tracing/tracing_format.h>

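/*
 * Test tracing backend.  Each hook below emits its own name (and, where a
 * kernel object is involved, that object's address) through TRACING_STRING()
 * so the tracing test suite can verify that the instrumentation points fire.
 * A typical line of output looks like (the address is illustrative only):
 *
 *     sys_trace_k_sem_give_enter: 0x20001234
 */

/* Thread scheduling and lifecycle hooks */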
void sys_trace_k_thread_switched_out(void)
{
	struct k_thread *thread;

	thread = k_current_get();
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_switched_in(void)
{
	struct k_thread *thread;

	thread = k_current_get();
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_priority_set(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_create(struct k_thread *thread, size_t stack_size,
			       int prio)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_start(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_abort(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_suspend(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_resume(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_resume_exit(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_ready(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_ready(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_pend(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_abort(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_resume(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_suspend(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

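/* Thread sleep and busy-wait hooks */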
void sys_trace_k_thread_sleep_enter(k_timeout_t timeout)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_sleep_exit(k_timeout_t timeout, int ret)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_usleep_enter(int32_t us)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_usleep_exit(int32_t us, int ret)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_busy_wait_enter(uint32_t usec_to_wait)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_busy_wait_exit(uint32_t usec_to_wait)
{
	TRACING_STRING("%s\n", __func__);
}

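/* Thread abort, yield, wakeup, pend, naming, scheduler-lock, and join hooks */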
void sys_trace_k_thread_abort_enter(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_abort_exit(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_yield(void)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_thread_yield(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_wakeup(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_pend(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_info(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_name_set(struct k_thread *thread, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_lock(void)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_port_trace_k_thread_sched_unlock(void)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_join_blocking(struct k_thread *thread, k_timeout_t timeout)
{
	TRACING_STRING("%s %p, timeout: %u\n", __func__, thread, (uint32_t)timeout.ticks);
}

void sys_trace_k_thread_join_exit(struct k_thread *thread, k_timeout_t timeout, int ret)
{
	TRACING_STRING("%s %p, timeout: %u\n", __func__, thread, (uint32_t)timeout.ticks);
}

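/* ISR entry/exit hooks */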
void sys_trace_isr_enter(void)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_isr_exit(void)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_isr_exit_to_scheduler(void)
{
	TRACING_STRING("%s\n", __func__);
}

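/* Idle loop hooks; output is gated by CONFIG_TRACING_IDLE */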
void sys_trace_idle(void)
{
#ifdef CONFIG_TRACING_IDLE
	TRACING_STRING("%s\n", __func__);
#endif
}

void sys_trace_idle_exit(void)
{
#ifdef CONFIG_TRACING_IDLE
	TRACING_STRING("%s\n", __func__);
#endif
}

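/* Condition variable hooks */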
void sys_trace_k_condvar_broadcast_enter(struct k_condvar *condvar)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_broadcast_exit(struct k_condvar *condvar, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_init(struct k_condvar *condvar, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_signal_enter(struct k_condvar *condvar)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_signal_blocking(struct k_condvar *condvar)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_signal_exit(struct k_condvar *condvar, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_wait_enter(struct k_condvar *condvar, struct k_mutex *mutex,
				    k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_wait_exit(struct k_condvar *condvar, struct k_mutex *mutex,
				   k_timeout_t timeout, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

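/* Semaphore hooks */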
void sys_trace_k_sem_init(struct k_sem *sem, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, sem);
}

void sys_trace_k_sem_give_enter(struct k_sem *sem)
{
	TRACING_STRING("%s: %p\n", __func__, sem);
}

void sys_trace_k_sem_take_enter(struct k_sem *sem, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p, timeout: %u\n", __func__, sem, (uint32_t)timeout.ticks);
}

void sys_trace_k_sem_take_exit(struct k_sem *sem, k_timeout_t timeout, int ret)
{
	TRACING_STRING("%s: %p, timeout: %u\n", __func__, sem, (uint32_t)timeout.ticks);
}

void sys_trace_k_sem_take_blocking(struct k_sem *sem, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p, timeout: %u\n", __func__, sem, (uint32_t)timeout.ticks);
}

void sys_trace_k_sem_reset(struct k_sem *sem)
{
	TRACING_STRING("%s: %p\n", __func__, sem);
}

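/* Mutex hooks */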
void sys_trace_k_mutex_init(struct k_mutex *mutex, int ret)
{
	TRACING_STRING("%s: %p, returns %d\n", __func__, mutex, ret);
}

void sys_trace_k_mutex_lock_enter(struct k_mutex *mutex, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p, timeout: %u\n", __func__, mutex, (uint32_t)timeout.ticks);
}

void sys_trace_k_mutex_lock_exit(struct k_mutex *mutex, k_timeout_t timeout, int ret)
{
	TRACING_STRING("%s: %p, timeout: %u, returns: %d\n", __func__, mutex,
		       (uint32_t)timeout.ticks, ret);
}

void sys_trace_k_mutex_lock_blocking(struct k_mutex *mutex, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p, timeout: %u\n", __func__, mutex, (uint32_t)timeout.ticks);
}

void sys_trace_k_mutex_unlock_enter(struct k_mutex *mutex)
{
	TRACING_STRING("%s: %p\n", __func__, mutex);
}

void sys_trace_k_mutex_unlock_exit(struct k_mutex *mutex, int ret)
{
	TRACING_STRING("%s: %p, return: %d\n", __func__, mutex, ret);
}

void sys_trace_k_thread_sched_set_priority(struct k_thread *thread, int prio)
{
	TRACING_STRING("%s: %p, priority: %d\n", __func__, thread, prio);
}

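/* Timer hooks */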
void sys_trace_k_timer_start(struct k_timer *timer, k_timeout_t duration,
			     k_timeout_t period)
{
	TRACING_STRING("%s: %p, duration: %u, period: %u\n", __func__, timer,
		       (uint32_t)duration.ticks, (uint32_t)period.ticks);
}

void sys_trace_k_timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn,
			    k_timer_expiry_t stop_fn)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}

void sys_trace_k_timer_stop(struct k_timer *timer)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}

void sys_trace_k_timer_status_sync_blocking(struct k_timer *timer)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}

void sys_trace_k_timer_status_sync_exit(struct k_timer *timer, uint32_t result)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}

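/* Heap hooks */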
void sys_trace_k_heap_init(struct k_heap *h, void *mem, size_t bytes)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_aligned_alloc_enter(struct k_heap *h, size_t bytes, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_alloc_enter(struct k_heap *h, size_t bytes, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_calloc_enter(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_free(struct k_heap *h, void *mem)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_realloc_enter(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_realloc_exit(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout,
				   void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_alloc_helper_blocking(struct k_heap *h, size_t bytes, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_alloc_exit(struct k_heap *h, size_t bytes, k_timeout_t timeout, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_calloc_exit(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout,
				  void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_aligned_alloc_exit(struct k_heap *h, size_t bytes,
					 k_timeout_t timeout, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_sys_k_free_enter(struct k_heap *h, struct k_heap **hr)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_sys_k_free_exit(struct k_heap *h, struct k_heap **hr)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

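/* Queue hooks */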
void sys_trace_k_queue_init(struct k_queue *queue)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_cancel_wait(struct k_queue *queue)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_append_enter(struct k_queue *queue, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_append_exit(struct k_queue *queue, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_queue_insert_enter(struct k_queue *queue, bool alloc, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_queue_insert_exit(struct k_queue *queue, bool alloc, void *data, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_get_blocking(struct k_queue *queue, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_get_exit(struct k_queue *queue, k_timeout_t timeout, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_peek_head(struct k_queue *queue, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_peek_tail(struct k_queue *queue, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_alloc_append_enter(struct k_queue *queue, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_alloc_append_exit(struct k_queue *queue, void *data, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_alloc_prepend_enter(struct k_queue *queue, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_alloc_prepend_exit(struct k_queue *queue, void *data, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

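/* Memory slab hooks */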
void sys_trace_k_mem_slab_alloc_enter(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, slab);
}

void sys_trace_k_mem_slab_alloc_blocking(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, slab);
}

void sys_trace_k_mem_slab_alloc_exit(struct k_mem_slab *slab, void **mem, k_timeout_t timeout,
				     int ret)
{
	TRACING_STRING("%s: %p\n", __func__, slab);
}

void sys_trace_k_mem_slab_free_enter(struct k_mem_slab *slab, void *mem)
{
	TRACING_STRING("%s: %p\n", __func__, slab);
}

void sys_trace_k_mem_slab_free_exit(struct k_mem_slab *slab, void *mem)
{
	TRACING_STRING("%s: %p\n", __func__, slab);
}

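/* FIFO hooks */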
void sys_trace_k_fifo_put_enter(struct k_fifo *fifo, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, fifo);
}

void sys_trace_k_fifo_put_exit(struct k_fifo *fifo, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, fifo);
}

void sys_trace_k_fifo_get_enter(struct k_fifo *fifo, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, fifo);
}

void sys_trace_k_fifo_get_exit(struct k_fifo *fifo, k_timeout_t timeout, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, fifo);
}

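/* System call entry/exit hooks: emit the syscall name and ID */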
void sys_trace_syscall_enter(uint32_t syscall_id, const char *syscall_name)
{
	TRACING_STRING("%s: %s (%u) enter\n", __func__, syscall_name, syscall_id);
}

void sys_trace_syscall_exit(uint32_t syscall_id, const char *syscall_name)
{
	TRACING_STRING("%s: %s (%u) exit\n", __func__, syscall_name, syscall_id);
}

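/* k_thread_foreach_unlocked() hooks */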
void sys_trace_k_thread_foreach_unlocked_enter(k_thread_user_cb_t user_cb, void *data)
{
	TRACING_STRING("%s: %p (%p) enter\n", __func__, user_cb, data);
}

void sys_trace_k_thread_foreach_unlocked_exit(k_thread_user_cb_t user_cb, void *data)
{
	TRACING_STRING("%s: %p (%p) exit\n", __func__, user_cb, data);
}