/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <time.h>
#include <zephyr/logging/log.h>
#include <zephyr/logging/log_backend.h>
#include <zephyr/logging/log_ctrl.h>
#include <zephyr/logging/log_output.h>
#include <zephyr/logging/log_internal.h>
#include <zephyr/sys/mpsc_pbuf.h>
#include <zephyr/logging/log_link.h>
#include <zephyr/sys/printk.h>
#include <zephyr/sys_clock.h>
#include <zephyr/sys/clock.h>
#include <zephyr/init.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/iterable_sections.h>
#include <ctype.h>
#include <zephyr/logging/log_frontend.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/logging/log_output_dict.h>
#include <zephyr/logging/log_output_custom.h>
#include <zephyr/linker/utils.h>

#if CONFIG_USERSPACE && CONFIG_LOG_ALWAYS_RUNTIME
#include <zephyr/app_memory/app_memdomain.h>
K_APPMEM_PARTITION_DEFINE(k_log_partition);
#endif

LOG_MODULE_REGISTER(log);

#ifndef CONFIG_LOG_PROCESS_THREAD_SLEEP_MS
#define CONFIG_LOG_PROCESS_THREAD_SLEEP_MS 0
#endif

#ifndef CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD
#define CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD 0
#endif

#ifndef CONFIG_LOG_PROCESS_THREAD_STACK_SIZE
#define CONFIG_LOG_PROCESS_THREAD_STACK_SIZE 1
#endif

#ifndef CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS
#define CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS 0
#endif

#ifndef CONFIG_LOG_PROCESSING_LATENCY_US
#define CONFIG_LOG_PROCESSING_LATENCY_US 0
#endif

#ifndef CONFIG_LOG_BUFFER_SIZE
#define CONFIG_LOG_BUFFER_SIZE 4
#endif

#ifdef CONFIG_LOG_PROCESS_THREAD_CUSTOM_PRIORITY
#define LOG_PROCESS_THREAD_PRIORITY CONFIG_LOG_PROCESS_THREAD_PRIORITY
#else
#define LOG_PROCESS_THREAD_PRIORITY K_LOWEST_APPLICATION_THREAD_PRIO
#endif

#ifndef CONFIG_LOG_TAG_MAX_LEN
#define CONFIG_LOG_TAG_MAX_LEN 0
#endif

#ifndef CONFIG_LOG_FAILURE_REPORT_PERIOD
#define CONFIG_LOG_FAILURE_REPORT_PERIOD 0
#endif

#ifndef CONFIG_LOG_ALWAYS_RUNTIME
BUILD_ASSERT(!IS_ENABLED(CONFIG_NO_OPTIMIZATIONS),
	     "CONFIG_LOG_ALWAYS_RUNTIME must be enabled when "
	     "CONFIG_NO_OPTIMIZATIONS is set");
BUILD_ASSERT(!IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE),
	     "CONFIG_LOG_ALWAYS_RUNTIME must be enabled when "
	     "CONFIG_LOG_MODE_IMMEDIATE is set");
#endif

static const log_format_func_t format_table[] = {
	[LOG_OUTPUT_TEXT] = IS_ENABLED(CONFIG_LOG_OUTPUT) ?
						log_output_msg_process : NULL,
	[LOG_OUTPUT_SYST] = IS_ENABLED(CONFIG_LOG_MIPI_SYST_ENABLE) ?
						log_output_msg_syst_process : NULL,
	[LOG_OUTPUT_DICT] = IS_ENABLED(CONFIG_LOG_DICTIONARY_SUPPORT) ?
						log_dict_output_msg_process : NULL,
	[LOG_OUTPUT_CUSTOM] = IS_ENABLED(CONFIG_LOG_CUSTOM_FORMAT_SUPPORT) ?
						log_custom_output_msg_process : NULL,
};

log_format_func_t log_format_func_t_get(uint32_t log_type)
{
	return format_table[log_type];
}

size_t log_format_table_size(void)
{
	return ARRAY_SIZE(format_table);
}
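
/* Illustrative lookup (a sketch, not part of the upstream file): a backend
 * can fetch the formatter for a given output type and skip processing when
 * that format was not compiled in. my_log_output, msg and flags are
 * hypothetical names.
 *
 *   log_format_func_t fmt = log_format_func_t_get(LOG_OUTPUT_TEXT);
 *
 *   if (fmt != NULL) {
 *       fmt(&my_log_output, &msg->log, flags);
 *   }
 */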

K_SEM_DEFINE(log_process_thread_sem, 0, 1);

static atomic_t initialized;
static bool panic_mode;
static bool backend_attached;
static atomic_t buffered_cnt;
static atomic_t dropped_cnt;
static k_tid_t proc_tid;
static struct k_timer log_process_thread_timer;

static log_timestamp_t dummy_timestamp(void);
static log_timestamp_get_t timestamp_func = dummy_timestamp;
static uint32_t timestamp_freq;
static log_timestamp_t proc_latency;
static log_timestamp_t prev_timestamp;
static atomic_t unordered_cnt;
static uint64_t last_failure_report;
static struct k_spinlock process_lock;

static STRUCT_SECTION_ITERABLE(log_msg_ptr, log_msg_ptr);
static STRUCT_SECTION_ITERABLE_ALTERNATE(log_mpsc_pbuf, mpsc_pbuf_buffer, log_buffer);
static struct mpsc_pbuf_buffer *curr_log_buffer;

#ifdef CONFIG_MPSC_PBUF
static uint32_t __aligned(Z_LOG_MSG_ALIGNMENT)
	buf32[CONFIG_LOG_BUFFER_SIZE / sizeof(int)];

static void z_log_notify_drop(const struct mpsc_pbuf_buffer *buffer,
			      const union mpsc_pbuf_generic *item);

static const struct mpsc_pbuf_buffer_config mpsc_config = {
	.buf = (uint32_t *)buf32,
	.size = ARRAY_SIZE(buf32),
	.notify_drop = z_log_notify_drop,
	.get_wlen = log_msg_generic_get_wlen,
	.flags = (IS_ENABLED(CONFIG_LOG_MODE_OVERFLOW) ?
		  MPSC_PBUF_MODE_OVERWRITE : 0) |
		 (IS_ENABLED(CONFIG_LOG_MEM_UTILIZATION) ?
		  MPSC_PBUF_MAX_UTILIZATION : 0)
};
#endif

/* Check that default tag can fit in tag buffer. */
COND_CODE_0(CONFIG_LOG_TAG_MAX_LEN, (),
	    (BUILD_ASSERT(sizeof(CONFIG_LOG_TAG_DEFAULT) <= CONFIG_LOG_TAG_MAX_LEN + 1,
			  "Default string longer than tag capacity")));

static char tag[CONFIG_LOG_TAG_MAX_LEN + 1] =
	COND_CODE_0(CONFIG_LOG_TAG_MAX_LEN, ({}), (CONFIG_LOG_TAG_DEFAULT));

static void msg_process(union log_msg_generic *msg);

static log_timestamp_t dummy_timestamp(void)
{
	return 0;
}

log_timestamp_t z_log_timestamp(void)
{
	return timestamp_func();
}

static void z_log_msg_post_finalize(void)
{
	atomic_val_t cnt = atomic_inc(&buffered_cnt);

	if (panic_mode) {
		k_spinlock_key_t key = k_spin_lock(&process_lock);

		(void)log_process();

		k_spin_unlock(&process_lock, key);
	} else if (proc_tid != NULL) {
		/*
		 * If CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD == 1,
		 * timer is never needed. We release the processing
		 * thread after every message is posted.
		 */
		if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD == 1) {
			if (cnt == 0) {
				k_sem_give(&log_process_thread_sem);
			}
		} else {
			if (cnt == 0) {
				k_timer_start(&log_process_thread_timer,
					      K_MSEC(CONFIG_LOG_PROCESS_THREAD_SLEEP_MS),
					      K_NO_WAIT);
			} else if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
				   (cnt + 1) == CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
				k_timer_stop(&log_process_thread_timer);
				k_sem_give(&log_process_thread_sem);
			} else {
				/* No action needed. Message processing will be
				 * triggered by the timeout or when the number
				 * of buffered messages reaches the threshold.
				 */
			}
		}
	}
}
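
/* Illustrative timing (hypothetical Kconfig values): with
 * CONFIG_LOG_PROCESS_THREAD_SLEEP_MS=1000 and
 * CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD=10, the first buffered message arms
 * a one-second timer, and the tenth message stops that timer and wakes the
 * processing thread immediately. Whichever happens first triggers
 * processing.
 */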

const struct log_backend *log_format_set_all_active_backends(size_t log_type)
{
	const struct log_backend *failed_backend = NULL;

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			int retCode = log_backend_format_set(backend, log_type);

			if (retCode != 0) {
				failed_backend = backend;
			}
		}
	}
	return failed_backend;
}
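
/* Example use (a sketch, assuming dictionary support is compiled in): switch
 * every active backend to dictionary output and report the last backend, if
 * any, that rejected the format:
 *
 *   const struct log_backend *failed =
 *           log_format_set_all_active_backends(LOG_OUTPUT_DICT);
 *
 *   if (failed != NULL) {
 *       printk("backend %s rejected the format\n", failed->name);
 *   }
 */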

void z_log_vprintk(const char *fmt, va_list ap)
{
	if (!IS_ENABLED(CONFIG_LOG_PRINTK)) {
		return;
	}

	z_log_msg_runtime_vcreate(Z_LOG_LOCAL_DOMAIN_ID, NULL,
				  LOG_LEVEL_INTERNAL_RAW_STRING, NULL, 0,
				  Z_LOG_MSG_CBPRINTF_FLAGS(0),
				  fmt, ap);
}

#ifndef CONFIG_LOG_TIMESTAMP_USE_REALTIME
static log_timestamp_t default_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		sys_clock_tick_get() : k_cycle_get_32();
}

static log_timestamp_t default_lf_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		k_uptime_get() : k_uptime_get_32();
}
#else
static log_timestamp_t default_rt_get_timestamp(void)
{
	struct timespec tspec;

	sys_clock_gettime(SYS_CLOCK_REALTIME, &tspec);

	return ((uint64_t)tspec.tv_sec * MSEC_PER_SEC) + (tspec.tv_nsec / NSEC_PER_MSEC);
}
#endif /* CONFIG_LOG_TIMESTAMP_USE_REALTIME */

void log_core_init(void)
{
	panic_mode = false;
	dropped_cnt = 0;
	buffered_cnt = 0;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_init();

		if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
			for (uint16_t s = 0; s < log_src_cnt_get(0); s++) {
				log_frontend_filter_set(s, CONFIG_LOG_MAX_LEVEL);
			}
		}

		if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
			return;
		}
	}

	/* Set default timestamp. */
#ifdef CONFIG_LOG_TIMESTAMP_USE_REALTIME
	log_set_timestamp_func(default_rt_get_timestamp, 1000U);
#else
	if (sys_clock_hw_cycles_per_sec() > 1000000) {
		log_set_timestamp_func(default_lf_get_timestamp, 1000U);
	} else {
		uint32_t freq = IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
			CONFIG_SYS_CLOCK_TICKS_PER_SEC : sys_clock_hw_cycles_per_sec();
		log_set_timestamp_func(default_get_timestamp, freq);
	}
#endif /* CONFIG_LOG_TIMESTAMP_USE_REALTIME */

	if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		z_log_msg_init();
	}

	if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		z_log_runtime_filters_init();
	}

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		uint32_t id;
		/* Since the first slot in the filtering mask is reserved,
		 * backend IDs are offset.
		 */
		id = LOG_FILTER_FIRST_BACKEND_SLOT_IDX;
		id += backend - log_backend_get(0);
		log_backend_id_set(backend, id);
	}
}

static uint32_t activate_foreach_backend(uint32_t mask)
{
	uint32_t mask_cpy = mask;

	while (mask_cpy) {
		uint32_t i = __builtin_ctz(mask_cpy);
		const struct log_backend *backend = log_backend_get(i);

		mask_cpy &= ~BIT(i);
		if (backend->autostart && (log_backend_is_ready(backend) == 0)) {
			mask &= ~BIT(i);
			log_backend_enable(backend,
					   backend->cb->ctx,
					   CONFIG_LOG_MAX_LEVEL);
		}
	}

	return mask;
}

static uint32_t z_log_init(bool blocking, bool can_sleep)
{
	uint32_t mask = 0;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
		return 0;
	}

	__ASSERT_NO_MSG(log_backend_count_get() < LOG_FILTERS_MAX_BACKENDS);

	if (atomic_inc(&initialized) != 0) {
		return 0;
	}

	if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN)) {
		z_log_links_initiate();
	}

	int backend_index = 0;

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		/* Activate autostart backends */
		if (backend->autostart) {
			log_backend_init(backend);

			/* If the backend has an activation function, it is
			 * not ready until activated.
			 */
			if (log_backend_is_ready(backend) == 0) {
				log_backend_enable(backend,
						   backend->cb->ctx,
						   CONFIG_LOG_MAX_LEVEL);
			} else {
				mask |= BIT(backend_index);
			}
		}

		++backend_index;
	}

	/* If blocking init, wait until all backends are activated. */
	if (blocking) {
		while (mask) {
			mask = activate_foreach_backend(mask);
			if (IS_ENABLED(CONFIG_MULTITHREADING) && can_sleep) {
				k_msleep(10);
			}
		}
	}

	return mask;
}

void log_init(void)
{
	(void)z_log_init(true, true);
}

void log_thread_trigger(void)
{
	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		return;
	}

	k_timer_stop(&log_process_thread_timer);
	k_sem_give(&log_process_thread_sem);
}

static void thread_set(k_tid_t process_tid)
{
	proc_tid = process_tid;

	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		return;
	}

	if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
	    process_tid &&
	    buffered_cnt >= CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
		k_sem_give(&log_process_thread_sem);
	}
}

void log_thread_set(k_tid_t process_tid)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		__ASSERT_NO_MSG(0);
	} else {
		thread_set(process_tid);
	}
}
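
/* Example (a sketch, only valid when CONFIG_LOG_PROCESS_THREAD is disabled):
 * an application thread can take over message processing by registering
 * itself and draining the queue periodically. The polling interval is
 * arbitrary.
 *
 *   static void app_log_thread(void *p1, void *p2, void *p3)
 *   {
 *       log_thread_set(k_current_get());
 *
 *       while (1) {
 *           while (log_process()) {
 *           }
 *           k_sleep(K_MSEC(10));
 *       }
 *   }
 */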

int log_set_timestamp_func(log_timestamp_get_t timestamp_getter, uint32_t freq)
{
	if (timestamp_getter == NULL) {
		return -EINVAL;
	}

	timestamp_func = timestamp_getter;
	timestamp_freq = freq;
	if (CONFIG_LOG_PROCESSING_LATENCY_US) {
		proc_latency = (freq * CONFIG_LOG_PROCESSING_LATENCY_US) / 1000000;
	}

	if (IS_ENABLED(CONFIG_LOG_OUTPUT)) {
		log_output_timestamp_freq_set(freq);
	}

	return 0;
}
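
/* Example (a sketch): source log timestamps from an external 32768 Hz RTC.
 * rtc_counter_get() is a hypothetical helper standing in for whatever
 * counter the application exposes.
 *
 *   static log_timestamp_t rtc_timestamp(void)
 *   {
 *       return (log_timestamp_t)rtc_counter_get();
 *   }
 *
 *   (void)log_set_timestamp_func(rtc_timestamp, 32768U);
 */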

void z_impl_log_panic(void)
{
	if (panic_mode) {
		return;
	}

	/* If the panic happened early, the logger might not be initialized.
	 * Force initialization of the logger and autostart the backends.
	 */
	(void)z_log_init(true, false);

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_panic();
		if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
			goto out;
		}
	}

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			log_backend_panic(backend);
		}
	}

	if (!IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		/* Flush */
		while (log_process() == true) {
		}
	}

out:
	panic_mode = true;
}
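
/* Example (a sketch): a custom fatal-error hook can flush buffered messages
 * synchronously before the system halts. The handler signature follows the
 * current Zephyr convention and may differ between releases.
 *
 *   void k_sys_fatal_error_handler(unsigned int reason,
 *                                  const struct arch_esf *esf)
 *   {
 *       log_panic();
 *       k_fatal_halt(reason);
 *   }
 */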

#ifdef CONFIG_USERSPACE
void z_vrfy_log_panic(void)
{
	z_impl_log_panic();
}
#include <zephyr/syscalls/log_panic_mrsh.c>
#endif

static bool msg_filter_check(struct log_backend const *backend,
			     union log_msg_generic *msg)
{
	if (!z_log_item_is_msg(msg)) {
		return true;
	}

	if (!IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		return true;
	}

	uint32_t backend_level;
	uint8_t level;
	uint8_t domain_id;
	int16_t source_id;

	level = log_msg_get_level(&msg->log);
	domain_id = log_msg_get_domain(&msg->log);
	source_id = log_msg_get_source_id(&msg->log);

	/* Accept all non-logging messages. */
	if (level == LOG_LEVEL_NONE) {
		return true;
	}
	if (source_id >= 0) {
		backend_level = log_filter_get(backend, domain_id, source_id, true);

		return (level <= backend_level);
	} else {
		return true;
	}
}

static void msg_process(union log_msg_generic *msg)
{
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend) &&
		    msg_filter_check(backend, msg)) {
			log_backend_msg_process(backend, msg);
		}
	}
}

void dropped_notify(void)
{
	uint32_t dropped = z_log_dropped_read_and_clear();

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			log_backend_dropped(backend, dropped);
		}
	}
}

void unordered_notify(void)
{
	uint32_t unordered = atomic_set(&unordered_cnt, 0);

	LOG_WRN("%d unordered messages since last report", unordered);
}

void z_log_notify_backend_enabled(void)
{
	/* Wake up the logger thread after the first backend is attached;
	 * it might be blocked with log messages pending.
	 */
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD) && !backend_attached) {
		k_sem_give(&log_process_thread_sem);
	}

	backend_attached = true;
}

static inline bool z_log_unordered_pending(void)
{
	return IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && unordered_cnt;
}

bool z_impl_log_process(void)
{
	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return false;
	}

	k_timeout_t backoff = K_NO_WAIT;
	union log_msg_generic *msg;

	if (!backend_attached) {
		return false;
	}

	msg = z_log_msg_claim(&backoff);

	if (msg) {
		msg_process(msg);
		z_log_msg_free(msg);
		atomic_dec(&buffered_cnt);
	} else if (CONFIG_LOG_PROCESSING_LATENCY_US > 0 && !K_TIMEOUT_EQ(backoff, K_NO_WAIT)) {
		/* If backoff is requested, it means that there are pending
		 * messages but they are too new and processing shall back off
		 * to allow arrival of newer messages from remote domains.
		 */
		k_timer_start(&log_process_thread_timer, backoff, K_NO_WAIT);

		return false;
	}

	if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		bool dropped_pend = z_log_dropped_pending();
		bool unordered_pend = z_log_unordered_pending();

		if ((dropped_pend || unordered_pend) &&
		    (k_uptime_get() - last_failure_report) > CONFIG_LOG_FAILURE_REPORT_PERIOD) {
			if (dropped_pend) {
				dropped_notify();
			}

			if (unordered_pend) {
				unordered_notify();
			}

			/* Advance the reporting window only when a report was
			 * actually emitted, so reports stay rate limited.
			 */
			last_failure_report += CONFIG_LOG_FAILURE_REPORT_PERIOD;
		}
	}

	return z_log_msg_pending();
}

#ifdef CONFIG_USERSPACE
bool z_vrfy_log_process(void)
{
	return z_impl_log_process();
}
#include <zephyr/syscalls/log_process_mrsh.c>
#endif

uint32_t z_impl_log_buffered_cnt(void)
{
	return buffered_cnt;
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_log_buffered_cnt(void)
{
	return z_impl_log_buffered_cnt();
}
#include <zephyr/syscalls/log_buffered_cnt_mrsh.c>
#endif

void z_log_dropped(bool buffered)
{
	atomic_inc(&dropped_cnt);
	if (buffered) {
		atomic_dec(&buffered_cnt);
	}

	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		k_timer_stop(&log_process_thread_timer);
		k_sem_give(&log_process_thread_sem);
	}
}

uint32_t z_log_dropped_read_and_clear(void)
{
	return atomic_set(&dropped_cnt, 0);
}

bool z_log_dropped_pending(void)
{
	return dropped_cnt > 0;
}

void z_log_msg_init(void)
{
#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_init(&log_buffer, &mpsc_config);
	curr_log_buffer = &log_buffer;
#endif
}

static struct log_msg *msg_alloc(struct mpsc_pbuf_buffer *buffer, uint32_t wlen)
{
	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return NULL;
	}

	return (struct log_msg *)mpsc_pbuf_alloc(
		buffer, wlen,
		(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS == -1)
			? K_FOREVER
			: K_MSEC(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS));
}

struct log_msg *z_log_msg_alloc(uint32_t wlen)
{
	return msg_alloc(&log_buffer, wlen);
}

static void msg_commit(struct mpsc_pbuf_buffer *buffer, struct log_msg *msg)
{
	union log_msg_generic *m = (union log_msg_generic *)msg;

	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		k_spinlock_key_t key;

		if (IS_ENABLED(CONFIG_LOG_IMMEDIATE_CLEAN_OUTPUT)) {
			key = k_spin_lock(&process_lock);
		}

		msg_process(m);

		if (IS_ENABLED(CONFIG_LOG_IMMEDIATE_CLEAN_OUTPUT)) {
			k_spin_unlock(&process_lock, key);
		}

		return;
	}

#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_commit(buffer, &m->buf);
#endif
	z_log_msg_post_finalize();
}

void z_log_msg_commit(struct log_msg *msg)
{
	msg->hdr.timestamp = timestamp_func();
	msg_commit(&log_buffer, msg);
}
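
/* Writer-side lifecycle (a sketch of how the functions above compose):
 * allocate space in the MPSC buffer, fill the message, then commit it,
 * which stamps the timestamp and either processes it in place (immediate
 * mode) or publishes it for the processing thread (deferred mode).
 *
 *   struct log_msg *msg = z_log_msg_alloc(wlen);
 *
 *   if (msg != NULL) {
 *       // ... fill header and payload ...
 *       z_log_msg_commit(msg);
 *   }
 */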

union log_msg_generic *z_log_msg_local_claim(void)
{
#ifdef CONFIG_MPSC_PBUF
	return (union log_msg_generic *)mpsc_pbuf_claim(&log_buffer);
#else
	return NULL;
#endif
}

/* If there are buffers dedicated to each link, claim the oldest message
 * (lowest timestamp).
 */
union log_msg_generic *z_log_msg_claim_oldest(k_timeout_t *backoff)
{
	union log_msg_generic *msg = NULL;
	struct log_msg_ptr *chosen = NULL;
	log_timestamp_t t_min = sizeof(log_timestamp_t) > sizeof(uint32_t) ?
				UINT64_MAX : UINT32_MAX;
	int i = 0;

	/* Iterate over all available buffers and pick the oldest message. */
	STRUCT_SECTION_FOREACH(log_msg_ptr, msg_ptr) {
		struct log_mpsc_pbuf *buf;

		STRUCT_SECTION_GET(log_mpsc_pbuf, i, &buf);

#ifdef CONFIG_MPSC_PBUF
		if (msg_ptr->msg == NULL) {
			msg_ptr->msg = (union log_msg_generic *)mpsc_pbuf_claim(&buf->buf);
		}
#endif

		if (msg_ptr->msg) {
			log_timestamp_t t = log_msg_get_timestamp(&msg_ptr->msg->log);

			if (t < t_min) {
				t_min = t;
				msg = msg_ptr->msg;
				chosen = msg_ptr;
				curr_log_buffer = &buf->buf;
			}
		}
		i++;
	}

	if (msg) {
		if (CONFIG_LOG_PROCESSING_LATENCY_US > 0) {
			int32_t diff = t_min - (timestamp_func() - proc_latency);

			if (diff > 0) {
				/* Entry is too new. Back off for some time to
				 * allow newer remote messages to arrive, which
				 * may have been captured earlier (but on
				 * another platform). Calculate how long
				 * processing shall back off.
				 */
				if (timestamp_freq == sys_clock_hw_cycles_per_sec()) {
					*backoff = K_TICKS(diff);
				} else {
					*backoff = K_TICKS((diff * sys_clock_hw_cycles_per_sec()) /
							   timestamp_freq);
				}

				return NULL;
			}
		}

		(*chosen).msg = NULL;
	}

	if (t_min < prev_timestamp) {
		atomic_inc(&unordered_cnt);
	}

	prev_timestamp = t_min;

	return msg;
}

union log_msg_generic *z_log_msg_claim(k_timeout_t *backoff)
{
	size_t len;

	STRUCT_SECTION_COUNT(log_mpsc_pbuf, &len);

	/* Use only one buffer if others are not registered. */
	if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && len > 1) {
		return z_log_msg_claim_oldest(backoff);
	}

	return z_log_msg_local_claim();
}

static void msg_free(struct mpsc_pbuf_buffer *buffer, const union log_msg_generic *msg)
{
#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_free(buffer, &msg->buf);
#endif
}

void z_log_msg_free(union log_msg_generic *msg)
{
	msg_free(curr_log_buffer, msg);
}

static bool msg_pending(struct mpsc_pbuf_buffer *buffer)
{
#ifdef CONFIG_MPSC_PBUF
	return mpsc_pbuf_is_pending(buffer);
#else
	return false;
#endif
}

bool z_log_msg_pending(void)
{
	size_t len;
	int i = 0;

	STRUCT_SECTION_COUNT(log_mpsc_pbuf, &len);

	if (!IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) || (len == 1)) {
		return msg_pending(&log_buffer);
	}

	STRUCT_SECTION_FOREACH(log_msg_ptr, msg_ptr) {
		struct log_mpsc_pbuf *buf;

		if (msg_ptr->msg) {
			return true;
		}

		STRUCT_SECTION_GET(log_mpsc_pbuf, i, &buf);

		if (msg_pending(&buf->buf)) {
			return true;
		}

		i++;
	}

	return false;
}

void z_log_msg_enqueue(const struct log_link *link, const void *data, size_t len)
{
	struct log_msg *log_msg = (struct log_msg *)data;
	size_t wlen = DIV_ROUND_UP(ROUND_UP(len, Z_LOG_MSG_ALIGNMENT), sizeof(int));
	struct mpsc_pbuf_buffer *mpsc_pbuffer = link->mpsc_pbuf ? link->mpsc_pbuf : &log_buffer;
	struct log_msg *local_msg = msg_alloc(mpsc_pbuffer, wlen);

	if (!local_msg) {
		z_log_dropped(false);
		return;
	}

	log_msg->hdr.desc.valid = 0;
	log_msg->hdr.desc.busy = 0;
	log_msg->hdr.desc.domain += link->ctrl_blk->domain_offset;
	memcpy((void *)local_msg, data, len);
	msg_commit(mpsc_pbuffer, local_msg);
}

const char *z_log_get_tag(void)
{
	return CONFIG_LOG_TAG_MAX_LEN > 0 ? tag : NULL;
}

int log_set_tag(const char *str)
{
#if CONFIG_LOG_TAG_MAX_LEN > 0
	if (str == NULL) {
		return -EINVAL;
	}

	size_t len = strlen(str);
	size_t cpy_len = MIN(len, CONFIG_LOG_TAG_MAX_LEN);

	memcpy(tag, str, cpy_len);
	tag[cpy_len] = '\0';

	if (cpy_len < len) {
		tag[cpy_len - 1] = '~';
		return -ENOMEM;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}
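
/* Example (a sketch, requires CONFIG_LOG_TAG_MAX_LEN > 0): prefix every
 * message with a device identifier. -ENOMEM signals that the tag was
 * truncated, which is marked with a trailing '~':
 *
 *   if (log_set_tag("dev-42") == -ENOMEM) {
 *       // tag was cut to CONFIG_LOG_TAG_MAX_LEN characters
 *   }
 */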

int log_mem_get_usage(uint32_t *buf_size, uint32_t *usage)
{
	__ASSERT_NO_MSG(buf_size != NULL);
	__ASSERT_NO_MSG(usage != NULL);

	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return -EINVAL;
	}

	mpsc_pbuf_get_utilization(&log_buffer, buf_size, usage);

	return 0;
}

int log_mem_get_max_usage(uint32_t *max)
{
	__ASSERT_NO_MSG(max != NULL);

	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return -EINVAL;
	}

	return mpsc_pbuf_get_max_utilization(&log_buffer, max);
}
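
/* Example (a sketch, deferred mode only; peak tracking additionally assumes
 * CONFIG_LOG_MEM_UTILIZATION is enabled): report log buffer pressure.
 *
 *   uint32_t size, now, peak;
 *
 *   if ((log_mem_get_usage(&size, &now) == 0) &&
 *       (log_mem_get_max_usage(&peak) == 0)) {
 *       printk("log buffer: %u/%u bytes used, peak %u\n", now, size, peak);
 *   }
 */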

static void log_backend_notify_all(enum log_backend_evt event,
				   union log_backend_evt_arg *arg)
{
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		log_backend_notify(backend, event, arg);
	}
}

static void log_process_thread_timer_expiry_fn(struct k_timer *timer)
{
	k_sem_give(&log_process_thread_sem);
}

static void log_process_thread_func(void *dummy1, void *dummy2, void *dummy3)
{
	__ASSERT_NO_MSG(log_backend_count_get() > 0);
	uint32_t links_active_mask = 0xFFFFFFFF;
	uint8_t domain_offset = 0;
	uint32_t activate_mask = z_log_init(false, false);
	/* If some backends are not activated yet, set a periodic thread
	 * wake-up to poll backends for readiness. The period is arbitrary.
	 * If all backends are ready, the periodic wake-up is not needed.
	 */
	k_timeout_t timeout = (activate_mask != 0) ? K_MSEC(50) : K_FOREVER;
	bool processed_any = false;

	thread_set(k_current_get());

	/* The logging thread is periodically woken up until all backends
	 * that should be autostarted are ready.
	 */
	while (true) {
		if (activate_mask) {
			activate_mask = activate_foreach_backend(activate_mask);
			if (!activate_mask) {
				/* Periodic wake up no longer needed since all
				 * backends are ready.
				 */
				timeout = K_FOREVER;
			}
		}

		/* Keep trying to activate links until all links are active. */
		if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && links_active_mask) {
			links_active_mask =
				z_log_links_activate(links_active_mask, &domain_offset);
		}

		if (log_process() == false) {
			if (processed_any) {
				processed_any = false;
				log_backend_notify_all(LOG_BACKEND_EVT_PROCESS_THREAD_DONE, NULL);
			}
			(void)k_sem_take(&log_process_thread_sem, timeout);
		} else {
			processed_any = true;
		}
	}
}

K_KERNEL_STACK_DEFINE(logging_stack, CONFIG_LOG_PROCESS_THREAD_STACK_SIZE);
struct k_thread logging_thread;

static int enable_logger(void)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		k_timer_init(&log_process_thread_timer,
			     log_process_thread_timer_expiry_fn, NULL);
		/* start logging thread */
		k_thread_create(&logging_thread, logging_stack,
				K_KERNEL_STACK_SIZEOF(logging_stack),
				log_process_thread_func, NULL, NULL, NULL,
				LOG_PROCESS_THREAD_PRIORITY, 0,
				COND_CODE_1(CONFIG_LOG_PROCESS_THREAD,
					    K_MSEC(CONFIG_LOG_PROCESS_THREAD_STARTUP_DELAY_MS),
					    K_NO_WAIT));
		k_thread_name_set(&logging_thread, "logging");
	} else {
		(void)z_log_init(false, false);
	}

	return 0;
}

#ifdef CONFIG_LOG_MODE_DEFERRED
void log_flush(void)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		while (atomic_get(&buffered_cnt)) {
			log_thread_trigger();
			k_sleep(K_USEC(CONFIG_LOG_FLUSH_SLEEP_US));
		}
	} else {
		while (LOG_PROCESS()) {
		}
	}
}
#endif
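
/* Example (a sketch): flush pending messages before an intentional reboot so
 * none are lost. sys_reboot() comes from <zephyr/sys/reboot.h>.
 *
 *   LOG_INF("rebooting");
 *   log_flush();
 *   sys_reboot(SYS_REBOOT_COLD);
 */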

SYS_INIT(enable_logger, POST_KERNEL, CONFIG_LOG_CORE_INIT_PRIORITY);