// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <lib/debuglog.h>

#include <dev/udisplay.h>
#include <err.h>
#include <fbl/atomic.h>
#include <kernel/cmdline.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <lib/crashlog.h>
#include <lib/io.h>
#include <lib/version.h>
#include <lk/init.h>
#include <platform.h>
#include <stdint.h>
#include <string.h>
#include <vm/vm.h>
#include <zircon/types.h>

#define DLOG_SIZE (128u * 1024u)
#define DLOG_MASK (DLOG_SIZE - 1u)

static_assert((DLOG_SIZE & DLOG_MASK) == 0u, "DLOG_SIZE must be a power of two");
static_assert(DLOG_MAX_RECORD <= DLOG_SIZE, "DLOG_MAX_RECORD must fit in the fifo");
static_assert((DLOG_MAX_RECORD & 3) == 0, "DLOG_MAX_RECORD must be 4-byte aligned");

static uint8_t DLOG_DATA[DLOG_SIZE];

static dlog_t DLOG = {
    .lock = SPIN_LOCK_INITIAL_VALUE,
    .head = 0,
    .tail = 0,
    .data = DLOG_DATA,
    .panic = false,
    .event = EVENT_INITIAL_VALUE(DLOG.event, 0, EVENT_FLAG_AUTOUNSIGNAL),

    .readers_lock = MUTEX_INITIAL_VALUE(DLOG.readers_lock),
    .readers = LIST_INITIAL_VALUE(DLOG.readers),
};

static thread_t* notifier_thread;
static thread_t* dumper_thread;

// Used to request that notifier and dumper threads terminate.
static fbl::atomic_bool notifier_shutdown_requested;
static fbl::atomic_bool dumper_shutdown_requested;

// When dlog_bypass_ is true, debuglog writes go directly to the console. This
// also has the side effect of disabling UART TX interrupts, so all serial
// console writes are polling.
static bool dlog_bypass_ = false;

// We need to preserve the compile-time switch (ENABLE_KERNEL_LL_DEBUG), even
// though we added a kernel cmdline option (kernel.bypass-debuglog) to bypass
// the debuglog. This allows very early prints in the kernel to go to the
// serial console.

// Called first thing in init, so very early printfs can go to the serial console.
void dlog_bypass_init_early(void) {
#ifdef ENABLE_KERNEL_LL_DEBUG
    dlog_bypass_ = true;
#endif
}

// Called after kernel cmdline options are parsed (in platform_early_init()).
// The compile switch (if enabled) overrides the kernel cmdline switch.
void dlog_bypass_init(void) {
    if (dlog_bypass_ == false)
        dlog_bypass_ = cmdline_get_bool("kernel.bypass-debuglog", false);
}

bool dlog_bypass(void) {
    return dlog_bypass_;
}

// The debug log maintains a circular buffer of debug log records,
// consisting of a common header (dlog_header_t) followed by up
// to 224 bytes of textual log message.  Records are aligned on
// uint32_t boundaries, so the header word which indicates the
// true size of the record and the space it takes in the fifo
// can always be read with a single uint32_t* read (the header
// or body may wrap but the initial header word never does).
//
// The ring buffer position is maintained by continuously incrementing
// head and tail pointers (type size_t, so uint64_t on 64-bit systems).
//
// This allows readers to trivially compute whether their local tail
// pointer has "fallen out" of the fifo (an entire fifo's worth of
// messages was written since they last tried to read); they can then
// snap their tail to the global tail and restart.
//
// Tail indicates the oldest message in the debug log to read from;
// Head indicates the next space in the debug log to write a new
// message to.  They are clipped to the actual buffer by DLOG_MASK.
//
//       T                     T
//  [....XXXX....]  [XX........XX]
//           H         H
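//
// For example, a reader can detect that it has been lapped using the same
// unsigned comparison dlog_read() makes below (a minimal sketch with
// hypothetical values):
//
//   size_t head = 140000, tail = 12000;  // 128000 bytes currently live in the fifo
//   size_t rtail = 2000;                 // reader's last position, already overwritten
//   bool lapped = (head - tail) < (head - rtail);  // true -> snap rtail to tail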

#define ALIGN4(n) (((n) + 3) & (~3))

zx_status_t dlog_write(uint32_t flags, const void* data_ptr, size_t len) {
    const uint8_t* ptr = static_cast<const uint8_t*>(data_ptr);
    dlog_t* log = &DLOG;

    if (len > DLOG_MAX_DATA) {
        return ZX_ERR_OUT_OF_RANGE;
    }

    if (log->panic) {
        return ZX_ERR_BAD_STATE;
    }

    // Our size "on the wire" must be a multiple of 4, so we know that,
    // worst case, there will be room for a header that skips the last
    // n bytes when the fifo wraps.
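    // For example, a 5-byte message has wiresize DLOG_MIN_RECORD + 8 (ALIGN4
    // rounds 5 up to 8), while its read length below stays DLOG_MIN_RECORD + 5,
    // so the padding bytes are never copied back out by dlog_read().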
    size_t wiresize = DLOG_MIN_RECORD + ALIGN4(len);

    // Prepare the record header before taking the lock
    dlog_header_t hdr;
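    // The header word packs both the padded fifo length (wiresize) and the
    // unpadded read length of the record.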
    hdr.header = static_cast<uint32_t>(DLOG_HDR_SET(wiresize, DLOG_MIN_RECORD + len));
    hdr.datalen = static_cast<uint16_t>(len);
    hdr.flags = static_cast<uint16_t>(flags);
    hdr.timestamp = current_time();
    thread_t* t = get_current_thread();
    if (t) {
        hdr.pid = t->user_pid;
        hdr.tid = t->user_tid;
    } else {
        hdr.pid = 0;
        hdr.tid = 0;
    }

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&log->lock, state);

    // Discard records at tail until there is enough
    // space for the new record.
    while ((log->head - log->tail) > (DLOG_SIZE - wiresize)) {
        uint32_t header = *reinterpret_cast<uint32_t*>(log->data + (log->tail & DLOG_MASK));
        log->tail += DLOG_HDR_GET_FIFOLEN(header);
    }

    size_t offset = (log->head & DLOG_MASK);

    size_t fifospace = DLOG_SIZE - offset;

    if (fifospace >= wiresize) {
        // everything fits in one write, simple case!
        memcpy(log->data + offset, &hdr, sizeof(hdr));
        memcpy(log->data + offset + sizeof(hdr), ptr, len);
    } else if (fifospace < sizeof(hdr)) {
        // the wrap happens in the header
        memcpy(log->data + offset, &hdr, fifospace);
        memcpy(log->data, reinterpret_cast<uint8_t*>(&hdr) + fifospace, sizeof(hdr) - fifospace);
        memcpy(log->data + (sizeof(hdr) - fifospace), ptr, len);
    } else {
        // the wrap happens in the data
        memcpy(log->data + offset, &hdr, sizeof(hdr));
        offset += sizeof(hdr);
        fifospace -= sizeof(hdr);
        memcpy(log->data + offset, ptr, fifospace);
        memcpy(log->data, ptr + fifospace, len - fifospace);
    }
    log->head += wiresize;

    // Need to check this before releasing the log lock, since we may
    // re-enable interrupts while doing that.  If interrupts are enabled when we
    // make this check, we could see the following sequence of events between
    // two CPUs and incorrectly conclude we are holding the thread lock:
    // C2: Acquire thread_lock
    // C1: Running this thread, evaluate spin_lock_holder_cpu(&thread_lock) -> C2
    // C1: Context switch away
    // C2: Release thread_lock
    // C2: Context switch to this thread
    // C2: Running this thread, evaluate arch_curr_cpu_num() -> C2
    bool holding_thread_lock = spin_lock_holder_cpu(&thread_lock) == arch_curr_cpu_num();

    spin_unlock_irqrestore(&log->lock, state);

    [log, holding_thread_lock]() TA_NO_THREAD_SAFETY_ANALYSIS {
        // if we happen to be called from within the global thread lock, use a
        // special version of event signal
        if (holding_thread_lock) {
            event_signal_thread_locked(&log->event);
        } else {
            event_signal(&log->event, false);
        }
    }();

    return ZX_OK;
}

// TODO: support reading multiple messages at a time
// TODO: filter with flags
zx_status_t dlog_read(dlog_reader_t* rdr, uint32_t flags, void* data_ptr,
                      size_t len, size_t* _actual) {
    uint8_t* ptr = static_cast<uint8_t*>(data_ptr);
    // must be room for worst-case read
    if (len < DLOG_MAX_RECORD) {
        return ZX_ERR_BUFFER_TOO_SMALL;
    }

    dlog_t* log = rdr->log;
    zx_status_t status = ZX_ERR_SHOULD_WAIT;

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&log->lock, state);

    size_t rtail = rdr->tail;

    // If the read-tail is not within the range log-tail..log-head, this
    // reader has been lapped by a writer, so we reset our read-tail to the
    // current log-tail.
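    // Because head, tail, and rtail only ever increase, these unsigned
    // differences measure how far behind head each position is; if rtail is
    // further behind head than tail is, its data has already been overwritten.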
    if ((log->head - log->tail) < (log->head - rtail)) {
        rtail = log->tail;
    }

    if (rtail != log->head) {
        size_t offset = (rtail & DLOG_MASK);
        uint32_t header = *reinterpret_cast<uint32_t*>(log->data + offset);

        size_t actual = DLOG_HDR_GET_READLEN(header);
        size_t fifospace = DLOG_SIZE - offset;

        if (fifospace >= actual) {
            memcpy(ptr, log->data + offset, actual);
        } else {
            memcpy(ptr, log->data + offset, fifospace);
            memcpy(ptr + fifospace, log->data, actual - fifospace);
        }

        *_actual = actual;
        status = ZX_OK;

        rtail += DLOG_HDR_GET_FIFOLEN(header);
    }

    rdr->tail = rtail;

    spin_unlock_irqrestore(&log->lock, state);

    return status;
}

void dlog_reader_init(dlog_reader_t* rdr, void (*notify)(void*), void* cookie) {
    dlog_t* log = &DLOG;

    rdr->log = log;
    rdr->notify = notify;
    rdr->cookie = cookie;

    mutex_acquire(&log->readers_lock);
    list_add_tail(&log->readers, &rdr->node);

    bool do_notify = false;

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&log->lock, state);
    rdr->tail = log->tail;
    do_notify = (log->tail != log->head);
    spin_unlock_irqrestore(&log->lock, state);

    // simulate notify callback for events that arrived
    // before we were initialized
    if (do_notify && notify) {
        notify(cookie);
    }

    mutex_release(&log->readers_lock);
}

void dlog_reader_destroy(dlog_reader_t* rdr) {
    dlog_t* log = rdr->log;

    mutex_acquire(&log->readers_lock);
    list_delete(&rdr->node);
    mutex_release(&log->readers_lock);
}

// The debuglog notifier thread observes when the debuglog is
// written and calls the notify callback on any readers that
// have one so they can process new log messages.
static int debuglog_notifier(void* arg) {
    dlog_t* log = &DLOG;

    for (;;) {
        if (notifier_shutdown_requested.load()) {
            break;
        }
        event_wait(&log->event);

        // notify readers that new log items were posted
        mutex_acquire(&log->readers_lock);
        dlog_reader_t* rdr;
        list_for_every_entry (&log->readers, rdr, dlog_reader_t, node) {
            if (rdr->notify) {
                rdr->notify(rdr->cookie);
            }
        }
        mutex_release(&log->readers_lock);
    }
    return ZX_OK;
}

// Common bottleneck between sys_debug_write() and debuglog_dumper()
// to reduce interleaved messages between the serial console and the
// debuglog drainer.
void dlog_serial_write(const char* data, size_t len) {
    if (dlog_bypass_ == true) {
        // If LL DEBUG is enabled we take this path which uses a spinlock
        // and prevents the direct writes from the kernel from interleaving
        // with our output
        __kernel_serial_write(data, len);
    } else {
        // Otherwise we can use a mutex and avoid time under spinlock
        static mutex_t lock = MUTEX_INITIAL_VALUE(lock);
        mutex_acquire(&lock);
        platform_dputs_thread(data, len);
        mutex_release(&lock);
    }
}

// The debuglog dumper thread creates a reader to observe
// debuglog writes and dump them to the kernel consoles
// and kernel serial console.
static void debuglog_dumper_notify(void* cookie) {
    event_t* event = reinterpret_cast<event_t*>(cookie);
    event_signal(event, false);
}

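// Signaled by debuglog_dumper_notify() whenever new log records are available
// for the dumper to drain.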
static event_t dumper_event = EVENT_INITIAL_VALUE(dumper_event, 0, EVENT_FLAG_AUTOUNSIGNAL);

static int debuglog_dumper(void* arg) {
    // assembly buffer with room for log text plus header text
    char tmp[DLOG_MAX_DATA + 128];

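    // Incoming record; data has one extra byte so the text can always be
    // NUL-terminated before printing.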
    struct {
        dlog_header_t hdr;
        char data[DLOG_MAX_DATA + 1];
    } rec;

    dlog_reader_t reader;
    dlog_reader_init(&reader, debuglog_dumper_notify, &dumper_event);

    for (;;) {
        if (dumper_shutdown_requested.load()) {
            break;
        }
        event_wait(&dumper_event);

        // dump records to kernel console
        size_t actual;
        while (dlog_read(&reader, 0, &rec, DLOG_MAX_RECORD, &actual) == ZX_OK) {
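            // NUL-terminate the message text, dropping a trailing newline since
            // the line printed below adds its own.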
            if (rec.hdr.datalen && (rec.data[rec.hdr.datalen - 1] == '\n')) {
                rec.data[rec.hdr.datalen - 1] = 0;
            } else {
                rec.data[rec.hdr.datalen] = 0;
            }
            int n;
            n = snprintf(tmp, sizeof(tmp), "[%05d.%03d] %05" PRIu64 ".%05" PRIu64 "> %s\n",
                         (int)(rec.hdr.timestamp / ZX_SEC(1)),
                         (int)((rec.hdr.timestamp / ZX_MSEC(1)) % 1000ULL),
                         rec.hdr.pid, rec.hdr.tid, rec.data);
            if (n > (int)sizeof(tmp)) {
                n = sizeof(tmp);
            }
            __kernel_console_write(tmp, n);
            dlog_serial_write(tmp, n);
        }
    }

    return 0;
}

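// Print one kernel mapping in the symbolizer markup format used below.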
static void print_mmap(uintptr_t bias, void* begin, void* end, const char* perm) {
    uintptr_t start = reinterpret_cast<uintptr_t>(begin);
    size_t size = reinterpret_cast<uintptr_t>(end) - start;
    dprintf(INFO, "{{{mmap:%#lx:%#lx:load:0:%s:%#lx}}}\n", start, size, perm, start + bias);
}

void dlog_bluescreen_init(void) {
    // If we're panicking, stop processing log writes; they'll fail over to the
    // kernel console and serial console.
    DLOG.panic = true;

    udisplay_bind_gfxconsole();

    // replay debug log?

    dprintf(INFO, "\nZIRCON KERNEL PANIC\n\n");
    dprintf(INFO, "UPTIME: %" PRIi64 "ms\n", current_time() / ZX_MSEC(1));
    dprintf(INFO, "BUILDID %s\n\n", version.buildid);

    // Log the ELF build ID in the format the symbolizer scripts understand.
    if (version.elf_build_id[0] != '\0') {
        uintptr_t bias = KERNEL_BASE - reinterpret_cast<uintptr_t>(__code_start);
        dprintf(INFO, "{{{module:0:kernel:elf:%s}}}\n", version.elf_build_id);
        // These four mappings match the mappings printed by vm_init().
        print_mmap(bias, __code_start, __code_end, "rx");
        print_mmap(bias, __rodata_start, __rodata_end, "r");
        print_mmap(bias, __data_start, __data_end, "rw");
        print_mmap(bias, __bss_start, _end, "rw");
        dprintf(INFO, "dso: id=%s base=%#lx name=zircon.elf\n",
                version.elf_build_id, (uintptr_t)__code_start);
    }

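    // Record the kernel's load address in the crashlog.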
    crashlog.base_address = (uintptr_t)__code_start;
}

void dlog_shutdown(void) {
    DEBUG_ASSERT(!arch_ints_disabled());

    dprintf(INFO, "Shutting down debuglog\n");

    // Limit how long we wait for the threads to terminate.
    const zx_time_t deadline = current_time() + ZX_SEC(5);

    // Shut down the notifier thread first. Ordering is important because the
    // notifier thread is responsible for passing log records to the dumper.
    notifier_shutdown_requested.store(true);
    event_signal(&DLOG.event, false);
    if (notifier_thread != nullptr) {
        zx_status_t status = thread_join(notifier_thread, nullptr, deadline);
        if (status != ZX_OK) {
            dprintf(INFO, "Failed to join notifier thread: %d\n", status);
        }
        notifier_thread = nullptr;
    }

    dumper_shutdown_requested.store(true);
    event_signal(&dumper_event, false);
    if (dumper_thread != nullptr) {
        zx_status_t status = thread_join(dumper_thread, nullptr, deadline);
        if (status != ZX_OK) {
            dprintf(INFO, "Failed to join dumper thread: %d\n", status);
        }
        dumper_thread = nullptr;
    }
}

static void dlog_init_hook(uint level) {
    DEBUG_ASSERT(notifier_thread == nullptr);
    DEBUG_ASSERT(dumper_thread == nullptr);

    if ((notifier_thread = thread_create("debuglog-notifier", debuglog_notifier, NULL,
                                         HIGH_PRIORITY - 1)) != NULL) {
        thread_resume(notifier_thread);
    }

    if (platform_serial_enabled() || platform_early_console_enabled()) {
        if ((dumper_thread = thread_create("debuglog-dumper", debuglog_dumper, NULL,
                                           HIGH_PRIORITY - 2)) != NULL) {
            thread_resume(dumper_thread);
        }
    }
}

LK_INIT_HOOK(debuglog, dlog_init_hook, LK_INIT_LEVEL_THREADING - 1);