1 /******************************************************************************
2 * debugtrace.c
3 *
4 * Debugtrace for Xen
5 */
6
7
8 #include <xen/console.h>
9 #include <xen/cpu.h>
10 #include <xen/init.h>
11 #include <xen/keyhandler.h>
12 #include <xen/lib.h>
13 #include <xen/mm.h>
14 #include <xen/param.h>
15 #include <xen/percpu.h>
16 #include <xen/sections.h>
17 #include <xen/serial.h>
18 #include <xen/smp.h>
19 #include <xen/spinlock.h>
20 #include <xen/watchdog.h>
21
/* Message sequence counter rolls over after this many messages. */
#define DEBUGTRACE_COUNT_WRAP 99999999

/* Send output direct to console, or buffer it? */
static volatile bool debugtrace_send_to_console;

/* Ring-buffer header; the trace bytes follow directly after it. */
struct debugtrace_data {
    unsigned long prd;        /* Producer index. */
    unsigned long wrap_cnt;   /* Sequence-counter wraps recorded in this ring. */
    char          buf[];      /* Trace data (flexible array member). */
};

/* Single global ring, used when per-CPU tracing is not enabled. */
static struct debugtrace_data *dt_data;
/* One ring per CPU, used when "debugtrace=cpu:<size>" was given. */
static DEFINE_PER_CPU(struct debugtrace_data *, dt_cpu_data);

/* Ring size in bytes; default 128 KiB, adjustable via "debugtrace=". */
static unsigned long __read_mostly debugtrace_bytes = 128 << 10;
/* True when each CPU gets its own ring instead of one shared ring. */
static bool __read_mostly debugtrace_per_cpu;
/* True while no entry has been written since the last dump. */
static bool debugtrace_buf_empty = true;
/* Set once anything was ever traced; dumping is skipped otherwise. */
static bool debugtrace_used;
/* Protects all rings plus the static state in debugtrace_printk(). */
static DEFINE_SPINLOCK(debugtrace_lock);
41
debugtrace_parse_param(const char * s)42 static int __init cf_check debugtrace_parse_param(const char *s)
43 {
44 unsigned long bytes;
45
46 if ( !strncmp(s, "cpu:", 4) )
47 {
48 debugtrace_per_cpu = true;
49 s += 4;
50 }
51 bytes = parse_size_and_unit(s, &s);
52
53 if ( *s )
54 return -EINVAL;
55
56 debugtrace_bytes = MAX(bytes, PAGE_SIZE);
57
58 /* Round size down to next power of two. */
59 while ( (bytes = (debugtrace_bytes & (debugtrace_bytes - 1))) != 0 )
60 debugtrace_bytes = bytes;
61
62 return 0;
63 }
64 custom_param("debugtrace", debugtrace_parse_param);
65
debugtrace_dump_buffer(struct debugtrace_data * data,const char * which)66 static void debugtrace_dump_buffer(struct debugtrace_data *data,
67 const char *which)
68 {
69 if ( !data )
70 return;
71
72 printk("debugtrace_dump() %s buffer starting\n", which);
73
74 /* Print oldest portion of the ring. */
75 if ( data->buf[data->prd] != '\0' )
76 console_serial_puts(&data->buf[data->prd],
77 debugtrace_bytes - data->prd);
78
79 /* Print youngest portion of the ring. */
80 data->buf[data->prd] = '\0';
81 console_serial_puts(&data->buf[0], data->prd);
82 printk("wrap: %lu\n", data->wrap_cnt);
83
84 memset(data->buf, '\0', debugtrace_bytes);
85 data->prd = 0;
86
87 printk("debugtrace_dump() %s buffer finished\n", which);
88 }
89
debugtrace_dump_worker(void)90 static void debugtrace_dump_worker(void)
91 {
92 unsigned int cpu;
93
94 if ( !debugtrace_used )
95 return;
96
97 debugtrace_dump_buffer(dt_data, "global");
98
99 for_each_online_cpu ( cpu )
100 {
101 char buf[16];
102
103 snprintf(buf, sizeof(buf), "cpu %u", cpu);
104 debugtrace_dump_buffer(per_cpu(dt_cpu_data, cpu), buf);
105 }
106
107 debugtrace_buf_empty = true;
108 }
109
debugtrace_toggle(void)110 static void debugtrace_toggle(void)
111 {
112 unsigned long flags;
113
114 watchdog_disable();
115 spin_lock_irqsave(&debugtrace_lock, flags);
116
117 /*
118 * Dump the buffer *before* toggling, in case the act of dumping the
119 * buffer itself causes more printk() invocations.
120 */
121 printk("debugtrace_printk now writing to %s.\n",
122 !debugtrace_send_to_console ? "console": "buffer");
123 if ( !debugtrace_send_to_console )
124 debugtrace_dump_worker();
125
126 debugtrace_send_to_console = !debugtrace_send_to_console;
127
128 spin_unlock_irqrestore(&debugtrace_lock, flags);
129 watchdog_enable();
130 }
131
debugtrace_dump(void)132 void debugtrace_dump(void)
133 {
134 unsigned long flags;
135
136 watchdog_disable();
137 spin_lock_irqsave(&debugtrace_lock, flags);
138
139 debugtrace_dump_worker();
140
141 spin_unlock_irqrestore(&debugtrace_lock, flags);
142 watchdog_enable();
143 }
144
debugtrace_add_to_buf(char * buf)145 static void debugtrace_add_to_buf(char *buf)
146 {
147 struct debugtrace_data *data;
148 char *p;
149
150 data = debugtrace_per_cpu ? this_cpu(dt_cpu_data) : dt_data;
151
152 for ( p = buf; *p != '\0'; p++ )
153 {
154 data->buf[data->prd++] = *p;
155 if ( data->prd == debugtrace_bytes )
156 data->prd = 0;
157 }
158 }
159
/*
 * Format a trace message and either send it straight to the serial
 * console or append it to the current ring, depending on the mode toggled
 * via the 'T' keyhandler.  In buffer mode, an entry that exactly repeats
 * the previous one is collapsed into a single entry carrying a
 * "first-last " sequence-number range.  Messages are dropped silently if
 * no ring has been allocated.
 */
void debugtrace_printk(const char *fmt, ...)
{
    /* Scratch formatting buffer, and a copy of the previous message. */
    static char buf[1024], last_buf[1024];
    /* Sequence counter plus state for duplicate-message collapsing. */
    static unsigned int count, last_count, last_cpu;
    static unsigned long last_prd, wrap_cnt;

    char cntbuf[50];
    va_list args;
    unsigned long flags;
    unsigned int nr;
    struct debugtrace_data *data;

    /* No ring (not configured, or allocation failed): drop the message. */
    data = debugtrace_per_cpu ? this_cpu(dt_cpu_data) : dt_data;
    if ( !data )
        return;

    debugtrace_used = true;

    spin_lock_irqsave(&debugtrace_lock, flags);

    va_start(args, fmt);
    nr = vsnprintf(buf, sizeof(buf), fmt, args);
    va_end(args);

    /* Keep the printed sequence number bounded; remember each wrap. */
    if ( count == DEBUGTRACE_COUNT_WRAP )
    {
        count = 0;
        wrap_cnt++;
    }

    if ( debugtrace_send_to_console )
    {
        unsigned int n;

        /* Announce a counter wrap so sequence numbers stay unambiguous. */
        if ( count == 0 )
        {
            n = scnprintf(cntbuf, sizeof(cntbuf), "wrap: %lu\n", wrap_cnt);
            console_serial_puts(cntbuf, n);
        }

        /* "<seq> <message>" straight to the serial console. */
        n = scnprintf(cntbuf, sizeof(cntbuf), "%u ", ++count);
        console_serial_puts(cntbuf, n);
        console_serial_puts(buf, nr);
    }
    else
    {
        unsigned int cpu = debugtrace_per_cpu ? smp_processor_id() : 0;

        /*
         * Emit a fresh entry unless this message exactly repeats the
         * previous one: same text, same CPU, and no counter wrap since.
         */
        if ( debugtrace_buf_empty || cpu != last_cpu ||
             wrap_cnt != data->wrap_cnt || strcmp(buf, last_buf) )
        {
            /* Record a pending counter wrap in the ring itself. */
            if ( wrap_cnt != data->wrap_cnt )
            {
                snprintf(cntbuf, sizeof(cntbuf), "wrap: %lu->%lu\n",
                         data->wrap_cnt, wrap_cnt);
                debugtrace_add_to_buf(cntbuf);
                data->wrap_cnt = wrap_cnt;
            }
            /* Remember this entry so a repeat can be collapsed later. */
            debugtrace_buf_empty = false;
            last_prd = data->prd;
            last_count = ++count;
            last_cpu = cpu;
            safe_strcpy(last_buf, buf);
            snprintf(cntbuf, sizeof(cntbuf), "%u ", count);
        }
        else
        {
            /*
             * Repeat of the previous entry: rewind the producer index to
             * overwrite it, prefixed with a "first-last " count range.
             */
            data->prd = last_prd;
            snprintf(cntbuf, sizeof(cntbuf), "%u-%u ", last_count, ++count);
        }
        debugtrace_add_to_buf(cntbuf);
        debugtrace_add_to_buf(buf);
    }

    spin_unlock_irqrestore(&debugtrace_lock, flags);
}
236
/* 'T' keyhandler: toggle output between console and buffer ('key' unused). */
static void cf_check debugtrace_key(unsigned char key)
{
    debugtrace_toggle();
}
241
debugtrace_alloc_buffer(struct debugtrace_data ** ptr,unsigned int cpu)242 static void debugtrace_alloc_buffer(struct debugtrace_data **ptr,
243 unsigned int cpu)
244 {
245 int order;
246 struct debugtrace_data *data;
247
248 if ( *ptr )
249 return;
250
251 order = get_order_from_bytes(debugtrace_bytes);
252 data = alloc_xenheap_pages(order, 0);
253 if ( !data )
254 {
255 if ( debugtrace_per_cpu )
256 printk("CPU%u: failed to allocate debugtrace buffer\n", cpu);
257 else
258 printk("failed to allocate debugtrace buffer\n");
259 return;
260 }
261
262 memset(data, '\0', debugtrace_bytes + sizeof(*data));
263
264 *ptr = data;
265 }
266
debugtrace_cpu_callback(struct notifier_block * nfb,unsigned long action,void * hcpu)267 static int cf_check debugtrace_cpu_callback(
268 struct notifier_block *nfb, unsigned long action, void *hcpu)
269 {
270 unsigned int cpu = (unsigned long)hcpu;
271
272 /* Buffers are only ever allocated, never freed. */
273 if ( action == CPU_UP_PREPARE )
274 debugtrace_alloc_buffer(&per_cpu(dt_cpu_data, cpu), cpu);
275
276 return 0;
277 }
278
/* Hotplug hook so CPUs brought online later also get a trace ring. */
static struct notifier_block debugtrace_nfb = {
    .notifier_call = debugtrace_cpu_callback
};
282
debugtrace_init(void)283 static int __init cf_check debugtrace_init(void)
284 {
285 unsigned int cpu;
286
287 if ( !debugtrace_bytes )
288 return 0;
289
290 register_keyhandler('T', debugtrace_key,
291 "toggle debugtrace to console/buffer", 0);
292
293 debugtrace_bytes -= sizeof(struct debugtrace_data);
294
295 if ( debugtrace_per_cpu )
296 {
297 for_each_online_cpu ( cpu )
298 debugtrace_alloc_buffer(&per_cpu(dt_cpu_data, cpu), cpu);
299 register_cpu_notifier(&debugtrace_nfb);
300 }
301 else
302 debugtrace_alloc_buffer(&dt_data, 0);
303
304 return 0;
305 }
306 __initcall(debugtrace_init);
307