1 /******************************************************************************
2 * keyhandler.c
3 */
4
5 #include <asm/regs.h>
6 #include <xen/keyhandler.h>
7 #include <xen/shutdown.h>
8 #include <xen/event.h>
9 #include <xen/console.h>
10 #include <xen/serial.h>
11 #include <xen/sched.h>
12 #include <xen/tasklet.h>
13 #include <xen/domain.h>
14 #include <xen/rangeset.h>
15 #include <xen/compat.h>
16 #include <xen/ctype.h>
17 #include <xen/perfc.h>
18 #include <xen/mm.h>
19 #include <xen/watchdog.h>
20 #include <xen/init.h>
21 #include <asm/debugger.h>
22 #include <asm/div64.h>
23
/* Key most recently latched by handle_keypress() for deferred handling. */
static unsigned char keypress_key;
/* When set, multi-CPU dumps proceed asynchronously, one CPU at a time. */
static bool_t alt_key_handling;

static keyhandler_fn_t show_handlers, dump_hwdom_registers,
    dump_domains, read_clocks;
static irq_keyhandler_fn_t do_toggle_alt_key, dump_registers,
    reboot_machine, run_all_keyhandlers, do_debug_key;

/* Shared scratch buffer for handlers formatting cpu/node sets and the like. */
char keyhandler_scratch[1024];

static struct keyhandler {
    union {
        keyhandler_fn_t *fn;         /* Used when !irq_callback. */
        irq_keyhandler_fn_t *irq_fn; /* Used when irq_callback. */
    };

    const char *desc;    /* Description for help message. */
    bool_t irq_callback, /* Call in irq context? if not, tasklet context. */
        diagnostic;      /* Include in 'dump all' handler. */
} key_table[128] __read_mostly =
{
#define KEYHANDLER(k, f, desc, diag)            \
    [k] = { { (f) }, desc, 0, diag }

#define IRQ_KEYHANDLER(k, f, desc, diag)        \
    [k] = { { (keyhandler_fn_t *)(f) }, desc, 1, diag }

    IRQ_KEYHANDLER('A', do_toggle_alt_key, "toggle alternative key handling", 0),
    IRQ_KEYHANDLER('d', dump_registers, "dump registers", 1),
    KEYHANDLER('h', show_handlers, "show this message", 0),
    KEYHANDLER('q', dump_domains, "dump domain (and guest debug) info", 1),
    KEYHANDLER('r', dump_runq, "dump run queues", 1),
    IRQ_KEYHANDLER('R', reboot_machine, "reboot machine", 0),
    KEYHANDLER('t', read_clocks, "display multi-cpu clock info", 1),
    KEYHANDLER('0', dump_hwdom_registers, "dump Dom0 registers", 1),
    IRQ_KEYHANDLER('%', do_debug_key, "trap to xendbg", 0),
    IRQ_KEYHANDLER('*', run_all_keyhandlers, "print all diagnostics", 0),

#ifdef CONFIG_PERF_COUNTERS
    KEYHANDLER('p', perfc_printall, "print performance counters", 1),
    KEYHANDLER('P', perfc_reset, "reset performance counters", 0),
#endif

#ifdef CONFIG_LOCK_PROFILE
    KEYHANDLER('l', spinlock_profile_printall, "print lock profile info", 1),
    KEYHANDLER('L', spinlock_profile_reset, "reset lock profile info", 0),
#endif

#undef IRQ_KEYHANDLER
#undef KEYHANDLER
};
75
/* Tasklet body: re-dispatch the most recently latched key in non-IRQ context. */
static void keypress_action(unsigned long unused)
{
    handle_keypress(keypress_key, NULL);
}

static DECLARE_TASKLET(keypress_tasklet, keypress_action, 0);
82
handle_keypress(unsigned char key,struct cpu_user_regs * regs)83 void handle_keypress(unsigned char key, struct cpu_user_regs *regs)
84 {
85 struct keyhandler *h;
86
87 if ( key >= ARRAY_SIZE(key_table) || !(h = &key_table[key])->fn )
88 return;
89
90 if ( !in_irq() || h->irq_callback )
91 {
92 console_start_log_everything();
93 h->irq_callback ? h->irq_fn(key, regs) : h->fn(key);
94 console_end_log_everything();
95 }
96 else
97 {
98 keypress_key = key;
99 tasklet_schedule(&keypress_tasklet);
100 }
101 }
102
/*
 * Install a non-IRQ (tasklet-context) handler for @key.
 * @desc appears in the 'h' help listing; @diagnostic includes the handler
 * in the '*' dump-all sweep.  The slot must currently be empty.
 */
void register_keyhandler(unsigned char key, keyhandler_fn_t fn,
                         const char *desc, bool_t diagnostic)
{
    struct keyhandler *h;

    BUG_ON(key >= ARRAY_SIZE(key_table)); /* Key in range? */
    ASSERT(!key_table[key].fn);           /* Clobbering something else? */

    h = &key_table[key];
    h->fn = fn;
    h->irq_callback = 0;
    h->desc = desc;
    h->diagnostic = diagnostic;
}
114
/*
 * Install an IRQ-context handler for @key (runs directly from the
 * interrupt, receiving the interrupted register state).  The slot must
 * currently be empty.
 */
void register_irq_keyhandler(unsigned char key, irq_keyhandler_fn_t fn,
                             const char *desc, bool_t diagnostic)
{
    struct keyhandler *h;

    BUG_ON(key >= ARRAY_SIZE(key_table)); /* Key in range? */
    ASSERT(!key_table[key].irq_fn);       /* Clobbering something else? */

    h = &key_table[key];
    h->irq_fn = fn;
    h->irq_callback = 1;
    h->desc = desc;
    h->diagnostic = diagnostic;
}
126
show_handlers(unsigned char key)127 static void show_handlers(unsigned char key)
128 {
129 unsigned int i;
130
131 printk("'%c' pressed -> showing installed handlers\n", key);
132 for ( i = 0; i < ARRAY_SIZE(key_table); i++ )
133 if ( key_table[i].fn )
134 printk(" key '%c' (ascii '%02x') => %s\n",
135 isprint(i) ? i : ' ', i, key_table[i].desc);
136 }
137
/* CPUs whose state still needs dumping; each CPU clears its own bit. */
static cpumask_t dump_execstate_mask;

/*
 * Dump this CPU's execution state; called directly on the triggering CPU
 * and from the state-dump IPI on the others.
 */
void dump_execstate(struct cpu_user_regs *regs)
{
    unsigned int cpu = smp_processor_id();

    if ( !guest_mode(regs) )
    {
        printk("*** Dumping CPU%u host state: ***\n", cpu);
        show_execution_state(regs);
    }

    if ( !is_idle_vcpu(current) )
    {
        printk("*** Dumping CPU%u guest state (%pv): ***\n",
               smp_processor_id(), current);
        show_execution_state(guest_cpu_user_regs());
        printk("\n");
    }

    /* Mark this CPU done; in normal mode dump_registers() spins on this. */
    cpumask_clear_cpu(cpu, &dump_execstate_mask);
    if ( !alt_key_handling )
        return;

    /*
     * Alternative handling: chain to the next pending CPU ourselves,
     * dumping them asynchronously one-by-one.
     */
    cpu = cpumask_cycle(cpu, &dump_execstate_mask);
    if ( cpu < nr_cpu_ids )
    {
        smp_send_state_dump(cpu);
        return;
    }

    /* Chain complete: undo the sync/watchdog setup from dump_registers(). */
    console_end_sync();
    watchdog_enable();
}
172
/*
 * 'd': dump register state of every online CPU.
 *
 * In normal mode the remaining CPUs are IPI'd and waited for one at a
 * time; in alternative mode the CPUs chain to each other from within
 * dump_execstate(), which also performs the final console/watchdog
 * teardown.
 */
static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
{
    unsigned int cpu;

    /* We want to get everything out that we possibly can. */
    watchdog_disable();
    console_start_sync();

    printk("'%c' pressed -> dumping registers\n\n", key);

    cpumask_copy(&dump_execstate_mask, &cpu_online_map);

    /* Get local execution state out immediately, in case we get stuck. */
    dump_execstate(regs);

    /* Alt. handling: remaining CPUs are dumped asynchronously one-by-one. */
    if ( alt_key_handling )
        return;

    /* Normal handling: synchronously dump the remaining CPUs' states. */
    for_each_cpu ( cpu, &dump_execstate_mask )
    {
        smp_send_state_dump(cpu);
        /* Wait for the target to clear its bit before moving on. */
        while ( cpumask_test_cpu(cpu, &dump_execstate_mask) )
            cpu_relax();
    }

    console_end_sync();
    watchdog_enable();
}
203
/* Tasklet used to continue a hardware-domain dump across softirq pressure. */
static DECLARE_TASKLET(dump_hwdom_tasklet, NULL, 0);

/*
 * Dump vCPUs starting from the one encoded in @arg.  If softirqs become
 * pending mid-walk, reschedule this tasklet on the next vCPU's processor
 * and resume from there rather than hogging this CPU.
 */
static void dump_hwdom_action(unsigned long arg)
{
    struct vcpu *v = (void *)arg;

    for ( ; ; )
    {
        vcpu_show_execution_state(v);
        if ( (v = v->next_in_list) == NULL )
            break;
        if ( softirq_pending(smp_processor_id()) )
        {
            /* Hand the remaining vCPUs back to ourselves via the tasklet. */
            dump_hwdom_tasklet.data = (unsigned long)v;
            tasklet_schedule_on_cpu(&dump_hwdom_tasklet, v->processor);
            break;
        }
    }
}
223
/*
 * '0': dump register state of every hardware-domain (Dom0) vCPU.
 * In alternative mode, pending softirqs cause the remainder of the walk
 * to be handed off to dump_hwdom_tasklet.
 */
static void dump_hwdom_registers(unsigned char key)
{
    struct vcpu *v;

    if ( hardware_domain == NULL )
        return;

    printk("'%c' pressed -> dumping Dom0's registers\n", key);

    for_each_vcpu ( hardware_domain, v )
    {
        if ( alt_key_handling && softirq_pending(smp_processor_id()) )
        {
            /* Re-arm the tasklet to continue from this vCPU, then bail. */
            tasklet_kill(&dump_hwdom_tasklet);
            tasklet_init(&dump_hwdom_tasklet, dump_hwdom_action,
                         (unsigned long)v);
            tasklet_schedule_on_cpu(&dump_hwdom_tasklet, v->processor);
            return;
        }
        vcpu_show_execution_state(v);
    }
}
246
/* 'R': immediately restart the machine. */
static void reboot_machine(unsigned char key, struct cpu_user_regs *regs)
{
    printk("'%c' pressed -> rebooting machine\n", key);
    machine_restart(0);
}
252
/*
 * Format @mask into @set as "{<cpulist>}".  @size is the total buffer
 * size; two bytes are reserved for the braces.
 */
static void cpuset_print(char *set, int size, const cpumask_t *mask)
{
    int n;

    set[0] = '{';
    n = cpulist_scnprintf(set + 1, size - 2, mask);
    set[1 + n] = '}';
    set[2 + n] = '\0';
}
260
/*
 * Format @mask into @set as "[<nodelist>]".  @size is the total buffer
 * size; two bytes are reserved for the brackets.
 */
static void nodeset_print(char *set, int size, const nodemask_t *mask)
{
    int n;

    set[0] = '[';
    n = nodelist_scnprintf(set + 1, size - 2, mask);
    set[1 + n] = ']';
    set[2 + n] = '\0';
}
268
/*
 * Describe a vCPU's periodic timer into @str: either "No periodic timer"
 * or its frequency and period.  @period is in nanoseconds.
 *
 * Note: the previous implementation cast @period to int before dividing,
 * which truncated large periods (negative or even zero divisor for
 * periods that are multiples of 2^32, i.e. division by zero).  Do the
 * arithmetic in 64 bits instead.
 */
static void periodic_timer_print(char *str, int size, uint64_t period)
{
    if ( period == 0 )
    {
        strlcpy(str, "No periodic timer", size);
        return;
    }

    snprintf(str, size,
             "%"PRIu64" Hz periodic timer (period %"PRIu64" ms)",
             UINT64_C(1000000000) / period, period / 1000000);
}
281
/*
 * 'q': dump general, memory, watchdog and per-vCPU state for every
 * domain, then notify each vCPU via VIRQ_DEBUG so in-guest debug state
 * can be dumped too.  Runs under the domain-list RCU read lock and
 * periodically processes softirqs so long dumps don't starve the CPU.
 */
static void dump_domains(unsigned char key)
{
    struct domain *d;
    struct vcpu *v;
    s_time_t now = NOW();
/* Alias the shared scratch buffer for the formatting helpers below. */
#define tmpstr keyhandler_scratch

    printk("'%c' pressed -> dumping domain info (now=0x%X:%08X)\n", key,
           (u32)(now>>32), (u32)now);

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
    {
        unsigned int i;

        process_pending_softirqs();

        printk("General information for domain %u:\n", d->domain_id);
        cpuset_print(tmpstr, sizeof(tmpstr), d->domain_dirty_cpumask);
        printk(" refcnt=%d dying=%d pause_count=%d\n",
               atomic_read(&d->refcnt), d->is_dying,
               atomic_read(&d->pause_count));
        printk(" nr_pages=%d xenheap_pages=%d shared_pages=%u paged_pages=%u "
               "dirty_cpus=%s max_pages=%u\n", d->tot_pages, d->xenheap_pages,
               atomic_read(&d->shr_pages), atomic_read(&d->paged_pages),
               tmpstr, d->max_pages);
        printk(" handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
               "%02x%02x-%02x%02x%02x%02x%02x%02x vm_assist=%08lx\n",
               d->handle[ 0], d->handle[ 1], d->handle[ 2], d->handle[ 3],
               d->handle[ 4], d->handle[ 5], d->handle[ 6], d->handle[ 7],
               d->handle[ 8], d->handle[ 9], d->handle[10], d->handle[11],
               d->handle[12], d->handle[13], d->handle[14], d->handle[15],
               d->vm_assist);
        /* NOTE(review): ">> 30" approximates ns-to-seconds (2^30 ~ 1e9). */
        for ( i = 0 ; i < NR_DOMAIN_WATCHDOG_TIMERS; i++ )
            if ( test_bit(i, &d->watchdog_inuse_map) )
                printk(" watchdog %d expires in %d seconds\n",
                       i, (u32)((d->watchdog_timer[i].expires - NOW()) >> 30));

        arch_dump_domain_info(d);

        rangeset_domain_printk(d);

        dump_pageframe_info(d);

        nodeset_print(tmpstr, sizeof(tmpstr), &d->node_affinity);
        printk("NODE affinity for domain %d: %s\n", d->domain_id, tmpstr);

        printk("VCPU information and callbacks for domain %u:\n",
               d->domain_id);
        for_each_vcpu ( d, v )
        {
            /* Yield to softirqs every 64 vCPUs on large domains. */
            if ( !(v->vcpu_id & 0x3f) )
                process_pending_softirqs();

            printk(" VCPU%d: CPU%d [has=%c] poll=%d "
                   "upcall_pend=%02x upcall_mask=%02x ",
                   v->vcpu_id, v->processor,
                   v->is_running ? 'T':'F', v->poll_evtchn,
                   vcpu_info(v, evtchn_upcall_pending),
                   !vcpu_event_delivery_is_enabled(v));
            cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
            printk("dirty_cpus=%s\n", tmpstr);
            cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_hard_affinity);
            printk(" cpu_hard_affinity=%s ", tmpstr);
            cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_soft_affinity);
            printk("cpu_soft_affinity=%s\n", tmpstr);
            printk(" pause_count=%d pause_flags=%lx\n",
                   atomic_read(&v->pause_count), v->pause_flags);
            arch_dump_vcpu_info(v);
            periodic_timer_print(tmpstr, sizeof(tmpstr), v->periodic_period);
            printk(" %s\n", tmpstr);
        }
    }

    /* Second pass: poke every vCPU's debug virq after all dumping is done. */
    for_each_domain ( d )
    {
        for_each_vcpu ( d, v )
        {
            if ( !(v->vcpu_id & 0x3f) )
                process_pending_softirqs();

            printk("Notifying guest %d:%d (virq %d, port %d)\n",
                   d->domain_id, v->vcpu_id,
                   VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG]);
            send_guest_vcpu_virq(v, VIRQ_DEBUG);
        }
    }

    arch_dump_shared_mem_info();

    rcu_read_unlock(&domlist_read_lock);
#undef tmpstr
}
376
/* CPUs that still need to sample their clocks for the current 't' request. */
static cpumask_t read_clocks_cpumask;
/* Per-CPU samples taken (nearly) simultaneously on all CPUs. */
static DEFINE_PER_CPU(s_time_t, read_clocks_time);
static DEFINE_PER_CPU(u64, read_cycles_time);

/*
 * Remote-CPU side of the clock rendezvous: spin with IRQs off until the
 * initiator releases us (by setting our bit in read_clocks_cpumask), then
 * sample NOW() and the cycle counter and clear our bit to signal completion.
 */
static void read_clocks_slave(void *unused)
{
    unsigned int cpu = smp_processor_id();
    local_irq_disable();
    while ( !cpumask_test_cpu(cpu, &read_clocks_cpumask) )
        cpu_relax();
    per_cpu(read_clocks_time, cpu) = NOW();
    per_cpu(read_cycles_time, cpu) = get_cycles();
    cpumask_clear_cpu(cpu, &read_clocks_cpumask);
    local_irq_enable();
}
392
/*
 * 't': measure cross-CPU clock skew.  All other CPUs park in
 * read_clocks_slave(); setting their bits in read_clocks_cpumask releases
 * them to sample their clocks at (nearly) the same instant as this CPU.
 * Running max/avg skew statistics accumulate across invocations.
 */
static void read_clocks(unsigned char key)
{
    unsigned int cpu = smp_processor_id(), min_stime_cpu, max_stime_cpu;
    unsigned int min_cycles_cpu, max_cycles_cpu;
    u64 min_stime, max_stime, dif_stime;
    u64 min_cycles, max_cycles, dif_cycles;
    static u64 sumdif_stime = 0, maxdif_stime = 0;
    static u64 sumdif_cycles = 0, maxdif_cycles = 0;
    static u32 count = 0;
    static DEFINE_SPINLOCK(lock);

    spin_lock(&lock);

    /* Park all other CPUs in the slave spin loop (no wait for completion). */
    smp_call_function(read_clocks_slave, NULL, 0);

    /* Release the slaves and take our own samples in the same instant. */
    local_irq_disable();
    cpumask_andnot(&read_clocks_cpumask, &cpu_online_map, cpumask_of(cpu));
    per_cpu(read_clocks_time, cpu) = NOW();
    per_cpu(read_cycles_time, cpu) = get_cycles();
    local_irq_enable();

    /* Wait until every slave has sampled and cleared its bit. */
    while ( !cpumask_empty(&read_clocks_cpumask) )
        cpu_relax();

    /* Find the CPUs holding the extreme stime/cycle samples. */
    min_stime_cpu = max_stime_cpu = min_cycles_cpu = max_cycles_cpu = cpu;
    for_each_online_cpu ( cpu )
    {
        if ( per_cpu(read_clocks_time, cpu) <
             per_cpu(read_clocks_time, min_stime_cpu) )
            min_stime_cpu = cpu;
        if ( per_cpu(read_clocks_time, cpu) >
             per_cpu(read_clocks_time, max_stime_cpu) )
            max_stime_cpu = cpu;
        if ( per_cpu(read_cycles_time, cpu) <
             per_cpu(read_cycles_time, min_cycles_cpu) )
            min_cycles_cpu = cpu;
        if ( per_cpu(read_cycles_time, cpu) >
             per_cpu(read_cycles_time, max_cycles_cpu) )
            max_cycles_cpu = cpu;
    }

    min_stime = per_cpu(read_clocks_time, min_stime_cpu);
    max_stime = per_cpu(read_clocks_time, max_stime_cpu);
    min_cycles = per_cpu(read_cycles_time, min_cycles_cpu);
    max_cycles = per_cpu(read_cycles_time, max_cycles_cpu);

    spin_unlock(&lock);

    /* Update running statistics and report this sample. */
    dif_stime = max_stime - min_stime;
    if ( dif_stime > maxdif_stime )
        maxdif_stime = dif_stime;
    sumdif_stime += dif_stime;
    dif_cycles = max_cycles - min_cycles;
    if ( dif_cycles > maxdif_cycles )
        maxdif_cycles = dif_cycles;
    sumdif_cycles += dif_cycles;
    count++;
    printk("Synced stime skew: max=%"PRIu64"ns avg=%"PRIu64"ns "
           "samples=%"PRIu32" current=%"PRIu64"ns\n",
           maxdif_stime, sumdif_stime/count, count, dif_stime);
    printk("Synced cycles skew: max=%"PRIu64" avg=%"PRIu64" "
           "samples=%"PRIu32" current=%"PRIu64"\n",
           maxdif_cycles, sumdif_cycles/count, count, dif_cycles);
}
457
run_all_nonirq_keyhandlers(unsigned long unused)458 static void run_all_nonirq_keyhandlers(unsigned long unused)
459 {
460 /* Fire all the non-IRQ-context diagnostic keyhandlers */
461 struct keyhandler *h;
462 int k;
463
464 console_start_log_everything();
465
466 for ( k = 0; k < ARRAY_SIZE(key_table); k++ )
467 {
468 process_pending_softirqs();
469 h = &key_table[k];
470 if ( !h->fn || !h->diagnostic || h->irq_callback )
471 continue;
472 printk("[%c: %s]\n", k, h->desc);
473 h->fn(k);
474 }
475
476 console_end_log_everything();
477 }
478
479 static DECLARE_TASKLET(run_all_keyhandlers_tasklet,
480 run_all_nonirq_keyhandlers, 0);
481
run_all_keyhandlers(unsigned char key,struct cpu_user_regs * regs)482 static void run_all_keyhandlers(unsigned char key, struct cpu_user_regs *regs)
483 {
484 struct keyhandler *h;
485 unsigned int k;
486
487 watchdog_disable();
488
489 printk("'%c' pressed -> firing all diagnostic keyhandlers\n", key);
490
491 /* Fire all the IRQ-context diangostic keyhandlers now */
492 for ( k = 0; k < ARRAY_SIZE(key_table); k++ )
493 {
494 h = &key_table[k];
495 if ( !h->irq_fn || !h->diagnostic || !h->irq_callback )
496 continue;
497 printk("[%c: %s]\n", k, h->desc);
498 h->irq_fn(k, regs);
499 }
500
501 watchdog_enable();
502
503 /* Trigger the others from a tasklet in non-IRQ context */
504 tasklet_schedule(&run_all_keyhandlers_tasklet);
505 }
506
/* '%': drop into the external debugger (xendbg). */
static void do_debug_key(unsigned char key, struct cpu_user_regs *regs)
{
    printk("'%c' pressed -> trapping into debugger\n", key);
    (void)debugger_trap_fatal(0xf001, regs);
    nop(); /* Prevent the compiler doing tail call
                             optimisation, as that confuses xendbg a
                             bit. */
}
515
do_toggle_alt_key(unsigned char key,struct cpu_user_regs * regs)516 static void do_toggle_alt_key(unsigned char key, struct cpu_user_regs *regs)
517 {
518 alt_key_handling = !alt_key_handling;
519 printk("'%c' pressed -> using %s key handling\n", key,
520 alt_key_handling ? "alternative" : "normal");
521 }
522
initialize_keytable(void)523 void __init initialize_keytable(void)
524 {
525 if ( num_present_cpus() > 16 )
526 {
527 alt_key_handling = 1;
528 printk(XENLOG_INFO "Defaulting to alternative key handling; "
529 "send 'A' to switch to normal mode.\n");
530 }
531 }
532
533 /*
534 * Local variables:
535 * mode: C
536 * c-file-style: "BSD"
537 * c-basic-offset: 4
538 * tab-width: 4
539 * indent-tabs-mode: nil
540 * End:
541 */
542