#include <xen/cpu.h>
#include <xen/lib.h>
#include <xen/irq.h>
#include <xen/notifier.h>
#include <xen/param.h>
#include <xen/smp.h>
#include <xen/time.h>
#include <xen/sections.h>
#include <xen/spinlock.h>
#include <xen/guest_access.h>
#include <xen/preempt.h>
#include <public/sysctl.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#ifdef CONFIG_DEBUG_LOCKS

/* Max. number of entries in locks_taken array. */
static unsigned int __ro_after_init lock_depth_size = 64;
integer_param("lock-depth-size", lock_depth_size);

/*
 * Array of addresses of taken locks.
 * nr_locks_taken is the index after the last entry. As locks tend to be
 * nested cleanly, when freeing a lock it will probably be the one before
 * nr_locks_taken, and new entries can be entered at that index. It is fine
 * for a lock to be released out of order, though.
 */
static DEFINE_PER_CPU(const union lock_debug **, locks_taken);
static DEFINE_PER_CPU(unsigned int, nr_locks_taken);
static bool __read_mostly max_depth_reached;

static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);

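/*
 * CPU notifier: allocate (CPU_UP_PREPARE) and free (CPU_UP_CANCELED /
 * CPU_DEAD) the per-CPU locks_taken array used to record which locks a CPU
 * currently holds.  Allocation failure is not fatal: lock_enter() and
 * lock_exit() simply skip the recording when the array is absent.
 */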
static int cf_check cpu_lockdebug_callback(struct notifier_block *nfb,
                                           unsigned long action,
                                           void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;

    switch ( action )
    {
    case CPU_UP_PREPARE:
        if ( !per_cpu(locks_taken, cpu) )
            per_cpu(locks_taken, cpu) = xzalloc_array(const union lock_debug *,
                                                      lock_depth_size);
        if ( !per_cpu(locks_taken, cpu) )
            printk(XENLOG_WARNING
                   "cpu %u: failed to allocate lock recursion check area\n",
                   cpu);
        break;

    case CPU_UP_CANCELED:
    case CPU_DEAD:
        XFREE(per_cpu(locks_taken, cpu));
        break;

    default:
        break;
    }

    return 0;
}

static struct notifier_block cpu_lockdebug_nfb = {
    .notifier_call = cpu_lockdebug_callback,
};

static int __init cf_check lockdebug_init(void)
{
    if ( lock_depth_size )
    {
        register_cpu_notifier(&cpu_lockdebug_nfb);
        cpu_lockdebug_callback(&cpu_lockdebug_nfb, CPU_UP_PREPARE,
                               (void *)(unsigned long)smp_processor_id());
    }

    return 0;
}
presmp_initcall(lockdebug_init);

void check_lock(union lock_debug *debug, bool try)
{
    bool irq_safe = !local_irq_is_enabled();
    unsigned int cpu = smp_processor_id();
    const union lock_debug *const *taken = per_cpu(locks_taken, cpu);
    unsigned int nr_taken = per_cpu(nr_locks_taken, cpu);
    unsigned int i;

    BUILD_BUG_ON(LOCK_DEBUG_PAD_BITS <= 0);

    if ( unlikely(atomic_read(&spin_debug) <= 0) )
        return;

    /* A few places take liberties with this. */
    /* BUG_ON(in_irq() && !irq_safe); */

    /*
     * We partition locks into IRQ-safe (always held with IRQs disabled) and
     * IRQ-unsafe (always held with IRQs enabled) types. The convention for
     * every lock must be consistently observed else we can deadlock in
     * IRQ-context rendezvous functions (a rendezvous which gets every CPU
     * into IRQ context before any CPU is released from the rendezvous).
     *
     * If we can mix IRQ-disabled and IRQ-enabled callers, the following can
     * happen:
     *  * Lock is held by CPU A, with IRQs enabled
     *  * CPU B is spinning on same lock, with IRQs disabled
     *  * Rendezvous starts -- CPU A takes interrupt and enters rendezvous spin
     *  * DEADLOCK -- CPU B will never enter rendezvous, CPU A will never exit
     *                the rendezvous, and will hence never release the lock.
     *
     * To guard against this subtle bug we latch the IRQ safety of every
     * spinlock in the system, on first use.
     *
     * A spin_trylock() with interrupts off is always fine, as this can't
     * block and the above deadlock scenario doesn't apply.
     */
    if ( try && irq_safe )
        return;

    if ( unlikely(debug->irq_safe != irq_safe) )
    {
        union lock_debug seen, new = { 0 };

        new.irq_safe = irq_safe;
        seen.val = cmpxchg(&debug->val, LOCK_DEBUG_INITVAL, new.val);

        if ( !seen.unseen && seen.irq_safe == !irq_safe )
        {
            printk("CHECKLOCK FAILURE: prev irqsafe: %d, curr irqsafe %d\n",
                   seen.irq_safe, irq_safe);
            BUG();
        }
    }

    if ( try )
        return;

    for ( i = 0; i < nr_taken; i++ )
        if ( taken[i] == debug )
        {
            printk("CHECKLOCK FAILURE: lock at %p taken recursively\n", debug);
            BUG();
        }
}

static void check_barrier(union lock_debug *debug)
{
    if ( unlikely(atomic_read(&spin_debug) <= 0) )
        return;

    /*
     * For a barrier, we have a relaxed IRQ-safety-consistency check.
     *
     * It is always safe to spin at the barrier with IRQs enabled -- that does
     * not prevent us from entering an IRQ-context rendezvous, and nor are
     * we preventing anyone else from doing so (since we do not actually
     * acquire the lock during a barrier operation).
     *
     * However, if we spin on an IRQ-unsafe lock with IRQs disabled then that
     * is clearly wrong, for the same reason outlined in check_lock() above.
     */
    BUG_ON(!local_irq_is_enabled() && !debug->irq_safe);
}

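/*
 * Record @debug in the current CPU's locks_taken array so that recursive
 * acquisition of the same lock can later be detected by check_lock().
 * IRQs are disabled around the update to keep the array consistent with
 * locks taken from interrupt context.
 */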
void lock_enter(const union lock_debug *debug)
{
    unsigned int cpu = smp_processor_id();
    const union lock_debug **taken = per_cpu(locks_taken, cpu);
    unsigned int *nr_taken = &per_cpu(nr_locks_taken, cpu);
    unsigned long flags;

    if ( !taken )
        return;

    local_irq_save(flags);

    if ( *nr_taken < lock_depth_size )
        taken[(*nr_taken)++] = debug;
    else if ( !max_depth_reached )
    {
        max_depth_reached = true;
        printk("CHECKLOCK max lock depth %u reached!\n", lock_depth_size);
        WARN();
    }

    local_irq_restore(flags);
}

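/*
 * Remove @debug from the current CPU's locks_taken array.  Locks are
 * usually released in LIFO order, so the search starts at the most recent
 * entry; out-of-order releases are handled by shifting later entries down.
 * A released lock not found in the array is reported, unless entries may
 * have been dropped because the depth limit was reached earlier.
 */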
void lock_exit(const union lock_debug *debug)
{
    unsigned int cpu = smp_processor_id();
    const union lock_debug **taken = per_cpu(locks_taken, cpu);
    unsigned int *nr_taken = &per_cpu(nr_locks_taken, cpu);
    unsigned int i;
    unsigned long flags;

    if ( !taken )
        return;

    local_irq_save(flags);

    for ( i = *nr_taken; i > 0; i-- )
    {
        if ( taken[i - 1] == debug )
        {
            memmove(taken + i - 1, taken + i,
                    (*nr_taken - i) * sizeof(*taken));
            (*nr_taken)--;
            taken[*nr_taken] = NULL;

            local_irq_restore(flags);

            return;
        }
    }

    if ( !max_depth_reached )
    {
        printk("CHECKLOCK released lock at %p not recorded!\n", debug);
        WARN();
    }

    local_irq_restore(flags);
}

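/*
 * got_lock() / rel_lock() are invoked on every lock / unlock while
 * CONFIG_DEBUG_LOCKS is enabled: they stamp the owning CPU into the lock's
 * debug field and keep the per-CPU locks_taken bookkeeping up to date.
 */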
static void got_lock(union lock_debug *debug)
{
    debug->cpu = smp_processor_id();

    lock_enter(debug);
}

static void rel_lock(union lock_debug *debug)
{
    if ( atomic_read(&spin_debug) > 0 )
        BUG_ON(debug->cpu != smp_processor_id());

    lock_exit(debug);

    debug->cpu = SPINLOCK_NO_CPU;
}

void spin_debug_enable(void)
{
    atomic_inc(&spin_debug);
}

void spin_debug_disable(void)
{
    atomic_dec(&spin_debug);
}

#else /* CONFIG_DEBUG_LOCKS */

#define check_barrier(l) ((void)0)
#define got_lock(l) ((void)0)
#define rel_lock(l) ((void)0)

#endif

#ifdef CONFIG_DEBUG_LOCK_PROFILE

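/*
 * Hooks used by the common (un)lock paths for lock profiling.  They expand
 * to nothing when CONFIG_DEBUG_LOCK_PROFILE is off (see the #else branch
 * below):
 * - LOCK_PROFILE_VAR / LOCK_PROFILE_BLOCK record when the CPU first started
 *   spinning on the lock,
 * - LOCK_PROFILE_GOT / LOCK_PROFILE_BLKACC account the blocked time once the
 *   lock has been obtained,
 * - LOCK_PROFILE_REL accounts the hold time when the lock is released.
 */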
#define LOCK_PROFILE_PAR lock->profile
#define LOCK_PROFILE_REL                                                     \
    if ( profile )                                                           \
    {                                                                        \
        profile->time_hold += NOW() - profile->time_locked;                  \
        profile->lock_cnt++;                                                 \
    }
#define LOCK_PROFILE_VAR(var, val)    s_time_t var = (val)
#define LOCK_PROFILE_BLOCK(var)       (var) = (var) ? : NOW()
#define LOCK_PROFILE_BLKACC(tst, val)                                        \
    if ( tst )                                                               \
    {                                                                        \
        profile->time_block += profile->time_locked - (val);                 \
        profile->block_cnt++;                                                \
    }
#define LOCK_PROFILE_GOT(val)                                                \
    if ( profile )                                                           \
    {                                                                        \
        profile->time_locked = NOW();                                        \
        LOCK_PROFILE_BLKACC(val, val);                                       \
    }

#else

#define LOCK_PROFILE_PAR NULL
#define LOCK_PROFILE_REL
#define LOCK_PROFILE_VAR(var, val)
#define LOCK_PROFILE_BLOCK(var)
#define LOCK_PROFILE_BLKACC(tst, val)
#define LOCK_PROFILE_GOT(val)

#endif

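/*
 * Snapshot helpers for the ticket lock: observe_lock() reads the combined
 * head/tail word, observe_head() just the head (the ticket currently being
 * served).  The smp_rmb() keeps the snapshot ordered after reads issued
 * before it.
 */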
static always_inline spinlock_tickets_t observe_lock(spinlock_tickets_t *t)
{
    spinlock_tickets_t v;

    smp_rmb();
    v.head_tail = read_atomic(&t->head_tail);
    return v;
}

static always_inline uint16_t observe_head(const spinlock_tickets_t *t)
{
    smp_rmb();
    return read_atomic(&t->head);
}

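/*
 * Core of the ticket lock: atomically claim the next ticket (tail), then
 * spin until the head catches up with the claimed ticket.  The optional
 * callback is invoked on every iteration of the spin loop, allowing callers
 * like _spin_lock_cb() to do work while waiting for the lock.
 */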
static void always_inline spin_lock_common(spinlock_tickets_t *t,
                                           union lock_debug *debug,
                                           struct lock_profile *profile,
                                           void (*cb)(void *data), void *data)
{
    spinlock_tickets_t tickets = SPINLOCK_TICKET_INC;
    LOCK_PROFILE_VAR(block, 0);

    check_lock(debug, false);
    preempt_disable();
    tickets.head_tail = arch_fetch_and_add(&t->head_tail, tickets.head_tail);
    while ( tickets.tail != observe_head(t) )
    {
        LOCK_PROFILE_BLOCK(block);
        if ( cb )
            cb(data);
        arch_lock_relax();
    }
    arch_lock_acquire_barrier();
    got_lock(debug);
    LOCK_PROFILE_GOT(block);
}

void _spin_lock(spinlock_t *lock)
{
    spin_lock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR, NULL,
                     NULL);
}

void _spin_lock_cb(spinlock_t *lock, void (*cb)(void *data), void *data)
{
    spin_lock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR, cb, data);
}

void _spin_lock_irq(spinlock_t *lock)
{
    ASSERT(local_irq_is_enabled());
    local_irq_disable();
    _spin_lock(lock);
}

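/*
 * Callers normally use the spin_lock_irqsave() / spin_unlock_irqrestore()
 * wrapper macros rather than calling this directly.  Illustrative sketch
 * only (hypothetical lock, assuming the wrappers provided by
 * <xen/spinlock.h>):
 *
 *     unsigned long flags;
 *
 *     spin_lock_irqsave(&some_lock, flags);
 *     ... critical section with IRQs disabled ...
 *     spin_unlock_irqrestore(&some_lock, flags);
 */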
unsigned long _spin_lock_irqsave(spinlock_t *lock)
{
    unsigned long flags;

    local_irq_save(flags);
    _spin_lock(lock);
    return flags;
}

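/*
 * Release side of the ticket lock: update profiling and debug state, then
 * bump the head past the just-served ticket so the next waiter may proceed.
 */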
static void always_inline spin_unlock_common(spinlock_tickets_t *t,
                                             union lock_debug *debug,
                                             struct lock_profile *profile)
{
    LOCK_PROFILE_REL;
    rel_lock(debug);
    arch_lock_release_barrier();
    add_sized(&t->head, 1);
    arch_lock_signal();
    preempt_enable();
}

void _spin_unlock(spinlock_t *lock)
{
    spin_unlock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
}

void _spin_unlock_irq(spinlock_t *lock)
{
    _spin_unlock(lock);
    local_irq_enable();
}

void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
    _spin_unlock(lock);
    local_irq_restore(flags);
}

static bool always_inline spin_is_locked_common(const spinlock_tickets_t *t)
{
    return t->head != t->tail;
}

bool _spin_is_locked(const spinlock_t *lock)
{
    /*
     * This function is suitable only for use in ASSERT()s and the like, as
     * it doesn't tell _who_ is holding the lock.
     */
    return spin_is_locked_common(&lock->tickets);
}

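/*
 * Try to acquire the ticket lock without spinning: snapshot the lock and,
 * if it looks free, claim the next ticket with a single cmpxchg() of the
 * combined head/tail word.  Any concurrent change makes the cmpxchg() fail
 * and the attempt is abandoned.
 */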
static bool always_inline spin_trylock_common(spinlock_tickets_t *t,
                                              union lock_debug *debug,
                                              struct lock_profile *profile)
{
    spinlock_tickets_t old, new;

    preempt_disable();
    check_lock(debug, true);
    old = observe_lock(t);
    if ( old.head != old.tail )
    {
        preempt_enable();
        return false;
    }
    new = old;
    new.tail++;
    if ( cmpxchg(&t->head_tail, old.head_tail, new.head_tail) != old.head_tail )
    {
        preempt_enable();
        return false;
    }
    /*
     * cmpxchg() is a full barrier so no need for an
     * arch_lock_acquire_barrier().
     */
    got_lock(debug);
    LOCK_PROFILE_GOT(0);

    return true;
}

bool _spin_trylock(spinlock_t *lock)
{
    return spin_trylock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
}

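/*
 * Wait, without acquiring the lock, until any critical section that was in
 * progress when the barrier started has completed: if the lock is observed
 * to be held, spin until the head has moved past the sampled value.
 */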
static void always_inline spin_barrier_common(spinlock_tickets_t *t,
                                              union lock_debug *debug,
                                              struct lock_profile *profile)
{
    spinlock_tickets_t sample;
    LOCK_PROFILE_VAR(block, NOW());

    check_barrier(debug);
    smp_mb();
    sample = observe_lock(t);
    if ( sample.head != sample.tail )
    {
        while ( observe_head(t) == sample.head )
            arch_lock_relax();
        LOCK_PROFILE_BLKACC(profile, block);
    }
    smp_mb();
}

void _spin_barrier(spinlock_t *lock)
{
    spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
}

bool _rspin_is_locked(const rspinlock_t *lock)
{
    /*
     * Recursive locks may be locked by another CPU, yet we return
     * "false" here, making this function suitable only for use in
     * ASSERT()s and the like.
     */
    return lock->recurse_cpu == SPINLOCK_NO_CPU
           ? spin_is_locked_common(&lock->tickets)
           : lock->recurse_cpu == smp_processor_id();
}

void _rspin_barrier(rspinlock_t *lock)
{
    spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
}

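/*
 * Recursive spinlocks (rspinlock_t) may be re-acquired by the CPU already
 * owning them: the owner is recorded in recurse_cpu and the nesting depth
 * in recurse_cnt.  Only the outermost acquisition touches the underlying
 * ticket lock; nested ones merely bump the count.
 */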
bool _rspin_trylock(rspinlock_t *lock)
{
    unsigned int cpu = smp_processor_id();

    /* Don't allow overflow of recurse_cpu field. */
    BUILD_BUG_ON(NR_CPUS > SPINLOCK_NO_CPU);
    BUILD_BUG_ON(SPINLOCK_CPU_BITS > sizeof(lock->recurse_cpu) * 8);
    BUILD_BUG_ON(SPINLOCK_RECURSE_BITS < 3);
    BUILD_BUG_ON(SPINLOCK_MAX_RECURSE > ((1u << SPINLOCK_RECURSE_BITS) - 1));

    check_lock(&lock->debug, true);

    if ( likely(lock->recurse_cpu != cpu) )
    {
        if ( !spin_trylock_common(&lock->tickets, &lock->debug,
                                  LOCK_PROFILE_PAR) )
            return false;
        lock->recurse_cpu = cpu;
    }

    /* We support only fairly shallow recursion, else the counter overflows. */
    ASSERT(lock->recurse_cnt < SPINLOCK_MAX_RECURSE);
    lock->recurse_cnt++;

    return true;
}

void _rspin_lock(rspinlock_t *lock)
{
    unsigned int cpu = smp_processor_id();

    if ( likely(lock->recurse_cpu != cpu) )
    {
        spin_lock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR, NULL,
                         NULL);
        lock->recurse_cpu = cpu;
    }

    /* We support only fairly shallow recursion, else the counter overflows. */
    ASSERT(lock->recurse_cnt < SPINLOCK_MAX_RECURSE);
    lock->recurse_cnt++;
}

unsigned long _rspin_lock_irqsave(rspinlock_t *lock)
{
    unsigned long flags;

    local_irq_save(flags);
    _rspin_lock(lock);

    return flags;
}

void _rspin_unlock(rspinlock_t *lock)
{
    if ( likely(--lock->recurse_cnt == 0) )
    {
        lock->recurse_cpu = SPINLOCK_NO_CPU;
        spin_unlock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
    }
}

void _rspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags)
{
    _rspin_unlock(lock);
    local_irq_restore(flags);
}

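/*
 * The _nrspin_*() variants acquire a recursive spinlock in a non-recursive
 * fashion: they operate on the underlying ticket lock only and leave
 * recurse_cpu / recurse_cnt untouched.  _nrspin_trylock() additionally
 * refuses the lock while it is held via the recursive path.
 */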
bool _nrspin_trylock(rspinlock_t *lock)
{
    check_lock(&lock->debug, true);

    if ( unlikely(lock->recurse_cpu != SPINLOCK_NO_CPU) )
        return false;

    return spin_trylock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
}

void _nrspin_lock(rspinlock_t *lock)
{
    spin_lock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR, NULL,
                     NULL);
}

void _nrspin_unlock(rspinlock_t *lock)
{
    spin_unlock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
}

void _nrspin_lock_irq(rspinlock_t *lock)
{
    ASSERT(local_irq_is_enabled());
    local_irq_disable();
    _nrspin_lock(lock);
}

void _nrspin_unlock_irq(rspinlock_t *lock)
{
    _nrspin_unlock(lock);
    local_irq_enable();
}

unsigned long _nrspin_lock_irqsave(rspinlock_t *lock)
{
    unsigned long flags;

    local_irq_save(flags);
    _nrspin_lock(lock);

    return flags;
}

void _nrspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags)
{
    _nrspin_unlock(lock);
    local_irq_restore(flags);
}

#ifdef CONFIG_DEBUG_LOCK_PROFILE

struct lock_profile_anc {
    struct lock_profile_qhead *head_q;   /* first head of this type */
    const char                *name;     /* descriptive string for print */
};

typedef void lock_profile_subfunc(struct lock_profile *data, int32_t type,
    int32_t idx, void *par);

static s_time_t lock_profile_start;
static struct lock_profile_anc lock_profile_ancs[] = {
    [LOCKPROF_TYPE_GLOBAL] = { .name = "Global" },
    [LOCKPROF_TYPE_PERDOM] = { .name = "Domain" },
};
static struct lock_profile_qhead lock_profile_glb_q;
static spinlock_t lock_profile_lock = SPIN_LOCK_UNLOCKED;

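/*
 * Walk all registered lock profiling data: for each lock type, iterate over
 * its queue heads and their element lists, invoking @sub on every element.
 * lock_profile_lock protects the lists against concurrent (de)registration.
 */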
static void spinlock_profile_iterate(lock_profile_subfunc *sub, void *par)
{
    int i;
    struct lock_profile_qhead *hq;
    struct lock_profile *eq;

    spin_lock(&lock_profile_lock);
    for ( i = 0; i < LOCKPROF_TYPE_N; i++ )
        for ( hq = lock_profile_ancs[i].head_q; hq; hq = hq->head_q )
            for ( eq = hq->elem_q; eq; eq = eq->next )
                sub(eq, i, hq->idx, par);
    spin_unlock(&lock_profile_lock);
}

static void cf_check spinlock_profile_print_elem(struct lock_profile *data,
    int32_t type, int32_t idx, void *par)
{
    unsigned int cpu;
    unsigned int lockval;

    if ( data->is_rlock )
    {
        cpu = data->ptr.rlock->debug.cpu;
        lockval = data->ptr.rlock->tickets.head_tail;
    }
    else
    {
        cpu = data->ptr.lock->debug.cpu;
        lockval = data->ptr.lock->tickets.head_tail;
    }

    printk("%s ", lock_profile_ancs[type].name);
    if ( type != LOCKPROF_TYPE_GLOBAL )
        printk("%d ", idx);
    printk("%s: addr=%p, lockval=%08x, ", data->name, data->ptr.lock, lockval);
    if ( cpu == SPINLOCK_NO_CPU )
        printk("not locked\n");
    else
        printk("cpu=%u\n", cpu);
    printk("  lock:%" PRIu64 "(%" PRI_stime "), block:%" PRIu64 "(%" PRI_stime ")\n",
           data->lock_cnt, data->time_hold, (uint64_t)data->block_cnt,
           data->time_block);
}

void cf_check spinlock_profile_printall(unsigned char key)
{
    s_time_t now = NOW();
    s_time_t diff;

    diff = now - lock_profile_start;
    printk("Xen lock profile info SHOW  (now = %"PRI_stime" total = "
           "%"PRI_stime")\n", now, diff);
    spinlock_profile_iterate(spinlock_profile_print_elem, NULL);
}

static void cf_check spinlock_profile_reset_elem(struct lock_profile *data,
    int32_t type, int32_t idx, void *par)
{
    data->lock_cnt = 0;
    data->block_cnt = 0;
    data->time_hold = 0;
    data->time_block = 0;
}

void cf_check spinlock_profile_reset(unsigned char key)
{
    s_time_t now = NOW();

    if ( key != '\0' )
        printk("Xen lock profile info RESET (now = %"PRI_stime")\n", now);
    lock_profile_start = now;
    spinlock_profile_iterate(spinlock_profile_reset_elem, NULL);
}

#ifdef CONFIG_SYSCTL
typedef struct {
    struct xen_sysctl_lockprof_op *pc;
    int                      rc;
} spinlock_profile_ucopy_t;

static void cf_check spinlock_profile_ucopy_elem(struct lock_profile *data,
    int32_t type, int32_t idx, void *par)
{
    spinlock_profile_ucopy_t *p = par;
    struct xen_sysctl_lockprof_data elem;

    if ( p->rc )
        return;

    if ( p->pc->nr_elem < p->pc->max_elem )
    {
        safe_strcpy(elem.name, data->name);
        elem.type = type;
        elem.idx = idx;
        elem.lock_cnt = data->lock_cnt;
        elem.block_cnt = data->block_cnt;
        elem.lock_time = data->time_hold;
        elem.block_time = data->time_block;
        if ( copy_to_guest_offset(p->pc->data, p->pc->nr_elem, &elem, 1) )
            p->rc = -EFAULT;
    }

    if ( !p->rc )
        p->pc->nr_elem++;
}

/* Dom0 control of lock profiling */
int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc)
{
    int rc = 0;
    spinlock_profile_ucopy_t par;

    switch ( pc->cmd )
    {
    case XEN_SYSCTL_LOCKPROF_reset:
        spinlock_profile_reset('\0');
        break;

    case XEN_SYSCTL_LOCKPROF_query:
        pc->nr_elem = 0;
        par.rc = 0;
        par.pc = pc;
        spinlock_profile_iterate(spinlock_profile_ucopy_elem, &par);
        pc->time = NOW() - lock_profile_start;
        rc = par.rc;
        break;

    default:
        rc = -EINVAL;
        break;
    }

    return rc;
}
#endif /* CONFIG_SYSCTL */

void _lock_profile_register_struct(
    int32_t type, struct lock_profile_qhead *qhead, int32_t idx)
{
    qhead->idx = idx;
    spin_lock(&lock_profile_lock);
    qhead->head_q = lock_profile_ancs[type].head_q;
    lock_profile_ancs[type].head_q = qhead;
    spin_unlock(&lock_profile_lock);
}

void _lock_profile_deregister_struct(
    int32_t type, struct lock_profile_qhead *qhead)
{
    struct lock_profile_qhead **q;

    spin_lock(&lock_profile_lock);
    for ( q = &lock_profile_ancs[type].head_q; *q; q = &(*q)->head_q )
    {
        if ( *q == qhead )
        {
            *q = qhead->head_q;
            break;
        }
    }
    spin_unlock(&lock_profile_lock);
}

extern struct lock_profile *__lock_profile_start[];
extern struct lock_profile *__lock_profile_end[];

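/*
 * Hook up all statically defined lock profiling entries (collected by the
 * linker between __lock_profile_start and __lock_profile_end) to the global
 * queue, and point each profiled lock back at its profiling record.
 */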
static int __init cf_check lock_prof_init(void)
{
    struct lock_profile **q;

    BUILD_BUG_ON(ARRAY_SIZE(lock_profile_ancs) != LOCKPROF_TYPE_N);

    for ( q = __lock_profile_start; q < __lock_profile_end; q++ )
    {
        (*q)->next = lock_profile_glb_q.elem_q;
        lock_profile_glb_q.elem_q = *q;

        if ( (*q)->is_rlock )
            (*q)->ptr.rlock->profile = *q;
        else
            (*q)->ptr.lock->profile = *q;
    }

    _lock_profile_register_struct(LOCKPROF_TYPE_GLOBAL,
                                  &lock_profile_glb_q, 0);

    return 0;
}
__initcall(lock_prof_init);

#endif /* CONFIG_DEBUG_LOCK_PROFILE */