/*
 * Copyright (C) 2005 Hewlett-Packard Co.
 * written by Aravind Menon & Jose Renato Santos
 *            (email: xenoprof@groups.hp.com)
 *
 * arch generic xenoprof and IA64 support.
 * dynamic map/unmap xenoprof buffer support.
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 */

#ifndef COMPAT
#include <xen/guest_access.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/xenoprof.h>
#include <public/xenoprof.h>
#include <xen/paging.h>
#include <xsm/xsm.h>
#include <xen/hypercall.h>

/* Override macros from asm/page.h to make them work with mfn_t */
#undef virt_to_mfn
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
#undef mfn_to_page
#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))

/* Limit amount of pages used for shared buffer (per domain) */
#define MAX_OPROF_SHARED_PAGES 32

/* Lock protecting the following global state */
static DEFINE_SPINLOCK(xenoprof_lock);

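/*
 * PMU ownership arbitration: the PMU is either free (PMU_OWNER_NONE),
 * owned by xenoprof, or owned by HVM vPMU users, in which case ownership
 * is refcounted via pmu_hvm_refcount.
 */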
static DEFINE_SPINLOCK(pmu_owner_lock);
int pmu_owner = 0;
int pmu_hvm_refcount = 0;

static struct domain *active_domains[MAX_OPROF_DOMAINS];
static int active_ready[MAX_OPROF_DOMAINS];
static unsigned int adomains;

static struct domain *passive_domains[MAX_OPROF_DOMAINS];
static unsigned int pdomains;

static unsigned int activated;
static struct domain *xenoprof_primary_profiler;
static int xenoprof_state = XENOPROF_IDLE;
static unsigned long backtrace_depth;

static u64 total_samples;
static u64 invalid_buffer_samples;
static u64 corrupted_buffer_samples;
static u64 lost_samples;
static u64 active_samples;
static u64 passive_samples;
static u64 idle_samples;
static u64 others_samples;

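/*
 * Try to claim the PMU on behalf of 'pmu_ownership'.  Succeeds if the PMU
 * is free or already held by the same owner type; HVM ownership is
 * refcounted so multiple vPMU users may share it.  Returns 1 on success,
 * 0 if the PMU is held by a different owner.
 */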
int acquire_pmu_ownership(int pmu_ownership)
{
    spin_lock(&pmu_owner_lock);
    if ( pmu_owner == PMU_OWNER_NONE )
    {
        pmu_owner = pmu_ownership;
        goto out;
    }

    if ( pmu_owner == pmu_ownership )
        goto out;

    spin_unlock(&pmu_owner_lock);
    return 0;
 out:
    if ( pmu_owner == PMU_OWNER_HVM )
        pmu_hvm_refcount++;
    spin_unlock(&pmu_owner_lock);
    return 1;
}

void release_pmu_ownership(int pmu_ownership)
{
    spin_lock(&pmu_owner_lock);
    if ( pmu_ownership == PMU_OWNER_HVM )
        pmu_hvm_refcount--;
    if ( !pmu_hvm_refcount )
        pmu_owner = PMU_OWNER_NONE;
    spin_unlock(&pmu_owner_lock);
}

int is_active(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
}

int is_passive(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
}

static int is_profiled(struct domain *d)
{
    return (is_active(d) || is_passive(d));
}

static void xenoprof_reset_stat(void)
{
    total_samples = 0;
    invalid_buffer_samples = 0;
    corrupted_buffer_samples = 0;
    lost_samples = 0;
    active_samples = 0;
    passive_samples = 0;
    idle_samples = 0;
    others_samples = 0;
}

static void xenoprof_reset_buf(struct domain *d)
{
    int j;
    xenoprof_buf_t *buf;

    if ( d->xenoprof == NULL )
    {
        printk("xenoprof_reset_buf: ERROR - Unexpected "
               "Xenoprof NULL pointer\n");
        return;
    }

    for ( j = 0; j < d->max_vcpus; j++ )
    {
        buf = d->xenoprof->vcpu[j].buffer;
        if ( buf != NULL )
        {
            xenoprof_buf(d, buf, event_head) = 0;
            xenoprof_buf(d, buf, event_tail) = 0;
        }
    }
}

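/*
 * Hand the xenheap pages backing a sample buffer over to 'd' (the
 * profiler).  Fails with -EBUSY if a previous owner still holds
 * references on any of the pages.
 */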
static int
share_xenoprof_page_with_guest(struct domain *d, mfn_t mfn, int npages)
{
    int i;

    /* Check if previous page owner has released the page. */
    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn_add(mfn, i));

        if ( (page->count_info & (PGC_allocated|PGC_count_mask)) != 0 )
        {
            printk(XENLOG_G_INFO "dom%d mfn %#lx page->count_info %#lx\n",
                   d->domain_id, mfn_x(mfn_add(mfn, i)), page->count_info);
            return -EBUSY;
        }
        page_set_owner(page, NULL);
    }

    for ( i = 0; i < npages; i++ )
        share_xen_page_with_guest(mfn_to_page(mfn_add(mfn, i)),
                                  d, XENSHARE_writable);

    return 0;
}

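/*
 * Undo share_xenoprof_page_with_guest(): the calling domain must own the
 * pages; drop the allocation bit and the corresponding reference.
 */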
static void
unshare_xenoprof_page_with_guest(struct xenoprof *x)
{
    int i, npages = x->npages;
    mfn_t mfn = virt_to_mfn(x->rawbuf);

    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn_add(mfn, i));

        BUG_ON(page_get_owner(page) != current->domain);
        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);
    }
}

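/*
 * For auto-translated guests the buffer would have to be mapped at
 * 'gmaddr'; this is currently unsupported, so only warn (once) instead.
 */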
static void
xenoprof_shared_gmfn_with_guest(
    struct domain *d, unsigned long maddr, unsigned long gmaddr, int npages)
{
    int i;

    for ( i = 0; i < npages; i++, maddr += PAGE_SIZE, gmaddr += PAGE_SIZE )
    {
        BUG_ON(page_get_owner(maddr_to_page(maddr)) != d);
        if ( i == 0 )
            gdprintk(XENLOG_WARNING,
                     "xenoprof unsupported with autotranslated guests\n");
    }
}

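/*
 * Allocate d->xenoprof plus one sample buffer per vcpu, all carved from a
 * single contiguous xenheap allocation whose size is capped by
 * MAX_OPROF_SHARED_PAGES.
 */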
static int alloc_xenoprof_struct(
    struct domain *d, int max_samples, int is_passive)
{
    struct vcpu *v;
    int nvcpu, npages, bufsize, max_bufsize;
    unsigned max_max_samples;
    int i;

    nvcpu = 0;
    for_each_vcpu ( d, v )
        nvcpu++;

    if ( !nvcpu )
        return -EINVAL;

    d->xenoprof = xzalloc(struct xenoprof);
    if ( d->xenoprof == NULL )
    {
        printk("alloc_xenoprof_struct(): memory allocation failed\n");
        return -ENOMEM;
    }

    d->xenoprof->vcpu = xzalloc_array(struct xenoprof_vcpu, d->max_vcpus);
    if ( d->xenoprof->vcpu == NULL )
    {
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        printk("alloc_xenoprof_struct(): vcpu array allocation failed\n");
        return -ENOMEM;
    }

    bufsize = sizeof(struct xenoprof_buf);
    i = sizeof(struct event_log);
#ifdef CONFIG_COMPAT
    d->xenoprof->is_compat = is_pv_32bit_domain(is_passive ? hardware_domain : d);
    if ( XENOPROF_COMPAT(d->xenoprof) )
    {
        bufsize = sizeof(struct compat_oprof_buf);
        i = sizeof(struct compat_event_log);
    }
#endif

    /* reduce max_samples if necessary to limit pages allocated */
    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
    max_max_samples = ( (max_bufsize - bufsize) / i ) + 1;
    if ( (unsigned)max_samples > max_max_samples )
        max_samples = max_max_samples;

    bufsize += (max_samples - 1) * i;
    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;

    d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages), 0);
    if ( d->xenoprof->rawbuf == NULL )
    {
        xfree(d->xenoprof->vcpu);
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        return -ENOMEM;
    }

    d->xenoprof->npages = npages;
    d->xenoprof->nbuf = nvcpu;
    d->xenoprof->bufsize = bufsize;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;

    /* Update buffer pointers for active vcpus */
    i = 0;
    for_each_vcpu ( d, v )
    {
        xenoprof_buf_t *buf = (xenoprof_buf_t *)
            &d->xenoprof->rawbuf[i * bufsize];

        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
        d->xenoprof->vcpu[v->vcpu_id].buffer = buf;
        xenoprof_buf(d, buf, event_size) = max_samples;
        xenoprof_buf(d, buf, vcpu_id) = v->vcpu_id;

        i++;
        /* in the unlikely case that the number of active vcpus changes */
        if ( i >= nvcpu )
            break;
    }

    return 0;
}

void free_xenoprof_pages(struct domain *d)
{
    struct xenoprof *x;
    int order;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    if ( x->rawbuf != NULL )
    {
        order = get_order_from_pages(x->npages);
        free_xenheap_pages(x->rawbuf, order);
    }

    xfree(x->vcpu);
    xfree(x);
    d->xenoprof = NULL;
}

static int active_index(struct domain *d)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_domains[i] == d )
            return i;

    return -1;
}

static int set_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 1;
    x->domain_type = XENOPROF_DOMAIN_ACTIVE;
    active_ready[ind] = 1;
    activated++;

    return 0;
}

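/* Undo set_active(): mark the domain ignored, free its slot, drop its ref. */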
static int reset_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 0;
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
    active_ready[ind] = 0;
    active_domains[ind] = NULL;
    activated--;
    put_domain(d);

    if ( activated <= 0 )
        adomains = 0;

    return 0;
}

static void reset_passive(struct domain *d)
{
    struct xenoprof *x;

    if ( d == NULL )
        return;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    unshare_xenoprof_page_with_guest(x);
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
}

static void reset_active_list(void)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_ready[i] )
            reset_active(active_domains[i]);

    adomains = 0;
    activated = 0;
}

static void reset_passive_list(void)
{
    int i;

    for ( i = 0; i < pdomains; i++ )
    {
        reset_passive(passive_domains[i]);
        put_domain(passive_domains[i]);
        passive_domains[i] = NULL;
    }

    pdomains = 0;
}

static int add_active_list(domid_t domid)
{
    struct domain *d;

    if ( adomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    d = get_domain_by_id(domid);
    if ( d == NULL )
        return -EINVAL;

    active_domains[adomains] = d;
    active_ready[adomains] = 0;
    adomains++;

    return 0;
}

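/*
 * Register a domain for passive profiling: allocate its sample buffer if
 * needed and share the buffer pages with the calling (primary) profiler.
 */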
static int add_passive_list(XEN_GUEST_HANDLE_PARAM(void) arg)
{
    struct xenoprof_passive passive;
    struct domain *d;
    int ret = 0;

    if ( pdomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    if ( copy_from_guest(&passive, arg, 1) )
        return -EFAULT;

    d = get_domain_by_id(passive.domain_id);
    if ( d == NULL )
        return -EINVAL;

    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, passive.max_samples, 1);
        if ( ret < 0 )
        {
            put_domain(d);
            return -ENOMEM;
        }
    }

    ret = share_xenoprof_page_with_guest(
        current->domain, virt_to_mfn(d->xenoprof->rawbuf),
        d->xenoprof->npages);
    if ( ret < 0 )
    {
        put_domain(d);
        return ret;
    }

    d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE;
    passive.nbuf = d->xenoprof->nbuf;
    passive.bufsize = d->xenoprof->bufsize;
    if ( !paging_mode_translate(current->domain) )
        passive.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            current->domain, __pa(d->xenoprof->rawbuf),
            passive.buf_gmaddr, d->xenoprof->npages);

    if ( __copy_to_guest(arg, &passive, 1) )
    {
        put_domain(d);
        return -EFAULT;
    }

    passive_domains[pdomains] = d;
    pdomains++;

    return ret;
}


/* Get space in the buffer */
static int xenoprof_buf_space(struct domain *d, xenoprof_buf_t *buf, int size)
{
    int head, tail;

    head = xenoprof_buf(d, buf, event_head);
    tail = xenoprof_buf(d, buf, event_tail);

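    /*
     * Free slots in the ring: one slot is always left unused so that
     * head == tail unambiguously means "empty".
     */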
    return ((tail > head) ? 0 : size) + tail - head - 1;
}

/* Check for space and add a sample. Return 1 if successful, 0 otherwise. */
static int xenoprof_add_sample(struct domain *d, xenoprof_buf_t *buf,
                               uint64_t eip, int mode, int event)
{
    int head, tail, size;

    head = xenoprof_buf(d, buf, event_head);
    tail = xenoprof_buf(d, buf, event_tail);
    size = xenoprof_buf(d, buf, event_size);

    /* make sure indexes in shared buffer are sane */
    if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) )
    {
        corrupted_buffer_samples++;
        return 0;
    }

    if ( xenoprof_buf_space(d, buf, size) > 0 )
    {
        xenoprof_buf(d, buf, event_log[head].eip) = eip;
        xenoprof_buf(d, buf, event_log[head].mode) = mode;
        xenoprof_buf(d, buf, event_log[head].event) = event;
        head++;
        if ( head >= size )
            head = 0;

        xenoprof_buf(d, buf, event_head) = head;
    }
    else
    {
        xenoprof_buf(d, buf, lost_samples)++;
        lost_samples++;
        return 0;
    }

    return 1;
}

int xenoprof_add_trace(struct vcpu *vcpu, uint64_t pc, int mode)
{
    struct domain *d = vcpu->domain;
    xenoprof_buf_t *buf = d->xenoprof->vcpu[vcpu->vcpu_id].buffer;

    /* Do not accidentally write an escape code due to a broken frame. */
    if ( pc == XENOPROF_ESCAPE_CODE )
    {
        invalid_buffer_samples++;
        return 0;
    }

    return xenoprof_add_sample(d, buf, pc, mode, 0);
}

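/*
 * Record one sample for 'vcpu', preceded by an escape code and a
 * XENOPROF_TRACE_BEGIN marker when a backtrace was requested via
 * XENOPROF_set_backtrace.
 */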
void xenoprof_log_event(struct vcpu *vcpu, const struct cpu_user_regs *regs,
                        uint64_t pc, int mode, int event)
{
    struct domain *d = vcpu->domain;
    struct xenoprof_vcpu *v;
    xenoprof_buf_t *buf;

    total_samples++;

    /* Ignore samples of un-monitored domains. */
    if ( !is_profiled(d) )
    {
        others_samples++;
        return;
    }

    v = &d->xenoprof->vcpu[vcpu->vcpu_id];
    if ( v->buffer == NULL )
    {
        invalid_buffer_samples++;
        return;
    }

    buf = v->buffer;

    /* Provide backtrace if requested. */
    if ( backtrace_depth > 0 )
    {
        if ( (xenoprof_buf_space(d, buf, v->event_size) < 2) ||
             !xenoprof_add_sample(d, buf, XENOPROF_ESCAPE_CODE, mode,
                                  XENOPROF_TRACE_BEGIN) )
        {
            xenoprof_buf(d, buf, lost_samples)++;
            lost_samples++;
            return;
        }
    }

    if ( xenoprof_add_sample(d, buf, pc, mode, event) )
    {
        if ( is_active(vcpu->domain) )
            active_samples++;
        else
            passive_samples++;
        if ( mode == 0 )
            xenoprof_buf(d, buf, user_samples)++;
        else if ( mode == 1 )
            xenoprof_buf(d, buf, kernel_samples)++;
        else
            xenoprof_buf(d, buf, xen_samples)++;
    }

    if ( backtrace_depth > 0 )
        xenoprof_backtrace(vcpu, regs, backtrace_depth, mode);
}

static int xenoprof_op_init(XEN_GUEST_HANDLE_PARAM(void) arg)
{
    struct domain *d = current->domain;
    struct xenoprof_init xenoprof_init;
    int ret;

    if ( copy_from_guest(&xenoprof_init, arg, 1) )
        return -EFAULT;

    if ( (ret = xenoprof_arch_init(&xenoprof_init.num_events,
                                   xenoprof_init.cpu_type)) )
        return ret;

    /* Only the hardware domain may become the primary profiler here because
     * there is currently no cleanup of xenoprof_primary_profiler or associated
     * profiling state when the primary profiling domain is shut down or
     * crashes.  Once a better cleanup method is present, it will be possible to
     * allow another domain to be the primary profiler.
     */
    xenoprof_init.is_primary =
        ((xenoprof_primary_profiler == d) ||
         ((xenoprof_primary_profiler == NULL) && is_hardware_domain(d)));
    if ( xenoprof_init.is_primary )
        xenoprof_primary_profiler = current->domain;

    return __copy_to_guest(arg, &xenoprof_init, 1) ? -EFAULT : 0;
}

#define ret_t long

#endif /* !COMPAT */

static int xenoprof_op_get_buffer(XEN_GUEST_HANDLE_PARAM(void) arg)
{
    struct xenoprof_get_buffer xenoprof_get_buffer;
    struct domain *d = current->domain;
    int ret;

    if ( copy_from_guest(&xenoprof_get_buffer, arg, 1) )
        return -EFAULT;

    /*
     * We allocate xenoprof struct and buffers only at first time
     * get_buffer is called. Memory is then kept until domain is destroyed.
     */
    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, xenoprof_get_buffer.max_samples, 0);
        if ( ret < 0 )
            return ret;
    }

    ret = share_xenoprof_page_with_guest(
        d, virt_to_mfn(d->xenoprof->rawbuf), d->xenoprof->npages);
    if ( ret < 0 )
        return ret;

    xenoprof_reset_buf(d);

    d->xenoprof->domain_type  = XENOPROF_DOMAIN_IGNORED;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->is_primary   = (xenoprof_primary_profiler == current->domain);

    xenoprof_get_buffer.nbuf = d->xenoprof->nbuf;
    xenoprof_get_buffer.bufsize = d->xenoprof->bufsize;
    if ( !paging_mode_translate(d) )
        xenoprof_get_buffer.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            d, __pa(d->xenoprof->rawbuf), xenoprof_get_buffer.buf_gmaddr,
            d->xenoprof->npages);

    return __copy_to_guest(arg, &xenoprof_get_buffer, 1) ? -EFAULT : 0;
}

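/* Operations any domain may invoke; others require the primary profiler. */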
#define NONPRIV_OP(op) ( (op == XENOPROF_init)          \
                      || (op == XENOPROF_enable_virq)   \
                      || (op == XENOPROF_disable_virq)  \
                      || (op == XENOPROF_get_buffer))

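/*
 * Top-level XENOPROF hypercall dispatcher.  All state transitions are
 * serialized by xenoprof_lock.
 */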
ret_t do_xenoprof_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    int ret = 0;

    if ( (op < 0) || (op > XENOPROF_last_op) )
    {
        gdprintk(XENLOG_DEBUG, "invalid operation %d\n", op);
        return -EINVAL;
    }

    if ( !NONPRIV_OP(op) && (current->domain != xenoprof_primary_profiler) )
    {
        gdprintk(XENLOG_DEBUG, "denied privileged operation %d\n", op);
        return -EPERM;
    }

    ret = xsm_profile(XSM_HOOK, current->domain, op);
    if ( ret )
        return ret;

    spin_lock(&xenoprof_lock);

    switch ( op )
    {
    case XENOPROF_init:
        ret = xenoprof_op_init(arg);
        if ( (ret == 0) &&
             (current->domain == xenoprof_primary_profiler) )
            xenoprof_state = XENOPROF_INITIALIZED;
        break;

    case XENOPROF_get_buffer:
        if ( !acquire_pmu_ownership(PMU_OWNER_XENOPROF) )
        {
            ret = -EBUSY;
            break;
        }
        ret = xenoprof_op_get_buffer(arg);
        break;

    case XENOPROF_reset_active_list:
        reset_active_list();
        ret = 0;
        break;

    case XENOPROF_reset_passive_list:
        reset_passive_list();
        ret = 0;
        break;

    case XENOPROF_set_active:
    {
        domid_t domid;
        if ( xenoprof_state != XENOPROF_INITIALIZED )
        {
            ret = -EPERM;
            break;
        }
        if ( copy_from_guest(&domid, arg, 1) )
        {
            ret = -EFAULT;
            break;
        }
        ret = add_active_list(domid);
        break;
    }

    case XENOPROF_set_passive:
        if ( xenoprof_state != XENOPROF_INITIALIZED )
        {
            ret = -EPERM;
            break;
        }
        ret = add_passive_list(arg);
        break;

    case XENOPROF_reserve_counters:
        if ( xenoprof_state != XENOPROF_INITIALIZED )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_reserve_counters();
        if ( !ret )
            xenoprof_state = XENOPROF_COUNTERS_RESERVED;
        break;

    case XENOPROF_counter:
        if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) ||
             (adomains == 0) )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_counter(arg);
        break;

    case XENOPROF_setup_events:
        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_setup_events();
        if ( !ret )
            xenoprof_state = XENOPROF_READY;
        break;

    case XENOPROF_enable_virq:
    {
        int i;

        if ( current->domain == xenoprof_primary_profiler )
        {
            if ( xenoprof_state != XENOPROF_READY )
            {
                ret = -EPERM;
                break;
            }
            xenoprof_arch_enable_virq();
            xenoprof_reset_stat();
            for ( i = 0; i < pdomains; i++ )
                xenoprof_reset_buf(passive_domains[i]);
        }
        xenoprof_reset_buf(current->domain);
        ret = set_active(current->domain);
        break;
    }

    case XENOPROF_start:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_READY) &&
             (activated == adomains) )
            ret = xenoprof_arch_start();
        if ( ret == 0 )
            xenoprof_state = XENOPROF_PROFILING;
        break;

    case XENOPROF_stop:
    {
        struct domain *d;
        struct vcpu *v;
        int i;

        if ( xenoprof_state != XENOPROF_PROFILING )
        {
            ret = -EPERM;
            break;
        }
        xenoprof_arch_stop();

        /* Flush remaining samples. */
        for ( i = 0; i < adomains; i++ )
        {
            if ( !active_ready[i] )
                continue;
            d = active_domains[i];
            for_each_vcpu(d, v)
                send_guest_vcpu_virq(v, VIRQ_XENOPROF);
        }
        xenoprof_state = XENOPROF_READY;
        break;
    }

    case XENOPROF_disable_virq:
    {
        struct xenoprof *x;
        if ( (xenoprof_state == XENOPROF_PROFILING) &&
             (is_active(current->domain)) )
        {
            ret = -EPERM;
            break;
        }
        if ( (ret = reset_active(current->domain)) != 0 )
            break;
        x = current->domain->xenoprof;
        unshare_xenoprof_page_with_guest(x);
        release_pmu_ownership(PMU_OWNER_XENOPROF);
        break;
    }

    case XENOPROF_release_counters:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_COUNTERS_RESERVED) ||
             (xenoprof_state == XENOPROF_READY) )
        {
            xenoprof_state = XENOPROF_INITIALIZED;
            xenoprof_arch_release_counters();
            xenoprof_arch_disable_virq();
            reset_passive_list();
            ret = 0;
        }
        break;

    case XENOPROF_shutdown:
        ret = -EPERM;
        if ( xenoprof_state == XENOPROF_INITIALIZED )
        {
            activated = 0;
            adomains = 0;
            xenoprof_primary_profiler = NULL;
            backtrace_depth = 0;
            ret = 0;
        }
        break;

    case XENOPROF_set_backtrace:
        ret = 0;
        if ( !xenoprof_backtrace_supported() )
            ret = -EINVAL;
        else if ( copy_from_guest(&backtrace_depth, arg, 1) )
            ret = -EFAULT;
        break;

    case XENOPROF_ibs_counter:
        if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) ||
             (adomains == 0) )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_ibs_counter(arg);
        break;

    case XENOPROF_get_ibs_caps:
        ret = ibs_caps;
        break;

    default:
        ret = -ENOSYS;
    }

    spin_unlock(&xenoprof_lock);

    if ( ret < 0 )
        gdprintk(XENLOG_DEBUG, "operation %d failed: %d\n", op, ret);

    return ret;
}

#if defined(CONFIG_COMPAT) && !defined(COMPAT)
#undef ret_t
#include "compat/xenoprof.c"
#endif

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */