Lines matching references to data
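
These references appear to come from rtla's timerlat histogram tool (timerlat_hist.c). Every line below goes through one histogram object, but the listing never shows its type definitions, so here is a minimal sketch of the two structures implied by the accesses. The field names are taken directly from the lines below; the exact types and the comments are assumptions.

/* Sketch only: the per-CPU slot implied by the data->hist[cpu].<field> accesses. */
struct timerlat_hist_cpu {
        int *irq;                       /* one counter per bucket, plus one overflow slot */
        int *thread;
        int *user;

        int irq_count;                  /* samples seen, per context */
        int thread_count;
        int user_count;

        unsigned long long min_irq, sum_irq, max_irq;
        unsigned long long min_thread, sum_thread, max_thread;
        unsigned long long min_user, sum_user, max_user;
};

/* Sketch only: the container implied by the data-><field> accesses. */
struct timerlat_hist_data {
        struct timerlat_hist_cpu *hist; /* nr_cpus slots */
        int entries;                    /* number of configured buckets */
        int bucket_size;                /* bucket width, in the tool's latency unit */
        int nr_cpus;
};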

In timerlat_free_histogram(), data is the function argument (the histogram being torn down):

     55  timerlat_free_histogram(struct timerlat_hist_data *data)
     60  for (cpu = 0; cpu < data->nr_cpus; cpu++) {
     61  if (data->hist[cpu].irq)
     62  free(data->hist[cpu].irq);
     64  if (data->hist[cpu].thread)
     65  free(data->hist[cpu].thread);
     67  if (data->hist[cpu].user)
     68  free(data->hist[cpu].user);
     73  if (data->hist)
     74  free(data->hist);
     76  free(data);
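
Joined together and with the gaps filled in, the teardown reads roughly as below. This is a reconstruction from the fragments above, not the verbatim source: the return type and loop braces are assumed, and the per-CPU loop is tucked under the data->hist check so a partially built object (handed over by the allocator's error path) is still safe to free.

#include <stdlib.h>

static void timerlat_free_histogram(struct timerlat_hist_data *data)
{
        int cpu;

        if (data->hist) {
                /* release each CPU's three bucket arrays (irq, thread, user) */
                for (cpu = 0; cpu < data->nr_cpus; cpu++) {
                        if (data->hist[cpu].irq)
                                free(data->hist[cpu].irq);
                        if (data->hist[cpu].thread)
                                free(data->hist[cpu].thread);
                        if (data->hist[cpu].user)
                                free(data->hist[cpu].user);
                }
                /* then the per-CPU array itself */
                free(data->hist);
        }
        free(data);
}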

In timerlat_alloc_histogram(), data is a local (the histogram being built):

     85  struct timerlat_hist_data *data;
     88  data = calloc(1, sizeof(*data));
     89  if (!data)
     92  data->entries = entries;
     93  data->bucket_size = bucket_size;
     94  data->nr_cpus = nr_cpus;
     97  data->hist = calloc(1, sizeof(*data->hist) * nr_cpus);
     98  if (!data->hist)
    103  data->hist[cpu].irq = calloc(1, sizeof(*data->hist->irq) * (entries + 1));
    104  if (!data->hist[cpu].irq)
    107  data->hist[cpu].thread = calloc(1, sizeof(*data->hist->thread) * (entries + 1));
    108  if (!data->hist[cpu].thread)
    111  data->hist[cpu].user = calloc(1, sizeof(*data->hist->user) * (entries + 1));
    112  if (!data->hist[cpu].user)
    118  data->hist[cpu].min_irq = ~0;
    119  data->hist[cpu].min_thread = ~0;
    120  data->hist[cpu].min_user = ~0;
    123  return data;
    126  timerlat_free_histogram(data);
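
The allocation path is the mirror image. Again a reconstruction from the fragments, with the loop structure, return type, and the cleanup label assumed. Two details worth calling out: each context gets entries + 1 counters, the extra one acting as an overflow bucket, and the minimums start at ~0 (all bits set) so the first real sample always wins the min comparison.

#include <stdlib.h>

static struct timerlat_hist_data *
timerlat_alloc_histogram(int nr_cpus, int entries, int bucket_size)
{
        struct timerlat_hist_data *data;
        int cpu;

        data = calloc(1, sizeof(*data));
        if (!data)
                return NULL;

        data->entries = entries;
        data->bucket_size = bucket_size;
        data->nr_cpus = nr_cpus;

        /* one slot per CPU */
        data->hist = calloc(1, sizeof(*data->hist) * nr_cpus);
        if (!data->hist)
                goto cleanup;

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                /* entries buckets plus one overflow bucket, per context */
                data->hist[cpu].irq = calloc(1, sizeof(*data->hist->irq) * (entries + 1));
                if (!data->hist[cpu].irq)
                        goto cleanup;

                data->hist[cpu].thread = calloc(1, sizeof(*data->hist->thread) * (entries + 1));
                if (!data->hist[cpu].thread)
                        goto cleanup;

                data->hist[cpu].user = calloc(1, sizeof(*data->hist->user) * (entries + 1));
                if (!data->hist[cpu].user)
                        goto cleanup;

                /* all-ones start value: the first sample becomes the minimum */
                data->hist[cpu].min_irq = ~0;
                data->hist[cpu].min_thread = ~0;
                data->hist[cpu].min_user = ~0;
        }

        return data;

cleanup:
        /* frees whatever was built so far; see the teardown sketch above */
        timerlat_free_histogram(data);
        return NULL;
}

Because calloc() zeroes everything it returns, only the minimums need explicit initialisation; every counter and sum starts at zero for free.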

In timerlat_hist_update(), data is a local taken from tool->data:

    139  struct timerlat_hist_data *data = tool->data;
    140  int entries = data->entries;
    147  bucket = latency / data->bucket_size;
    150  hist = data->hist[cpu].irq;
    151  data->hist[cpu].irq_count++;
    152  update_min(&data->hist[cpu].min_irq, &latency);
    153  update_sum(&data->hist[cpu].sum_irq, &latency);
    154  update_max(&data->hist[cpu].max_irq, &latency);
    156  hist = data->hist[cpu].thread;
    157  data->hist[cpu].thread_count++;
    158  update_min(&data->hist[cpu].min_thread, &latency);
    159  update_sum(&data->hist[cpu].sum_thread, &latency);
    160  update_max(&data->hist[cpu].max_thread, &latency);
    162  hist = data->hist[cpu].user;
    163  data->hist[cpu].user_count++;
    164  update_min(&data->hist[cpu].min_user, &latency);
    165  update_sum(&data->hist[cpu].sum_user, &latency);
    166  update_max(&data->hist[cpu].max_user, &latency);
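
The update path picks the per-context bucket array, bumps the running statistics, and files the sample into a bucket. The listed lines use update_min()/update_sum()/update_max() helpers and a hist pointer whose final hist[bucket]++ does not reference data, so it is missing from the listing. The sketch below inlines those pieces for the irq context only (thread and user are identical apart from the field names); the overflow handling is an assumption consistent with the entries + 1 allocation above.

static void hist_update_irq_sketch(struct timerlat_hist_data *data, int cpu,
                                   unsigned long long latency)
{
        struct timerlat_hist_cpu *slot = &data->hist[cpu];
        int bucket = latency / data->bucket_size;   /* which bucket the sample falls into */

        slot->irq_count++;
        if (latency < slot->min_irq)
                slot->min_irq = latency;
        slot->sum_irq += latency;
        if (latency > slot->max_irq)
                slot->max_irq = latency;

        /* samples beyond the configured range share the extra bucket at
         * index data->entries */
        if (bucket < data->entries)
                slot->irq[bucket]++;
        else
                slot->irq[data->entries]++;
}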

In timerlat_hist_handler(), data is the void * argument of the event callback:

    180  struct tep_event *event, void *data)
    182  struct trace_instance *trace = data;
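
Note that here the name data is not the histogram at all: it is the opaque void * cookie that the trace event callback receives, and the first statement casts it back to the trace_instance that was registered with it. A generic sketch of that pattern (the callback signature is simplified here, not the real libtraceevent one):

struct trace_instance;   /* opaque here; owned by the tracing setup */

/* Whatever pointer was registered alongside the callback comes back as
 * void *data; recovering the real type is the first step. */
static int event_handler_sketch(void *event, void *data)
{
        struct trace_instance *trace = data;

        /* ... read the event's cpu, context, and latency fields, then feed
         * them to the histogram update path ... */
        (void)event;
        (void)trace;
        return 0;
}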

In timerlat_hist_bpf_pull_data(), data is a local taken from tool->data:

    202  struct timerlat_hist_data *data = tool->data;
    204  long long value_irq[data->nr_cpus],
    205  value_thread[data->nr_cpus],
    206  value_user[data->nr_cpus];
    209  for (i = 0; i < data->entries; i++) {
    211  value_user, data->nr_cpus);
    214  for (j = 0; j < data->nr_cpus; j++) {
    215  data->hist[j].irq[i] = value_irq[j];
    216  data->hist[j].thread[i] = value_thread[j];
    217  data->hist[j].user[i] = value_user[j];
    224  data->nr_cpus);
    227  for (i = 0; i < data->nr_cpus; i++) {
    228  data->hist[i].irq_count = value_irq[i];
    229  data->hist[i].thread_count = value_thread[i];
    230  data->hist[i].user_count = value_user[i];
    235  data->nr_cpus);
    238  for (i = 0; i < data->nr_cpus; i++) {
    239  data->hist[i].min_irq = value_irq[i];
    240  data->hist[i].min_thread = value_thread[i];
    241  data->hist[i].min_user = value_user[i];
    246  data->nr_cpus);
    249  for (i = 0; i < data->nr_cpus; i++) {
    250  data->hist[i].max_irq = value_irq[i];
    251  data->hist[i].max_thread = value_thread[i];
    252  data->hist[i].max_user = value_user[i];
    257  data->nr_cpus);
    260  for (i = 0; i < data->nr_cpus; i++) {
    261  data->hist[i].sum_irq = value_irq[i];
    262  data->hist[i].sum_thread = value_thread[i];
    263  data->hist[i].sum_user = value_user[i];
    268  data->nr_cpus);
    271  for (i = 0; i < data->nr_cpus; i++) {
    272  data->hist[i].irq[data->entries] = value_irq[i];
    273  data->hist[i].thread[data->entries] = value_thread[i];
    274  data->hist[i].user[data->entries] = value_user[i];
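
When sample collection runs in BPF, the histogram lives in BPF maps and is only copied into timerlat_hist_data when it is time to report. The helper that actually reads the maps does not reference data, so its lines are missing from the listing; the sketch below uses a stand-in helper, bpf_read_bucket(), whose name and signature are assumptions, purely to show the copy-out shape.

/* Stand-in for the real map-reading helper: fills one per-CPU row of bucket
 * counts. The zero-fill body is a placeholder, not the real behavior. */
static int bpf_read_bucket(int bucket, long long *value_irq,
                           long long *value_thread, long long *value_user,
                           int nr_cpus)
{
        for (int i = 0; i < nr_cpus; i++)
                value_irq[i] = value_thread[i] = value_user[i] = 0;
        (void)bucket;
        return 0;
}

static int bpf_pull_sketch(struct timerlat_hist_data *data)
{
        long long value_irq[data->nr_cpus],
                  value_thread[data->nr_cpus],
                  value_user[data->nr_cpus];
        int i, j, err;

        /* one pass per bucket: read every CPU's count for that bucket and
         * copy it into the matching slot of the histogram */
        for (i = 0; i < data->entries; i++) {
                err = bpf_read_bucket(i, value_irq, value_thread, value_user,
                                      data->nr_cpus);
                if (err)
                        return err;

                for (j = 0; j < data->nr_cpus; j++) {
                        data->hist[j].irq[i] = value_irq[j];
                        data->hist[j].thread[i] = value_thread[j];
                        data->hist[j].user[i] = value_user[j];
                }
        }

        /* the listing repeats the same copy-out shape for the summary values
         * (count, min, max, sum) and for the overflow bucket stored at index
         * data->entries */
        return 0;
}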

In timerlat_hist_header(), data is a local taken from tool->data:

    286  struct timerlat_hist_data *data = tool->data;
    305  for (cpu = 0; cpu < data->nr_cpus; cpu++) {
    309  if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
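
The header only emits columns for CPUs that actually recorded something; the same !irq_count && !thread_count guard reappears in every print loop below. A minimal sketch of the filter (the column label format is an assumption):

#include <stdio.h>

static void header_sketch(struct timerlat_hist_data *data)
{
        int cpu;

        printf("Index");
        for (cpu = 0; cpu < data->nr_cpus; cpu++) {
                /* a CPU with neither irq nor thread samples gets no column */
                if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
                        continue;
                printf(" IRQ-%03d Thr-%03d", cpu, cpu);
        }
        printf("\n");
}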

In timerlat_print_summary(), data is the function argument:

    349  struct timerlat_hist_data *data)
    359  for (cpu = 0; cpu < data->nr_cpus; cpu++) {
    363  if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
    368  data->hist[cpu].irq_count);
    372  data->hist[cpu].thread_count);
    376  data->hist[cpu].user_count);
    383  for (cpu = 0; cpu < data->nr_cpus; cpu++) {
    387  if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
    392  data->hist[cpu].irq_count,
    393  data->hist[cpu].min_irq,
    398  data->hist[cpu].thread_count,
    399  data->hist[cpu].min_thread,
    404  data->hist[cpu].user_count,
    405  data->hist[cpu].min_user,
    413  for (cpu = 0; cpu < data->nr_cpus; cpu++) {
    417  if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
    422  data->hist[cpu].irq_count,
    423  data->hist[cpu].sum_irq,
    428  data->hist[cpu].thread_count,
    429  data->hist[cpu].sum_thread,
    434  data->hist[cpu].user_count,
    435  data->hist[cpu].sum_user,
    443  for (cpu = 0; cpu < data->nr_cpus; cpu++) {
    447  if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
    452  data->hist[cpu].irq_count,
    453  data->hist[cpu].max_irq,
    458  data->hist[cpu].thread_count,
    459  data->hist[cpu].max_thread,
    464  data->hist[cpu].user_count,
    465  data->hist[cpu].max_user,
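
The summary is four passes over the same slots: one for counts, then min, then average, then max. The average is not stored anywhere, so it has to be derived from sum and count, and a context that never fired (user threads without a user workload attached, for instance) has a zero count, so the division needs a guard. A sketch of one irq row (labels and widths assumed):

#include <stdio.h>

static void summary_row_sketch(struct timerlat_hist_data *data, int cpu)
{
        struct timerlat_hist_cpu *slot = &data->hist[cpu];

        /* same guard as every other pass: skip CPUs with no samples */
        if (!slot->irq_count && !slot->thread_count)
                return;

        printf("cpu %3d irq: count %9d min %9llu avg %9llu max %9llu\n",
               cpu,
               slot->irq_count,
               slot->min_irq,
               slot->irq_count ? slot->sum_irq / slot->irq_count : 0,
               slot->max_irq);
}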

In timerlat_print_stats_all(), data is the function argument:

    476  struct timerlat_hist_data *data)
    490  for (cpu = 0; cpu < data->nr_cpus; cpu++) {
    494  if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
    497  cpu_data = &data->hist[cpu];
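
This routine walks the slots one more time to fold every CPU into a single system-wide summary; cpu_data = &data->hist[cpu] just shortens the field accesses. A sketch of the fold for the irq context (reusing the per-CPU struct as the accumulator is a convenience of the sketch, not necessarily what the tool does):

static void stats_all_sketch(struct timerlat_hist_data *data)
{
        struct timerlat_hist_cpu sum = {
                .min_irq = ~0ULL,       /* so any real minimum replaces it */
        };
        struct timerlat_hist_cpu *cpu_data;
        int cpu;

        for (cpu = 0; cpu < data->nr_cpus; cpu++) {
                if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
                        continue;

                cpu_data = &data->hist[cpu];

                /* counts and sums add, min and max fold */
                sum.irq_count += cpu_data->irq_count;
                sum.sum_irq += cpu_data->sum_irq;
                if (cpu_data->min_irq < sum.min_irq)
                        sum.min_irq = cpu_data->min_irq;
                if (cpu_data->max_irq > sum.max_irq)
                        sum.max_irq = cpu_data->max_irq;
                /* thread and user fold the same way */
        }

        /* ...then print one system-wide row shaped like the per-CPU rows */
        (void)sum;
}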

In timerlat_print_stats(), data is a local taken from tool->data:

    624  struct timerlat_hist_data *data = tool->data;
    631  for (bucket = 0; bucket < data->entries; bucket++) {
    636  bucket * data->bucket_size);
    638  for (cpu = 0; cpu < data->nr_cpus; cpu++) {
    642  if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
    646  total += data->hist[cpu].irq[bucket];
    648  data->hist[cpu].irq[bucket]);
    652  total += data->hist[cpu].thread[bucket];
    654  data->hist[cpu].thread[bucket]);
    658  total += data->hist[cpu].user[bucket];
    660  data->hist[cpu].user[bucket]);
    678  for (cpu = 0; cpu < data->nr_cpus; cpu++) {
    682  if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
    687  data->hist[cpu].irq[data->entries]);
    691  data->hist[cpu].thread[data->entries]);
    695  data->hist[cpu].user[data->entries]);
    701  timerlat_print_summary(params, trace, data);
    702  timerlat_print_stats_all(params, trace, data);
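
This is the histogram body itself: one row per bucket, labelled with the bucket's lower bound (bucket * bucket_size), one column per active CPU and context, plus a running total, and finally an extra row for the overflow counts stored at index data->entries. A sketch of the irq columns only (row labels and widths are assumptions):

#include <stdio.h>

static void print_buckets_sketch(struct timerlat_hist_data *data)
{
        int bucket, cpu, total;

        for (bucket = 0; bucket < data->entries; bucket++) {
                /* row label: the bucket's lower bound in the tool's latency unit */
                printf("%-6d", bucket * data->bucket_size);

                total = 0;
                for (cpu = 0; cpu < data->nr_cpus; cpu++) {
                        if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
                                continue;

                        total += data->hist[cpu].irq[bucket];
                        printf(" %9d", data->hist[cpu].irq[bucket]);
                }
                printf(" %9d\n", total);
        }

        /* everything that fell past the last configured bucket was filed at
         * index data->entries by the update path; report it as one more row */
        printf("over: ");
        for (cpu = 0; cpu < data->nr_cpus; cpu++) {
                if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
                        continue;
                printf(" %9d", data->hist[cpu].irq[data->entries]);
        }
        printf("\n");
}

The trailing calls at lines 701 and 702 show the order of the full report: the bucket table first, then timerlat_print_summary(), then timerlat_print_stats_all().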

In timerlat_init_hist() and timerlat_hist_main(), the histogram is attached to and released from tool->data:

   1130  tool->data = timerlat_alloc_histogram(nr_cpus, params->entries, params->bucket_size);
   1131  if (!tool->data)
   1478  timerlat_free_histogram(tool->data);
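
Putting the lifecycle together: timerlat_init_hist() builds the histogram once and hangs it off tool->data, which is where the update, BPF pull, and print paths above find it, and timerlat_hist_main() frees it on the way out. A compressed sketch of that ownership flow (the tool structure and everything between init and teardown are stand-ins, not the real rtla types):

struct tool_sketch {
        void *data;     /* the histogram lives here for the whole run */
};

static int run_sketch(int nr_cpus, int entries, int bucket_size)
{
        struct tool_sketch tool = { 0 };

        /* init: refuse to run if the histogram cannot be allocated */
        tool.data = timerlat_alloc_histogram(nr_cpus, entries, bucket_size);
        if (!tool.data)
                return -1;

        /* ... sampling: each sample goes through the update path, or the
         * whole histogram is pulled from BPF maps at the end ... */

        /* ... reporting: the bucket table, then the summaries ... */

        /* teardown, mirroring timerlat_hist_main() */
        timerlat_free_histogram(tool.data);
        return 0;
}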