Lines matching references to "cpu" in kernel/trace/ring_buffer.c

312 #define for_each_buffer_cpu(buffer, cpu)		\  argument
313 for_each_cpu(cpu, buffer->cpumask)
315 #define for_each_online_buffer_cpu(buffer, cpu) \ argument
316 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
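The two iteration helpers above are private to ring_buffer.c; code outside the file walks the per-CPU buffers with the generic CPU iterators plus the exported per-CPU accessors instead. A minimal sketch under that assumption (my_total_entries is hypothetical), mirroring what ring_buffer_entries() does internally around line 5204:

#include <linux/cpumask.h>
#include <linux/ring_buffer.h>

/* Hypothetical helper: sum the per-CPU entry counts using only exported
 * accessors, since for_each_buffer_cpu() is not visible outside this file. */
static unsigned long my_total_entries(struct trace_buffer *buffer)
{
	unsigned long total = 0;
	int cpu;

	for_each_online_cpu(cpu)
		total += ring_buffer_entries_cpu(buffer, cpu);

	return total;
}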
478 int cpu; member
735 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_dirty_pages() argument
741 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
742 lost = local_read(&buffer->buffers[cpu]->pages_lost); in ring_buffer_nr_dirty_pages()
743 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
759 static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full) in full_hit() argument
761 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
774 dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1; in full_hit()
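The dirty-page count above feeds the wakeup watermark: "full" is a fill percentage of the per-CPU buffer, and the +1 at line 774 accounts for the sub-buffer the writer currently occupies. A hedged reconstruction of that percentage check (illustrative only, not necessarily the exact kernel expression):

#include <linux/types.h>

/* Illustrative watermark check in the spirit of full_hit():
 * "full" is the requested fill percentage; 0 (or an empty buffer)
 * means any data satisfies the waiter. */
static bool watermark_hit(size_t nr_pages, size_t dirty_pages, int full)
{
	if (!nr_pages || !full)
		return true;

	/* +1: the sub-buffer being written is not counted as dirty. */
	return (dirty_pages + 1) * 100 >= (size_t)full * nr_pages;
}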
819 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) in ring_buffer_wake_waiters() argument
827 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_wake_waiters()
830 for_each_buffer_cpu(buffer, cpu) in ring_buffer_wake_waiters()
831 ring_buffer_wake_waiters(buffer, cpu); in ring_buffer_wake_waiters()
837 if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) in ring_buffer_wake_waiters()
840 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
851 static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full) in rb_watermark_hit() argument
857 if (cpu == RING_BUFFER_ALL_CPUS) in rb_watermark_hit()
860 cpu_buffer = buffer->buffers[cpu]; in rb_watermark_hit()
862 if (!ring_buffer_empty_cpu(buffer, cpu)) { in rb_watermark_hit()
871 ret = !pagebusy && full_hit(buffer, cpu, full); in rb_watermark_hit()
884 int cpu, int full, ring_buffer_cond_fn cond, void *data) in rb_wait_cond() argument
886 if (rb_watermark_hit(buffer, cpu, full)) in rb_wait_cond()
949 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full, in ring_buffer_wait() argument
963 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_wait()
968 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
970 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
988 rb_wait_cond(rbwork, buffer, cpu, full, cond, data)); in ring_buffer_wait()
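ring_buffer_wait() at line 949 takes an optional condition callback and opaque data (line 884); passing NULL for both falls back to the default wait condition. A minimal consumer sketch built on that signature plus ring_buffer_consume() from line 5903 (my_drain_cpu and the log format are hypothetical):

#include <linux/printk.h>
#include <linux/ring_buffer.h>

/* Hypothetical consumer: block until CPU "cpu" has data (full == 0 means
 * "any data"), then drain it without blocking. */
static int my_drain_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost = 0;
	int ret;

	ret = ring_buffer_wait(buffer, cpu, 0, NULL, NULL);
	if (ret)
		return ret;

	while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost)))
		pr_info("event: %u bytes, %lu events lost so far\n",
			ring_buffer_event_length(event), lost);

	return 0;
}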
1008 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, in ring_buffer_poll_wait() argument
1014 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_poll_wait()
1018 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
1021 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1028 if (rb_watermark_hit(buffer, cpu, full)) in ring_buffer_poll_wait()
1065 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || in ring_buffer_poll_wait()
1066 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) in ring_buffer_poll_wait()
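ring_buffer_poll_wait() is the non-blocking counterpart, meant to be called from a file_operations ->poll handler. A sketch under the assumption that the trailing argument is the same fill percentage used above (the exact parameter list tracks recent kernels and is an assumption here); struct my_trace_file is hypothetical:

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ring_buffer.h>

struct my_trace_file {			/* hypothetical per-file state */
	struct trace_buffer *buffer;
	int cpu;			/* or RING_BUFFER_ALL_CPUS */
};

/* Hypothetical ->poll handler: returns readable mask bits once the
 * requested CPU buffer (or any buffer) has data to consume. */
static __poll_t my_trace_poll(struct file *filp, poll_table *pt)
{
	struct my_trace_file *tf = filp->private_data;

	return ring_buffer_poll_wait(tf->buffer, tf->cpu, filp, pt, 0);
}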
1117 int cpu, u64 *ts) in ring_buffer_normalize_time_stamp() argument
1590 static void *rb_range_meta(struct trace_buffer *buffer, int nr_pages, int cpu) in rb_range_meta() argument
1617 if (cpu) { in rb_range_meta()
1622 if (cpu > 1) { in rb_range_meta()
1633 ptr += size * (cpu - 2); in rb_range_meta()
1660 meta = rb_range_meta(cpu_buffer->buffer, 0, cpu_buffer->cpu); in rb_range_buffer()
1750 static bool rb_cpu_meta_valid(struct ring_buffer_cpu_meta *meta, int cpu, in rb_cpu_meta_valid() argument
1769 pr_info("Ring buffer boot meta [%d] head buffer out of range\n", cpu); in rb_cpu_meta_valid()
1775 pr_info("Ring buffer boot meta [%d] commit buffer out of range\n", cpu); in rb_cpu_meta_valid()
1787 pr_info("Ring buffer boot meta [%d] array out of range\n", cpu); in rb_cpu_meta_valid()
1792 pr_info("Ring buffer boot meta [%d] buffer invalid commit\n", cpu); in rb_cpu_meta_valid()
1797 pr_info("Ring buffer boot meta [%d] array has duplicates\n", cpu); in rb_cpu_meta_valid()
1810 static int rb_read_data_buffer(struct buffer_data_page *dpage, int tail, int cpu, in rb_read_data_buffer() argument
1862 static int rb_validate_buffer(struct buffer_data_page *dpage, int cpu) in rb_validate_buffer() argument
1869 return rb_read_data_buffer(dpage, tail, cpu, &ts, &delta); in rb_validate_buffer()
1887 ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu); in rb_meta_validate_events()
1923 ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu); in rb_meta_validate_events()
1935 pr_info("Ring buffer [%d] rewound %d pages\n", cpu_buffer->cpu, i); in rb_meta_validate_events()
2002 ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu); in rb_meta_validate_events()
2005 cpu_buffer->cpu); in rb_meta_validate_events()
2023 cpu_buffer->cpu); in rb_meta_validate_events()
2030 pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu); in rb_meta_validate_events()
2056 int cpu; in rb_range_meta_init() local
2066 for (cpu = 0; cpu < nr_cpu_ids; cpu++) { in rb_range_meta_init()
2069 meta = rb_range_meta(buffer, nr_pages, cpu); in rb_range_meta_init()
2071 if (valid && rb_cpu_meta_valid(meta, cpu, buffer, nr_pages, subbuf_mask)) { in rb_range_meta_init()
2081 if (cpu < nr_cpu_ids - 1) in rb_range_meta_init()
2082 next_meta = rb_range_meta(buffer, nr_pages, cpu + 1); in rb_range_meta_init()
2170 int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu) in ring_buffer_meta_seq_init() argument
2180 m->private = buffer->buffers[cpu]; in ring_buffer_meta_seq_init()
2241 meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu); in __rb_allocate_pages()
2247 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
2270 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), in __rb_allocate_pages()
2325 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu) in rb_allocate_cpu_buffer() argument
2334 GFP_KERNEL, cpu_to_node(cpu)); in rb_allocate_cpu_buffer()
2338 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
2351 GFP_KERNEL, cpu_to_node(cpu)); in rb_allocate_cpu_buffer()
2365 cpu_buffer->ring_meta = rb_range_meta(buffer, nr_pages, cpu); in rb_allocate_cpu_buffer()
2373 page = alloc_pages_node(cpu_to_node(cpu), in rb_allocate_cpu_buffer()
2462 int cpu; in alloc_buffer() local
2559 cpu = raw_smp_processor_id(); in alloc_buffer()
2560 cpumask_set_cpu(cpu, buffer->cpumask); in alloc_buffer()
2561 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in alloc_buffer()
2562 if (!buffer->buffers[cpu]) in alloc_buffer()
2574 for_each_buffer_cpu(buffer, cpu) { in alloc_buffer()
2575 if (buffer->buffers[cpu]) in alloc_buffer()
2576 rb_free_cpu_buffer(buffer->buffers[cpu]); in alloc_buffer()
2656 int cpu; in ring_buffer_free() local
2662 for_each_buffer_cpu(buffer, cpu) in ring_buffer_free()
2663 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
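alloc_buffer() and ring_buffer_free() above bracket the buffer lifetime: allocation sets up one ring_buffer_per_cpu for the boot CPU (line 2559) and the hotplug callback fills in the rest, while freeing walks every buffer CPU. From a caller's point of view the lifecycle is just allocate, write, free; a minimal smoke-test sketch (my_rb_smoke_test is hypothetical):

#include <linux/errno.h>
#include <linux/ring_buffer.h>

/* Hypothetical lifecycle check: allocate a small overwrite-mode buffer
 * (the size is per CPU), write one record on the current CPU, free it. */
static int my_rb_smoke_test(void)
{
	struct trace_buffer *buffer;
	char msg[] = "hello ring buffer";
	int ret;

	buffer = ring_buffer_alloc(4096, RB_FL_OVERWRITE);	/* ~one page per CPU */
	if (!buffer)
		return -ENOMEM;

	ret = ring_buffer_write(buffer, sizeof(msg), msg);

	ring_buffer_free(buffer);
	return ret;
}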
2927 int cpu, err; in ring_buffer_resize() local
2962 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2963 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2971 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2972 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3001 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
3002 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3007 if (!cpu_online(cpu)) { in ring_buffer_resize()
3013 if (cpu != smp_processor_id()) { in ring_buffer_resize()
3015 schedule_work_on(cpu, in ring_buffer_resize()
3025 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
3026 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3030 if (cpu_online(cpu)) in ring_buffer_resize()
3099 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
3100 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3111 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
3114 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
4024 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
4155 int cpu; in ring_buffer_nest_start() local
4159 cpu = raw_smp_processor_id(); in ring_buffer_nest_start()
4160 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
4175 int cpu; in ring_buffer_nest_end() local
4178 cpu = raw_smp_processor_id(); in ring_buffer_nest_end()
4179 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
4196 int cpu = raw_smp_processor_id(); in ring_buffer_unlock_commit() local
4198 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
4394 ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta); in check_buffer()
4398 cpu_buffer->cpu, ts, delta); in check_buffer()
4405 cpu_buffer->cpu, in check_buffer()
4665 int cpu; in ring_buffer_lock_reserve() local
4673 cpu = raw_smp_processor_id(); in ring_buffer_lock_reserve()
4675 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
4678 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
4766 int cpu; in ring_buffer_discard_commit() local
4771 cpu = smp_processor_id(); in ring_buffer_discard_commit()
4772 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
4813 int cpu; in ring_buffer_write() local
4820 cpu = raw_smp_processor_id(); in ring_buffer_write()
4822 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
4825 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
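Lines 4665-4825 cover the two producer paths: ring_buffer_write() copies a caller buffer in one shot, while ring_buffer_lock_reserve()/ring_buffer_unlock_commit() let the caller fill the event in place (with ring_buffer_discard_commit() at line 4766 to back out). A reserve/commit sketch, assuming the single-argument commit form implied by line 4196, where the commit looks up the current CPU itself (my_write_u64 is hypothetical):

#include <linux/errno.h>
#include <linux/ring_buffer.h>

/* Hypothetical in-place producer: reserve space, write the payload
 * directly into the ring buffer page, then commit. */
static int my_write_u64(struct trace_buffer *buffer, u64 val)
{
	struct ring_buffer_event *event;
	u64 *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return -EBUSY;

	entry = ring_buffer_event_data(event);
	*entry = val;

	return ring_buffer_unlock_commit(buffer);
}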
4983 bool ring_buffer_record_is_on_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_is_on_cpu() argument
4987 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_is_on_cpu()
5003 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_disable_cpu() argument
5007 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
5010 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
5023 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_enable_cpu() argument
5027 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
5030 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
5040 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) in ring_buffer_oldest_event_ts() argument
5047 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
5050 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
5073 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_bytes_cpu() argument
5078 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
5081 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
5093 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_entries_cpu() argument
5097 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
5100 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
5112 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_overrun_cpu() argument
5117 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
5120 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
5135 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_commit_overrun_cpu() argument
5140 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
5143 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
5157 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_dropped_events_cpu() argument
5162 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
5165 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
5178 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_read_events_cpu() argument
5182 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
5185 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
5201 int cpu; in ring_buffer_entries() local
5204 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_entries()
5205 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
5224 int cpu; in ring_buffer_overruns() local
5227 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_overruns()
5228 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
5668 cpu_buffer->cpu, ts); in rb_buffer_peek()
5678 cpu_buffer->cpu, ts); in rb_buffer_peek()
5761 cpu_buffer->cpu, ts); in rb_iter_peek()
5771 cpu_buffer->cpu, ts); in rb_iter_peek()
5825 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_peek() argument
5828 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
5833 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
5903 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_consume() argument
5915 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
5918 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
5955 ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags) in ring_buffer_read_start() argument
5960 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_start()
5975 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_start()
6035 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) in ring_buffer_size() argument
6037 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
6040 return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
6197 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_reset_cpu() argument
6199 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
6201 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
6232 int cpu; in ring_buffer_reset_online_cpus() local
6237 for_each_online_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
6238 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
6247 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
6248 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
6273 int cpu; in ring_buffer_reset() local
6278 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
6279 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
6288 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
6289 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
6311 int cpu; in ring_buffer_empty() local
6314 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_empty()
6315 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
6335 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_empty_cpu() argument
6342 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
6345 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
6369 struct trace_buffer *buffer_b, int cpu) in ring_buffer_swap_cpu() argument
6375 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || in ring_buffer_swap_cpu()
6376 !cpumask_test_cpu(cpu, buffer_b->cpumask)) in ring_buffer_swap_cpu()
6379 cpu_buffer_a = buffer_a->buffers[cpu]; in ring_buffer_swap_cpu()
6380 cpu_buffer_b = buffer_b->buffers[cpu]; in ring_buffer_swap_cpu()
6429 buffer_a->buffers[cpu] = cpu_buffer_b; in ring_buffer_swap_cpu()
6430 buffer_b->buffers[cpu] = cpu_buffer_a; in ring_buffer_swap_cpu()
6462 ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) in ring_buffer_alloc_read_page() argument
6469 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
6477 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
6492 page = alloc_pages_node(cpu_to_node(cpu), in ring_buffer_alloc_read_page()
6517 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, in ring_buffer_free_read_page() argument
6525 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) in ring_buffer_free_read_page()
6528 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
6591 size_t len, int cpu, int full) in ring_buffer_read_page() argument
6593 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
6602 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
6828 int cpu; in ring_buffer_subbuf_order_set() local
6858 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_subbuf_order_set()
6860 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_subbuf_order_set()
6863 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6871 nr_pages = old_size * buffer->buffers[cpu]->nr_pages; in ring_buffer_subbuf_order_set()
6893 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_subbuf_order_set()
6898 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_subbuf_order_set()
6901 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6962 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_subbuf_order_set()
6963 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
7043 rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu) in rb_get_mapped_buffer() argument
7047 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in rb_get_mapped_buffer()
7050 cpu_buffer = buffer->buffers[cpu]; in rb_get_mapped_buffer()
7216 int ring_buffer_map(struct trace_buffer *buffer, int cpu, in ring_buffer_map() argument
7223 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_map()
7226 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_map()
7279 int ring_buffer_unmap(struct trace_buffer *buffer, int cpu) in ring_buffer_unmap() argument
7284 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_unmap()
7287 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unmap()
7316 int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu) in ring_buffer_map_get_reader() argument
7324 cpu_buffer = rb_get_mapped_buffer(buffer, cpu); in ring_buffer_map_get_reader()
7391 cpu, missed_events, cpu_buffer->reader_page->page->time_stamp); in ring_buffer_map_get_reader()
7417 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node) in trace_rb_cpu_prepare() argument
7425 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
7443 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
7444 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in trace_rb_cpu_prepare()
7445 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
7447 cpu); in trace_rb_cpu_prepare()
7451 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
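trace_rb_cpu_prepare() at line 7417 is a CPU-hotplug "prepare" callback: when a CPU comes up it allocates that CPU's ring_buffer_per_cpu if it does not exist yet and sets the bit in the buffer's cpumask. It follows the multi-instance hotplug pattern, where each buffer is registered as an instance against one shared state. A hedged sketch of that pattern (the callback, names, and dynamic state below are illustrative, not the tracing core's actual registration):

#include <linux/cpuhotplug.h>
#include <linux/list.h>
#include <linux/printk.h>

struct my_rb_instance {			/* hypothetical per-buffer state */
	struct hlist_node node;
};

/* Called on the control CPU before "cpu" starts running: allocate or
 * validate per-CPU state; returning nonzero aborts the CPU bring-up. */
static int my_rb_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct my_rb_instance *inst = hlist_entry(node, struct my_rb_instance, node);

	pr_info("preparing ring-buffer state for CPU %u (%p)\n", cpu, inst);
	return 0;
}

static int my_rb_state;

static int my_rb_hotplug_init(struct my_rb_instance *inst)
{
	int ret;

	/* Dynamic prepare-stage state; setup returns the allocated state number. */
	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "my/rb:prepare",
				      my_rb_prepare, NULL);
	if (ret < 0)
		return ret;

	my_rb_state = ret;
	return cpuhp_state_add_instance(my_rb_state, &inst->node);
}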
7487 int cpu; member
7594 int cpu = smp_processor_id(); in rb_ipi() local
7596 data = &rb_data[cpu]; in rb_ipi()
7617 int cpu; in test_ringbuffer() local
7634 for_each_online_cpu(cpu) { in test_ringbuffer()
7635 rb_data[cpu].buffer = buffer; in test_ringbuffer()
7636 rb_data[cpu].cpu = cpu; in test_ringbuffer()
7637 rb_data[cpu].cnt = cpu; in test_ringbuffer()
7638 rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu], in test_ringbuffer()
7639 cpu, "rbtester/%u"); in test_ringbuffer()
7640 if (WARN_ON(IS_ERR(rb_threads[cpu]))) { in test_ringbuffer()
7642 ret = PTR_ERR(rb_threads[cpu]); in test_ringbuffer()
7675 for_each_online_cpu(cpu) { in test_ringbuffer()
7676 if (!rb_threads[cpu]) in test_ringbuffer()
7678 kthread_stop(rb_threads[cpu]); in test_ringbuffer()
7687 for_each_online_cpu(cpu) { in test_ringbuffer()
7689 struct rb_test_data *data = &rb_data[cpu]; in test_ringbuffer()
7713 pr_info("CPU %d:\n", cpu); in test_ringbuffer()
7726 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { in test_ringbuffer()
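The selftest's consume loop on the last line is the canonical drain pattern. A variant that also reports per-event timestamps, using ring_buffer_normalize_time_stamp() from line 1117 to convert the raw buffer clock (my_dump_all and the log format are hypothetical):

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/ring_buffer.h>

/* Hypothetical non-blocking drain of every online CPU with timestamps. */
static void my_dump_all(struct trace_buffer *buffer)
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;
	int cpu;

	for_each_online_cpu(cpu) {
		while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
			ring_buffer_normalize_time_stamp(buffer, cpu, &ts);
			pr_info("CPU %d @ %llu: %u bytes (%lu lost)\n",
				cpu, (unsigned long long)ts,
				ring_buffer_event_length(event), lost);
		}
	}
}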