Lines Matching refs: cpu_buffer (kernel/trace/ring_buffer.c)

560 	struct ring_buffer_per_cpu	*cpu_buffer;  member
621 static void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event() argument
624 struct buffer_page *page = cpu_buffer->commit_page; in verify_event()
625 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page); in verify_event()
648 static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event() argument
695 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp() local
702 return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp); in ring_buffer_event_time_stamp()
705 nest = local_read(&cpu_buffer->committing); in ring_buffer_event_time_stamp()
706 verify_event(cpu_buffer, event); in ring_buffer_event_time_stamp()
712 return cpu_buffer->event_stamp[nest]; in ring_buffer_event_time_stamp()
718 rb_time_read(&cpu_buffer->write_stamp, &ts); in ring_buffer_event_time_stamp()
756 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit() local
760 nr_pages = cpu_buffer->nr_pages; in full_hit()
790 struct ring_buffer_per_cpu *cpu_buffer = in rb_wake_up_waiters() local
794 raw_spin_lock(&cpu_buffer->reader_lock); in rb_wake_up_waiters()
799 cpu_buffer->shortest_full = 0; in rb_wake_up_waiters()
800 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_wake_up_waiters()
816 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wake_waiters() local
835 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
837 if (!cpu_buffer) in ring_buffer_wake_waiters()
839 rbwork = &cpu_buffer->irq_work; in ring_buffer_wake_waiters()
848 struct ring_buffer_per_cpu *cpu_buffer; in rb_watermark_hit() local
855 cpu_buffer = buffer->buffers[cpu]; in rb_watermark_hit()
864 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_watermark_hit()
865 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in rb_watermark_hit()
868 if (!ret && (!cpu_buffer->shortest_full || in rb_watermark_hit()
869 cpu_buffer->shortest_full > full)) { in rb_watermark_hit()
870 cpu_buffer->shortest_full = full; in rb_watermark_hit()
872 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_watermark_hit()
947 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wait() local
965 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
966 rbwork = &cpu_buffer->irq_work; in ring_buffer_wait()
1006 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait() local
1016 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1017 rbwork = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
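
The block above (full_hit, rb_wake_up_waiters, ring_buffer_wake_waiters, rb_watermark_hit, ring_buffer_wait, ring_buffer_poll_wait) is the reader wait/wakeup machinery: a reader asks to be woken once a CPU buffer is at least "full" percent dirty, rb_watermark_hit() records the smallest such percentage in shortest_full under reader_lock, and the writer side's irq_work wakes the waiters. A minimal sketch of that bookkeeping, using demo struct and helper names that are not the kernel's own:

#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical mirror of the per-CPU fields these lines touch. */
struct demo_cpu_buffer {
        raw_spinlock_t  reader_lock;
        size_t          nr_pages;
        size_t          dirty_pages;    /* assumed stand-in for the dirty-page count */
        int             shortest_full;  /* smallest watermark any waiter asked for */
};

/* Has the buffer reached "full" percent of its pages? (cf. full_hit()) */
static bool demo_full_hit(struct demo_cpu_buffer *cpu_buffer, int full)
{
        if (!cpu_buffer->nr_pages || !full)
                return true;

        return (cpu_buffer->dirty_pages * 100) >= (full * cpu_buffer->nr_pages);
}

/* Check the watermark and remember the smallest one still being waited on. */
static bool demo_watermark_hit(struct demo_cpu_buffer *cpu_buffer, int full)
{
        unsigned long flags;
        bool hit;

        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        hit = demo_full_hit(cpu_buffer, full);
        if (!hit && (!cpu_buffer->shortest_full ||
                     cpu_buffer->shortest_full > full))
                cpu_buffer->shortest_full = full;
        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        return hit;
}

rb_wake_up_waiters() then resets shortest_full to 0 under the same reader_lock once the full waiters have been woken, as line 799 above shows.
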
1258 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate() argument
1262 head = cpu_buffer->head_page; in rb_head_page_activate()
1271 if (cpu_buffer->ring_meta) { in rb_head_page_activate()
1272 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rb_head_page_activate()
1288 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate() argument
1293 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1295 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1299 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set() argument
1322 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update() argument
1327 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_update()
1331 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head() argument
1336 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_head()
1340 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal() argument
1345 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_normal()
1357 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page() argument
1364 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
1368 list = cpu_buffer->pages; in rb_set_head_page()
1369 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
1372 page = head = cpu_buffer->head_page; in rb_set_head_page()
1382 cpu_buffer->head_page = page; in rb_set_head_page()
1389 RB_WARN_ON(cpu_buffer, 1); in rb_set_head_page()
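
rb_head_page_activate/deactivate() and the rb_head_page_set_*() helpers mark which sub-buffer is currently the HEAD by tagging the low bits of the list pointer that points at it; rb_list_head() strips the tag (RB_FLAG_MASK, line 1475) so ordinary list walking still works, and the writer can later move the head with a cmpxchg on that tagged pointer rather than taking a lock. A sketch of just the tagging and stripping, with assumed flag values (the real definitions live in ring_buffer.c):

#include <linux/list.h>

/* Assumed encoding in the two low bits of ->prev->next (cf. RB_FLAG_MASK). */
#define DEMO_PAGE_NORMAL        0UL
#define DEMO_PAGE_HEAD          1UL
#define DEMO_PAGE_UPDATE        2UL
#define DEMO_FLAG_MASK          3UL

/* Strip the flag bits to recover the real list pointer (cf. rb_list_head()). */
static struct list_head *demo_list_head(struct list_head *list)
{
        unsigned long val = (unsigned long)list;

        return (struct list_head *)(val & ~DEMO_FLAG_MASK);
}

/* Tag the page that @prev points at as the head page. */
static void demo_set_head(struct list_head *prev)
{
        unsigned long ptr = (unsigned long)demo_list_head(prev->next);

        prev->next = (struct list_head *)(ptr | DEMO_PAGE_HEAD);
}

/* Drop the tag again (cf. rb_list_head_clear()). */
static void demo_set_normal(struct list_head *prev)
{
        prev->next = demo_list_head(prev->next);
}
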
1409 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update() argument
1439 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1465 if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page)) in rb_tail_page_update()
1466 local_inc(&cpu_buffer->pages_touched); in rb_tail_page_update()
1470 static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage() argument
1475 RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK); in rb_check_bpage()
1490 static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages() argument
1492 struct list_head *head = rb_list_head(cpu_buffer->pages); in rb_check_pages()
1495 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1499 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1504 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1508 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1592 static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx) in rb_range_buffer() argument
1598 meta = rb_range_meta(cpu_buffer->buffer, 0, cpu_buffer->cpu); in rb_range_buffer()
1613 if (ptr + subbuf_size > cpu_buffer->buffer->range_addr_end) in rb_range_buffer()
1750 static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_meta_validate_events() argument
1752 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rb_meta_validate_events()
1763 ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu); in rb_meta_validate_events()
1769 entry_bytes += local_read(&cpu_buffer->reader_page->page->commit); in rb_meta_validate_events()
1770 local_set(&cpu_buffer->reader_page->entries, ret); in rb_meta_validate_events()
1772 head_page = cpu_buffer->head_page; in rb_meta_validate_events()
1775 if (head_page == cpu_buffer->reader_page && in rb_meta_validate_events()
1776 head_page == cpu_buffer->commit_page) in rb_meta_validate_events()
1783 if (head_page == cpu_buffer->reader_page) in rb_meta_validate_events()
1786 ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu); in rb_meta_validate_events()
1789 cpu_buffer->cpu); in rb_meta_validate_events()
1794 local_set(&cpu_buffer->head_page->entries, ret); in rb_meta_validate_events()
1796 if (head_page == cpu_buffer->commit_page) in rb_meta_validate_events()
1800 if (head_page != cpu_buffer->commit_page) { in rb_meta_validate_events()
1802 cpu_buffer->cpu); in rb_meta_validate_events()
1806 local_set(&cpu_buffer->entries, entries); in rb_meta_validate_events()
1807 local_set(&cpu_buffer->entries_bytes, entry_bytes); in rb_meta_validate_events()
1809 pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu); in rb_meta_validate_events()
1818 local_set(&cpu_buffer->reader_page->entries, 0); in rb_meta_validate_events()
1819 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_meta_validate_events()
1901 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_start() local
1902 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rbm_start()
1926 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_show() local
1927 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rbm_show()
1973 static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_meta_buffer_update() argument
1976 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rb_meta_buffer_update()
1979 cpu_buffer->head_page = bpage; in rb_meta_buffer_update()
1982 cpu_buffer->commit_page = bpage; in rb_meta_buffer_update()
1983 cpu_buffer->tail_page = bpage; in rb_meta_buffer_update()
1987 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in __rb_allocate_pages() argument
1990 struct trace_buffer *buffer = cpu_buffer->buffer; in __rb_allocate_pages()
2028 meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu); in __rb_allocate_pages()
2034 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
2038 rb_check_bpage(cpu_buffer, bpage); in __rb_allocate_pages()
2048 bpage->page = rb_range_buffer(cpu_buffer, i + 1); in __rb_allocate_pages()
2053 rb_meta_buffer_update(cpu_buffer, bpage); in __rb_allocate_pages()
2057 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), in __rb_allocate_pages()
2059 cpu_buffer->buffer->subbuf_order); in __rb_allocate_pages()
2065 bpage->order = cpu_buffer->buffer->subbuf_order; in __rb_allocate_pages()
2086 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages() argument
2093 if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages)) in rb_allocate_pages()
2101 cpu_buffer->pages = pages.next; in rb_allocate_pages()
2104 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
2106 rb_check_pages(cpu_buffer); in rb_allocate_pages()
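
__rb_allocate_pages() and rb_allocate_pages() build each CPU's list of sub-buffers with node-local allocations: the buffer_page descriptor is kzalloc'd on cpu_to_node(cpu) and the data page comes from alloc_pages_node() at the buffer's subbuf_order (lines 2034 and 2057-2059 above). A minimal sketch of that allocation pattern, using demo types rather than the kernel's:

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/topology.h>

struct demo_buffer_page {
        struct list_head list;
        void *page;
};

/* Allocate one sub-buffer on the memory node of @cpu (cf. __rb_allocate_pages()). */
static struct demo_buffer_page *demo_alloc_bpage(int cpu, unsigned int order)
{
        struct demo_buffer_page *bpage;
        struct page *page;

        bpage = kzalloc_node(sizeof(*bpage), GFP_KERNEL, cpu_to_node(cpu));
        if (!bpage)
                return NULL;

        page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_ZERO, order);
        if (!page) {
                kfree(bpage);
                return NULL;
        }
        bpage->page = page_address(page);

        return bpage;
}
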
2114 struct ring_buffer_per_cpu *cpu_buffer; in rb_allocate_cpu_buffer() local
2120 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), in rb_allocate_cpu_buffer()
2122 if (!cpu_buffer) in rb_allocate_cpu_buffer()
2125 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
2126 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
2127 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
2128 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
2129 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
2130 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
2131 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
2132 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
2133 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
2134 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
2135 mutex_init(&cpu_buffer->mapping_lock); in rb_allocate_cpu_buffer()
2142 rb_check_bpage(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
2144 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
2151 cpu_buffer->mapped = 1; in rb_allocate_cpu_buffer()
2152 cpu_buffer->ring_meta = rb_range_meta(buffer, nr_pages, cpu); in rb_allocate_cpu_buffer()
2153 bpage->page = rb_range_buffer(cpu_buffer, 0); in rb_allocate_cpu_buffer()
2156 if (cpu_buffer->ring_meta->head_buffer) in rb_allocate_cpu_buffer()
2157 rb_meta_buffer_update(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
2162 cpu_buffer->buffer->subbuf_order); in rb_allocate_cpu_buffer()
2169 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
2170 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
2172 ret = rb_allocate_pages(cpu_buffer, nr_pages); in rb_allocate_cpu_buffer()
2176 rb_meta_validate_events(cpu_buffer); in rb_allocate_cpu_buffer()
2179 meta = cpu_buffer->ring_meta; in rb_allocate_cpu_buffer()
2181 !cpu_buffer->head_page || !cpu_buffer->commit_page || !cpu_buffer->tail_page) { in rb_allocate_cpu_buffer()
2183 (cpu_buffer->head_page || cpu_buffer->commit_page || cpu_buffer->tail_page)) { in rb_allocate_cpu_buffer()
2185 if (!cpu_buffer->head_page) in rb_allocate_cpu_buffer()
2187 if (!cpu_buffer->commit_page) in rb_allocate_cpu_buffer()
2189 if (!cpu_buffer->tail_page) in rb_allocate_cpu_buffer()
2193 cpu_buffer->head_page in rb_allocate_cpu_buffer()
2194 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
2195 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
2197 rb_head_page_activate(cpu_buffer); in rb_allocate_cpu_buffer()
2199 if (cpu_buffer->ring_meta) in rb_allocate_cpu_buffer()
2203 rb_head_page_activate(cpu_buffer); in rb_allocate_cpu_buffer()
2206 return cpu_buffer; in rb_allocate_cpu_buffer()
2209 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
2212 kfree(cpu_buffer); in rb_allocate_cpu_buffer()
2216 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer() argument
2218 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
2221 irq_work_sync(&cpu_buffer->irq_work.work); in rb_free_cpu_buffer()
2223 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
2226 rb_head_page_deactivate(cpu_buffer); in rb_free_cpu_buffer()
2236 free_page((unsigned long)cpu_buffer->free_page); in rb_free_cpu_buffer()
2238 kfree(cpu_buffer); in rb_free_cpu_buffer()
2478 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages() argument
2489 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
2490 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
2500 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
2506 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
2519 cpu_buffer->pages_removed += nr_removed; in rb_remove_pages()
2534 cpu_buffer->pages = next_page; in rb_remove_pages()
2538 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
2542 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
2543 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
2545 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
2567 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
2568 local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes); in rb_remove_pages()
2569 local_inc(&cpu_buffer->pages_lost); in rb_remove_pages()
2581 RB_WARN_ON(cpu_buffer, nr_removed); in rb_remove_pages()
2587 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages() argument
2589 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
2595 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2616 struct buffer_page *hpage = rb_set_head_page(cpu_buffer); in rb_insert_pages()
2652 RB_WARN_ON(cpu_buffer, !success); in rb_insert_pages()
2653 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2658 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
2667 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages() argument
2671 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
2672 success = rb_insert_pages(cpu_buffer); in rb_update_pages()
2674 success = rb_remove_pages(cpu_buffer, in rb_update_pages()
2675 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
2678 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
2683 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler() local
2685 rb_update_pages(cpu_buffer); in update_pages_handler()
2686 complete(&cpu_buffer->update_done); in update_pages_handler()
2702 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize() local
2734 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2735 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2743 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2745 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2746 cpu_buffer->nr_pages; in ring_buffer_resize()
2750 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
2756 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2757 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2758 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2774 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2775 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2780 rb_update_pages(cpu_buffer); in ring_buffer_resize()
2781 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2788 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2790 update_pages_handler(&cpu_buffer->update_pages_work); in ring_buffer_resize()
2798 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2799 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2803 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2804 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2809 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2811 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
2819 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2824 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2825 cpu_buffer->nr_pages; in ring_buffer_resize()
2827 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2828 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
2829 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2830 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2839 rb_update_pages(cpu_buffer); in ring_buffer_resize()
2844 rb_update_pages(cpu_buffer); in ring_buffer_resize()
2849 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2850 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2854 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2878 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2879 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_resize()
2880 rb_check_pages(cpu_buffer); in ring_buffer_resize()
2881 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_resize()
2894 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2895 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2897 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
2900 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
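
ring_buffer_resize() drives all of the add/remove machinery above: per CPU it computes nr_pages_to_update, pre-allocates new_pages, then either runs rb_update_pages() directly or schedules update_pages_work on the owning CPU and waits for update_done. From a caller's side it is one exported call; a hedged usage sketch (RING_BUFFER_ALL_CPUS is the "resize every CPU" value from <linux/ring_buffer.h> as I recall it):

#include <linux/ring_buffer.h>

/* Resize every per-CPU buffer to @size bytes; may sleep and may fail (e.g. -ENOMEM). */
static int demo_resize_all(struct trace_buffer *buffer, unsigned long size)
{
        return ring_buffer_resize(buffer, size, RING_BUFFER_ALL_CPUS);
}

/* Resize only @cpu's buffer. */
static int demo_resize_one(struct trace_buffer *buffer, unsigned long size, int cpu)
{
        return ring_buffer_resize(buffer, size, cpu);
}
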
2930 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event() argument
2932 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
2933 cpu_buffer->reader_page->read); in rb_reader_event()
3002 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index() argument
3004 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
3008 rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) in rb_event_index() argument
3012 addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1; in rb_event_index()
3019 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter() local
3027 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
3028 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
3047 static void rb_update_meta_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_meta_head() argument
3050 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rb_update_meta_head()
3064 static void rb_update_meta_reader(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_meta_reader() argument
3067 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rb_update_meta_reader()
3068 void *old_reader = cpu_buffer->reader_page->page; in rb_update_meta_reader()
3073 cpu_buffer->reader_page->id = id; in rb_update_meta_reader()
3080 rb_update_meta_head(cpu_buffer, reader); in rb_update_meta_reader()
3091 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page() argument
3107 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, in rb_handle_head_page()
3128 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
3129 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes); in rb_handle_head_page()
3130 local_inc(&cpu_buffer->pages_lost); in rb_handle_head_page()
3132 if (cpu_buffer->ring_meta) in rb_handle_head_page()
3133 rb_update_meta_head(cpu_buffer, next_page); in rb_handle_head_page()
3163 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ in rb_handle_head_page()
3184 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, in rb_handle_head_page()
3201 RB_WARN_ON(cpu_buffer, 1); in rb_handle_head_page()
3218 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
3225 rb_head_page_set_normal(cpu_buffer, new_head, in rb_handle_head_page()
3236 ret = rb_head_page_set_normal(cpu_buffer, next_page, in rb_handle_head_page()
3239 if (RB_WARN_ON(cpu_buffer, in rb_handle_head_page()
3248 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail() argument
3251 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_reset_tail()
3315 local_add(bsize - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
3325 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
3331 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail() argument
3335 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
3336 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
3350 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
3374 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
3380 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
3384 ret = rb_handle_head_page(cpu_buffer, in rb_move_tail()
3402 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
3403 cpu_buffer->tail_page) && in rb_move_tail()
3404 (cpu_buffer->commit_page == in rb_move_tail()
3405 cpu_buffer->reader_page))) { in rb_move_tail()
3406 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
3412 rb_tail_page_update(cpu_buffer, tail_page, next_page); in rb_move_tail()
3416 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
3419 rb_end_commit(cpu_buffer); in rb_move_tail()
3421 local_inc(&cpu_buffer->committing); in rb_move_tail()
3428 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
3435 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_time_stamp() argument
3444 if (abs || rb_event_index(cpu_buffer, event)) { in rb_add_time_stamp()
3464 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_timestamp() argument
3474 (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}), in rb_check_timestamp()
3482 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_timestamp() argument
3514 rb_check_timestamp(cpu_buffer, info); in rb_add_timestamp()
3518 *event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs); in rb_add_timestamp()
3535 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event() argument
3541 unsigned int nest = local_read(&cpu_buffer->committing) - 1; in rb_update_event()
3544 cpu_buffer->event_stamp[nest] = info->ts; in rb_update_event()
3551 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); in rb_update_event()
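
rb_add_time_stamp()/rb_add_timestamp() exist because a data event's header only carries a small delta relative to write_stamp; if the gap since the previous event does not fit, or an absolute timestamp was requested, a separate time-extend/time-stamp event is injected in front of the data event. A simplified sketch of that decision; the 27-bit delta width is an assumption taken from the event header layout (5 bits of type_len plus 27 bits of time_delta):

#include <linux/types.h>

/* Assumed: deltas wider than 27 bits cannot live in the event header itself. */
#define DEMO_MAX_DELTA  ((1ULL << 27) - 1)

struct demo_event_info {
        u64     ts;             /* clock reading for this event */
        u64     delta;          /* value that will actually be stored */
        bool    add_timestamp;  /* inject a time-extend/time-stamp event first? */
};

/* Decide how this event encodes its time (simplified from rb_add_timestamp()). */
static void demo_plan_timestamp(struct demo_event_info *info, u64 write_stamp, bool abs)
{
        u64 delta = info->ts - write_stamp;

        if (abs || delta > DEMO_MAX_DELTA) {
                /* Too far from the last event, or an absolute stamp was requested. */
                info->add_timestamp = true;
                info->delta = abs ? info->ts : delta;
        } else {
                info->add_timestamp = false;
                info->delta = delta;
        }
}
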
3595 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard() argument
3602 new_index = rb_event_index(cpu_buffer, event); in rb_try_to_discard()
3605 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_try_to_discard()
3607 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
3629 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_try_to_discard()
3651 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
3660 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit() argument
3662 local_inc(&cpu_buffer->committing); in rb_start_commit()
3663 local_inc(&cpu_buffer->commits); in rb_start_commit()
3667 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write() argument
3680 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
3682 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
3683 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
3685 if (RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
3686 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3692 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3693 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3694 rb_inc_page(&cpu_buffer->commit_page); in rb_set_commit_to_write()
3695 if (cpu_buffer->ring_meta) { in rb_set_commit_to_write()
3696 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rb_set_commit_to_write()
3697 meta->commit_buffer = (unsigned long)cpu_buffer->commit_page->page; in rb_set_commit_to_write()
3702 while (rb_commit_index(cpu_buffer) != in rb_set_commit_to_write()
3703 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
3707 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3708 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3709 RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
3710 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
3723 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3727 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit() argument
3731 if (RB_WARN_ON(cpu_buffer, in rb_end_commit()
3732 !local_read(&cpu_buffer->committing))) in rb_end_commit()
3736 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
3739 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
3740 rb_set_commit_to_write(cpu_buffer); in rb_end_commit()
3742 local_dec(&cpu_buffer->committing); in rb_end_commit()
3752 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
3753 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
3754 local_inc(&cpu_buffer->committing); in rb_end_commit()
3772 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit() argument
3774 local_inc(&cpu_buffer->entries); in rb_commit()
3775 rb_end_commit(cpu_buffer); in rb_commit()
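
rb_start_commit()/rb_end_commit() implement the nested-commit protocol that the lines above spell out: every writer on a CPU bumps committing and commits, only the outermost writer (committing == 1) pushes the commit page forward via rb_set_commit_to_write(), and the re-check of commits after the decrement catches an interrupting writer that slipped in between. A simplified model of that protocol with demo fields, leaving out the commit-page walking itself:

#include <linux/compiler.h>
#include <asm/local.h>

struct demo_commit_state {
        local_t committing;     /* nesting depth of writers on this CPU */
        local_t commits;        /* commits started; re-checked on the way out */
};

static void demo_start_commit(struct demo_commit_state *cs)
{
        local_inc(&cs->committing);
        local_inc(&cs->commits);
}

/* Stand-in for rb_set_commit_to_write(): advance the commit page to the tail page. */
static void demo_push_commit_page(struct demo_commit_state *cs)
{
}

static void demo_end_commit(struct demo_commit_state *cs)
{
        unsigned long commits;

 again:
        commits = local_read(&cs->commits);
        /* synchronize with interrupts */
        barrier();
        if (local_read(&cs->committing) == 1)
                demo_push_commit_page(cs);

        local_dec(&cs->committing);

        /* synchronize with interrupts */
        barrier();

        /*
         * An interrupt may have started another commit between reading
         * "commits" and dropping "committing"; if so, become the outermost
         * writer again and finish on its behalf (cf. rb_end_commit()).
         */
        if (unlikely(local_read(&cs->commits) != commits) &&
            !local_read(&cs->committing)) {
                local_inc(&cs->committing);
                goto again;
        }
}
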
3779 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
3787 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
3788 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
3790 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3793 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) in rb_wakeups()
3796 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in rb_wakeups()
3799 if (!cpu_buffer->irq_work.full_waiters_pending) in rb_wakeups()
3802 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); in rb_wakeups()
3804 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3807 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
3808 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
3810 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3883 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock() argument
3885 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
3890 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { in trace_recursive_lock()
3897 if (val & (1 << (bit + cpu_buffer->nest))) { in trace_recursive_lock()
3903 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
3904 cpu_buffer->current_context = val; in trace_recursive_lock()
3910 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock() argument
3912 cpu_buffer->current_context &= in trace_recursive_unlock()
3913 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
3934 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_start() local
3940 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3942 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
3954 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_end() local
3959 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3961 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
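
trace_recursive_lock()/unlock() keep one bit per trace context (NMI, IRQ, softirq, task) in current_context, so a writer interrupted by another writer in the same context is rejected instead of corrupting its own per-CPU commit; ring_buffer_nest_start()/end() shift those bits by NESTED_BITS when a nested write is intentional. The unlock expression `ctx &= ctx - (1 << nest)` clears the lowest context bit at or above nest, which is the most recently taken one since contexts unwind in order. A small sketch of that bit trick with assumed bit positions:

#include <linux/types.h>

/* Assumed context bit order; the kernel defines its own enum for this. */
enum { DEMO_CTX_NMI, DEMO_CTX_IRQ, DEMO_CTX_SOFTIRQ, DEMO_CTX_NORMAL };

struct demo_ctx_state {
        unsigned int current_context;
        unsigned int nest;      /* bumped by ring_buffer_nest_start() */
};

static bool demo_recursive_lock(struct demo_ctx_state *cs, int bit)
{
        unsigned int val = cs->current_context;

        if (val & (1 << (bit + cs->nest)))
                return false;   /* already writing in this context: recursion */

        val |= 1 << (bit + cs->nest);
        cs->current_context = val;
        return true;
}

static void demo_recursive_unlock(struct demo_ctx_state *cs)
{
        /* Clears the lowest bit set at or above "nest": the bit taken last. */
        cs->current_context &=
                cs->current_context - (1 << cs->nest);
}
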
3975 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit() local
3978 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
3980 rb_commit(cpu_buffer); in ring_buffer_unlock_commit()
3982 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
3984 trace_recursive_unlock(cpu_buffer); in ring_buffer_unlock_commit()
4127 atomic_inc(&cpu_buffer->record_disabled); \
4141 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer() argument
4174 ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta); in check_buffer()
4178 cpu_buffer->cpu, ts, delta); in check_buffer()
4185 cpu_buffer->cpu, in check_buffer()
4194 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer() argument
4202 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next() argument
4210 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
4214 rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
4215 rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
4217 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
4242 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); in __rb_reserve_next()
4252 if (unlikely(write > cpu_buffer->buffer->subbuf_size)) { in __rb_reserve_next()
4253 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE); in __rb_reserve_next()
4254 return rb_move_tail(cpu_buffer, tail, info); in __rb_reserve_next()
4259 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); in __rb_reserve_next()
4273 check_buffer(cpu_buffer, info, tail); in __rb_reserve_next()
4279 rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
4287 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
4288 rb_time_set(&cpu_buffer->before_stamp, ts); in __rb_reserve_next()
4291 /*E*/ rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
4327 rb_update_event(cpu_buffer, event, info); in __rb_reserve_next()
4339 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
4346 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event() argument
4360 rb_start_commit(cpu_buffer); in rb_reserve_next_event()
4371 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
4372 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
4373 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
4380 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
4383 if (info.length > cpu_buffer->buffer->max_data_size) in rb_reserve_next_event()
4402 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) in rb_reserve_next_event()
4405 event = __rb_reserve_next(cpu_buffer, &info); in rb_reserve_next_event()
4416 rb_end_commit(cpu_buffer); in rb_reserve_next_event()
4438 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve() local
4453 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
4455 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
4461 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_lock_reserve()
4464 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
4471 trace_recursive_unlock(cpu_buffer); in ring_buffer_lock_reserve()
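
ring_buffer_lock_reserve() is the exported entry into this write path: it picks the current CPU's buffer, checks record_disabled, takes the recursion lock and runs rb_reserve_next_event()/__rb_reserve_next(); the matching ring_buffer_unlock_commit() (lines 3975-3984 above) commits and may kick the waiter irq_work. A hedged producer sketch; the single-argument unlock_commit matches the listing (older kernels also passed the event), and ring_buffer_event_data() is assumed from <linux/ring_buffer.h>:

#include <linux/errno.h>
#include <linux/ring_buffer.h>

struct demo_entry {
        int     value;
};

/* Reserve space on the current CPU's buffer, fill it, and commit it. */
static int demo_write_event(struct trace_buffer *buffer, int value)
{
        struct ring_buffer_event *event;
        struct demo_entry *entry;

        event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
        if (!event)
                return -EBUSY;  /* buffer full or disabled, or recursion detected */

        entry = ring_buffer_event_data(event);
        entry->value = value;

        return ring_buffer_unlock_commit(buffer);
}
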
4485 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry() argument
4489 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
4492 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_decrement_entry()
4515 RB_WARN_ON(cpu_buffer, 1); in rb_decrement_entry()
4540 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit() local
4547 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
4554 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
4556 rb_decrement_entry(cpu_buffer, event); in ring_buffer_discard_commit()
4557 if (rb_try_to_discard(cpu_buffer, event)) in ring_buffer_discard_commit()
4561 rb_end_commit(cpu_buffer); in ring_buffer_discard_commit()
4563 trace_recursive_unlock(cpu_buffer); in ring_buffer_discard_commit()
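
ring_buffer_discard_commit() is the "never mind" path: a caller that reserved an event and then decides (a filter, say) that it should not be kept hands it back; rb_decrement_entry() and rb_try_to_discard() either rewind the write pointer in place or turn the event into padding, and the commit is still ended. A brief sketch under the same assumptions as the reserve/commit example above:

static int demo_write_filtered(struct trace_buffer *buffer, int value)
{
        struct ring_buffer_event *event;
        int *p;

        event = ring_buffer_lock_reserve(buffer, sizeof(*p));
        if (!event)
                return -EBUSY;

        p = ring_buffer_event_data(event);
        *p = value;

        if (value < 0) {
                /* Filter rejected the event: discard instead of committing. */
                ring_buffer_discard_commit(buffer, event);
                return 0;
        }

        return ring_buffer_unlock_commit(buffer);
}
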
4587 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write() local
4603 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
4605 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
4611 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_write()
4614 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
4622 rb_commit(cpu_buffer); in ring_buffer_write()
4624 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
4629 trace_recursive_unlock(cpu_buffer); in ring_buffer_write()
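
ring_buffer_write() folds reserve, copy and commit into one call for callers that already have the payload in hand; like the reserve path it checks record_disabled and the recursion lock, copies into the reserved event, then runs rb_commit() and rb_wakeups(). Usage is a single call (same assumptions as above):

/* Copy an existing payload into the current CPU's buffer in one call. */
static int demo_write_blob(struct trace_buffer *buffer, void *data, unsigned long len)
{
        return ring_buffer_write(buffer, len, data);
}
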
4638 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty() argument
4640 struct buffer_page *reader = cpu_buffer->reader_page; in rb_per_cpu_empty()
4641 struct buffer_page *head = rb_set_head_page(cpu_buffer); in rb_per_cpu_empty()
4642 struct buffer_page *commit = cpu_buffer->commit_page; in rb_per_cpu_empty()
4787 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu() local
4792 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4793 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
4807 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu() local
4812 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4813 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
4824 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries() argument
4826 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
4827 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
4838 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts() local
4845 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4846 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4851 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
4852 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
4854 bpage = rb_set_head_page(cpu_buffer); in ring_buffer_oldest_event_ts()
4857 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4870 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu() local
4876 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4877 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
4890 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu() local
4895 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4897 return rb_num_of_entries(cpu_buffer); in ring_buffer_entries_cpu()
4909 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu() local
4915 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4916 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
4932 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu() local
4938 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4939 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
4954 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu() local
4960 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4961 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
4975 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu() local
4980 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4981 return cpu_buffer->read; in ring_buffer_read_events_cpu()
4994 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries() local
5000 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
5001 entries += rb_num_of_entries(cpu_buffer); in ring_buffer_entries()
5017 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns() local
5023 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
5024 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
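
The accessors above all reduce to a few per-CPU counters, and rb_num_of_entries() is simply entries - (overrun + read): what was written minus what was overwritten or already consumed. A small sketch that polls the exported per-CPU counters (names as found in <linux/ring_buffer.h>):

#include <linux/printk.h>
#include <linux/ring_buffer.h>

static void demo_dump_cpu_stats(struct trace_buffer *buffer, int cpu)
{
        pr_info("cpu%d: entries=%lu overrun=%lu commit_overrun=%lu dropped=%lu read=%lu\n",
                cpu,
                ring_buffer_entries_cpu(buffer, cpu),
                ring_buffer_overrun_cpu(buffer, cpu),
                ring_buffer_commit_overrun_cpu(buffer, cpu),
                ring_buffer_dropped_events_cpu(buffer, cpu),
                ring_buffer_read_events_cpu(buffer, cpu));
}
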
5033 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset() local
5036 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
5037 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
5041 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
5042 iter->cache_pages_removed = cpu_buffer->pages_removed; in rb_iter_reset()
5045 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
5046 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; in rb_iter_reset()
5062 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset() local
5068 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
5070 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
5072 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
5082 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty() local
5091 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
5092 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
5093 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
5094 commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
5108 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
5120 iter->head == rb_page_size(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
5125 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp() argument
5136 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
5141 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); in rb_update_read_stamp()
5142 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
5146 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
5150 RB_WARN_ON(cpu_buffer, 1); in rb_update_read_stamp()
5180 RB_WARN_ON(iter->cpu_buffer, 1); in rb_update_iter_read_stamp()
5185 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page() argument
5188 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_get_reader_page()
5195 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
5204 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { in rb_get_reader_page()
5209 reader = cpu_buffer->reader_page; in rb_get_reader_page()
5212 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
5216 if (RB_WARN_ON(cpu_buffer, in rb_get_reader_page()
5217 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
5222 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
5226 if (rb_num_of_entries(cpu_buffer) == 0) in rb_get_reader_page()
5232 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
5233 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
5234 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_get_reader_page()
5235 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
5241 reader = rb_set_head_page(cpu_buffer); in rb_get_reader_page()
5244 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
5245 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
5252 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
5255 rb_set_list_to_head(&cpu_buffer->reader_page->list); in rb_get_reader_page()
5267 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
5280 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
5288 if (cpu_buffer->ring_meta) in rb_get_reader_page()
5289 rb_update_meta_reader(cpu_buffer, reader); in rb_get_reader_page()
5296 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
5297 rb_inc_page(&cpu_buffer->head_page); in rb_get_reader_page()
5299 local_inc(&cpu_buffer->pages_read); in rb_get_reader_page()
5302 cpu_buffer->reader_page = reader; in rb_get_reader_page()
5303 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
5305 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
5306 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
5307 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
5315 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
5317 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
5337 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT)) in rb_get_reader_page()
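
rb_get_reader_page() is the core of the read side: the reader's spare page is spliced into the ring where the head page sits, rb_head_page_replace() cmpxchg's the HEAD-tagged pointer so a concurrent writer sees either the old head or the spare, and the old head becomes the reader_page the reader now owns exclusively. A deliberately simplified model of that swap that shows only the splice, under a plain lock; it is not the real lockless algorithm:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_page {
        struct list_head list;
        void *data;
};

struct demo_ring {
        spinlock_t lock;
        struct list_head *head;         /* oldest page still in the ring */
        struct demo_page *reader_page;  /* spare page owned by the reader */
};

static struct demo_page *demo_get_reader_page(struct demo_ring *ring)
{
        struct demo_page *reader, *spare = ring->reader_page;

        spin_lock(&ring->lock);

        reader = list_entry(ring->head, struct demo_page, list);

        /* Splice the spare page into the ring where the head page was. */
        list_replace(&reader->list, &spare->list);

        /* The page that followed the old head becomes the new head. */
        ring->head = spare->list.next;

        /* The old head now belongs to the reader alone. */
        ring->reader_page = reader;
        INIT_LIST_HEAD(&reader->list);

        spin_unlock(&ring->lock);

        return reader;
}
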
5355 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader() argument
5361 reader = rb_get_reader_page(cpu_buffer); in rb_advance_reader()
5364 if (RB_WARN_ON(cpu_buffer, !reader)) in rb_advance_reader()
5367 event = rb_reader_event(cpu_buffer); in rb_advance_reader()
5370 cpu_buffer->read++; in rb_advance_reader()
5372 rb_update_read_stamp(cpu_buffer, event); in rb_advance_reader()
5375 cpu_buffer->reader_page->read += length; in rb_advance_reader()
5376 cpu_buffer->read_bytes += length; in rb_advance_reader()
5381 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter() local
5383 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
5399 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
5408 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events() argument
5410 return cpu_buffer->lost_events; in rb_lost_events()
5414 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek() argument
5430 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) in rb_buffer_peek()
5433 reader = rb_get_reader_page(cpu_buffer); in rb_buffer_peek()
5437 event = rb_reader_event(cpu_buffer); in rb_buffer_peek()
5442 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
5455 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
5462 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
5463 cpu_buffer->cpu, ts); in rb_buffer_peek()
5466 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
5471 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
5472 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
5473 cpu_buffer->cpu, ts); in rb_buffer_peek()
5476 *lost_events = rb_lost_events(cpu_buffer); in rb_buffer_peek()
5480 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
5491 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek() local
5498 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
5499 buffer = cpu_buffer->buffer; in rb_iter_peek()
5506 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
5507 iter->cache_reader_page != cpu_buffer->reader_page || in rb_iter_peek()
5508 iter->cache_pages_removed != cpu_buffer->pages_removed)) in rb_iter_peek()
5525 if (rb_per_cpu_empty(cpu_buffer)) in rb_iter_peek()
5555 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
5556 cpu_buffer->cpu, ts); in rb_iter_peek()
5566 cpu_buffer->cpu, ts); in rb_iter_peek()
5571 RB_WARN_ON(cpu_buffer, 1); in rb_iter_peek()
5578 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_lock() argument
5581 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
5594 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
5598 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
5603 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) in rb_reader_unlock() argument
5606 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
5623 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek() local
5633 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_peek()
5634 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_peek()
5636 rb_advance_reader(cpu_buffer); in ring_buffer_peek()
5637 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_peek()
5671 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek() local
5676 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5678 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5701 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume() local
5713 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
5715 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_consume()
5717 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_consume()
5719 cpu_buffer->lost_events = 0; in ring_buffer_consume()
5720 rb_advance_reader(cpu_buffer); in ring_buffer_consume()
5723 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_consume()
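
ring_buffer_consume() wraps rb_buffer_peek() plus rb_advance_reader() under rb_reader_lock(), so each call returns the next unread event on a CPU (or NULL) and moves the read cursor past it; lost_events reports entries overwritten since the last read. A hedged consumer-loop sketch; ring_buffer_event_data()/_length() are assumed from <linux/ring_buffer.h>:

#include <linux/printk.h>
#include <linux/ring_buffer.h>

/* Drain every pending event on @cpu, reporting how many were lost to overwrite. */
static void demo_drain_cpu(struct trace_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        unsigned long lost_events;
        u64 ts;

        while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost_events))) {
                void *data = ring_buffer_event_data(event);
                unsigned int len = ring_buffer_event_length(event);

                if (lost_events)
                        pr_info("cpu%d: lost %lu events\n", cpu, lost_events);

                pr_info("cpu%d: ts=%llu len=%u first byte=%02x\n",
                        cpu, (unsigned long long)ts, len,
                        *(unsigned char *)data);
        }
}
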
5756 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_prepare() local
5774 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
5776 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
5778 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_read_prepare()
5812 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start() local
5818 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
5820 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5821 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
5823 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
5824 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5837 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish() local
5841 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5842 rb_check_pages(cpu_buffer); in ring_buffer_read_finish()
5843 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5845 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_read_finish()
5860 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance() local
5863 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
5867 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
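
ring_buffer_read_prepare/start/finish are the non-consuming iterator path: prepare allocates the iterator and bumps resize_disabled, start resets it under reader_lock plus the arch spinlock, and finish re-runs rb_check_pages() and drops resize_disabled. A hedged sketch of the usual calling sequence; the gfp argument to prepare and the separate prepare_sync step are as I recall them from <linux/ring_buffer.h>:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/ring_buffer.h>

/* Walk all events currently on @cpu without consuming them. */
static int demo_walk_cpu(struct trace_buffer *buffer, int cpu)
{
        struct ring_buffer_iter *iter;
        struct ring_buffer_event *event;
        u64 ts;

        iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        ring_buffer_read_prepare_sync();
        ring_buffer_read_start(iter);

        while ((event = ring_buffer_iter_peek(iter, &ts))) {
                pr_info("cpu%d: event at ts=%llu\n",
                        cpu, (unsigned long long)ts);
                ring_buffer_iter_advance(iter);
        }

        ring_buffer_read_finish(iter);

        return 0;
}
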
5908 static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_meta_page() argument
5910 struct trace_buffer_meta *meta = cpu_buffer->meta_page; in rb_update_meta_page()
5915 meta->reader.read = cpu_buffer->reader_page->read; in rb_update_meta_page()
5916 meta->reader.id = cpu_buffer->reader_page->id; in rb_update_meta_page()
5917 meta->reader.lost_events = cpu_buffer->lost_events; in rb_update_meta_page()
5919 meta->entries = local_read(&cpu_buffer->entries); in rb_update_meta_page()
5920 meta->overrun = local_read(&cpu_buffer->overrun); in rb_update_meta_page()
5921 meta->read = cpu_buffer->read; in rb_update_meta_page()
5924 flush_dcache_folio(virt_to_folio(cpu_buffer->meta_page)); in rb_update_meta_page()
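
rb_update_meta_page() publishes the reader state into the user-visible meta page when the buffer is memory-mapped: the reader sub-buffer's id, read offset and lost_events, plus the entries/overrun/read counters, followed by a dcache flush for aliasing caches. A sketch of what a mapped reader would find in those fields; the struct trace_buffer_meta layout and its UAPI header path are assumptions on my part, and the pointer is assumed to be already mmap()ed:

#include <stdio.h>
#include <linux/trace_mmap.h>   /* assumed UAPI header for struct trace_buffer_meta */

/* Print the snapshot a mapped reader sees; @meta is assumed to be mapped already. */
static void demo_dump_meta(const struct trace_buffer_meta *meta)
{
        printf("reader: id=%llu read=%llu lost=%llu\n",
               (unsigned long long)meta->reader.id,
               (unsigned long long)meta->reader.read,
               (unsigned long long)meta->reader.lost_events);
        printf("entries=%llu overrun=%llu read=%llu\n",
               (unsigned long long)meta->entries,
               (unsigned long long)meta->overrun,
               (unsigned long long)meta->read);
}
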
5928 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu() argument
5932 rb_head_page_deactivate(cpu_buffer); in rb_reset_cpu()
5934 cpu_buffer->head_page in rb_reset_cpu()
5935 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
5936 rb_clear_buffer_page(cpu_buffer->head_page); in rb_reset_cpu()
5937 list_for_each_entry(page, cpu_buffer->pages, list) { in rb_reset_cpu()
5941 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
5942 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
5944 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
5945 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
5946 rb_clear_buffer_page(cpu_buffer->reader_page); in rb_reset_cpu()
5948 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
5949 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
5950 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
5951 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
5952 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
5953 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
5954 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
5955 local_set(&cpu_buffer->pages_touched, 0); in rb_reset_cpu()
5956 local_set(&cpu_buffer->pages_lost, 0); in rb_reset_cpu()
5957 local_set(&cpu_buffer->pages_read, 0); in rb_reset_cpu()
5958 cpu_buffer->last_pages_touch = 0; in rb_reset_cpu()
5959 cpu_buffer->shortest_full = 0; in rb_reset_cpu()
5960 cpu_buffer->read = 0; in rb_reset_cpu()
5961 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
5963 rb_time_set(&cpu_buffer->write_stamp, 0); in rb_reset_cpu()
5964 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_reset_cpu()
5966 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); in rb_reset_cpu()
5968 cpu_buffer->lost_events = 0; in rb_reset_cpu()
5969 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
5971 rb_head_page_activate(cpu_buffer); in rb_reset_cpu()
5972 cpu_buffer->pages_removed = 0; in rb_reset_cpu()
5974 if (cpu_buffer->mapped) { in rb_reset_cpu()
5975 rb_update_meta_page(cpu_buffer); in rb_reset_cpu()
5976 if (cpu_buffer->ring_meta) { in rb_reset_cpu()
5977 struct ring_buffer_meta *meta = cpu_buffer->ring_meta; in rb_reset_cpu()
5984 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in reset_disabled_cpu_buffer() argument
5988 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5990 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in reset_disabled_cpu_buffer()
5993 arch_spin_lock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5995 rb_reset_cpu(cpu_buffer); in reset_disabled_cpu_buffer()
5997 arch_spin_unlock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
6000 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
6010 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu() local
6019 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
6020 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
6025 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset_cpu()
6027 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
6028 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
6031 meta = rb_range_meta(buffer, 0, cpu_buffer->cpu); in ring_buffer_reset_cpu()
6048 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset_online_cpus() local
6056 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
6058 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
6059 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
6066 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
6072 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) in ring_buffer_reset_online_cpus()
6075 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset_online_cpus()
6078 meta = rb_range_meta(buffer, 0, cpu_buffer->cpu); in ring_buffer_reset_online_cpus()
6082 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
6083 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
6095 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset() local
6102 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
6104 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset()
6105 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset()
6112 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
6114 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset()
6116 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset()
6117 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset()
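
All three reset entry points follow the same discipline visible above: raise resize_disabled and record_disabled, let in-flight writers drain (the gap between the two loops in ring_buffer_reset_online_cpus()), reset each disabled buffer under reader_lock plus the arch spinlock, then drop the counters again. On the caller's side that is hidden behind one call; a hedged sketch, pausing recording around the reset as the tracing callers typically do:

#include <linux/ring_buffer.h>

/* Throw away everything recorded so far on every online CPU. */
static void demo_clear_buffer(struct trace_buffer *buffer)
{
        ring_buffer_record_disable(buffer);
        ring_buffer_reset_online_cpus(buffer);
        ring_buffer_record_enable(buffer);
}

/* Clear only @cpu's buffer. */
static void demo_clear_cpu(struct trace_buffer *buffer, int cpu)
{
        ring_buffer_record_disable_cpu(buffer, cpu);
        ring_buffer_reset_cpu(buffer, cpu);
        ring_buffer_record_enable_cpu(buffer, cpu);
}
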
6130 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty() local
6138 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
6140 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty()
6141 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty()
6142 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty()
6160 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu() local
6168 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
6170 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty_cpu()
6171 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty_cpu()
6172 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty_cpu()
6292 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_alloc_read_page() local
6305 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
6307 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
6309 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
6310 bpage->data = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
6311 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
6314 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
6322 cpu_buffer->buffer->subbuf_order); in ring_buffer_alloc_read_page()
6348 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_free_read_page() local
6356 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
6367 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
6369 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
6370 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
6374 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
6421 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page() local
6453 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
6455 reader = rb_get_reader_page(cpu_buffer); in ring_buffer_read_page()
6459 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
6465 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
6475 cpu_buffer->reader_page == cpu_buffer->commit_page || in ring_buffer_read_page()
6476 cpu_buffer->mapped) { in ring_buffer_read_page()
6477 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
6490 cpu_buffer->reader_page == cpu_buffer->commit_page)) in ring_buffer_read_page()
6503 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
6518 rb_advance_reader(cpu_buffer); in ring_buffer_read_page()
6525 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
6538 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
6539 cpu_buffer->read_bytes += rb_page_size(reader); in ring_buffer_read_page()
6560 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
6586 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
6656 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_subbuf_order_set() local
6697 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6699 if (cpu_buffer->mapped) { in ring_buffer_subbuf_order_set()
6712 cpu_buffer->nr_pages_to_update = nr_pages; in ring_buffer_subbuf_order_set()
6718 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_subbuf_order_set()
6719 if (__rb_allocate_pages(cpu_buffer, nr_pages, in ring_buffer_subbuf_order_set()
6720 &cpu_buffer->new_pages)) { in ring_buffer_subbuf_order_set()
6735 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6737 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_subbuf_order_set()
6740 rb_head_page_deactivate(cpu_buffer); in ring_buffer_subbuf_order_set()
6749 list_add(&old_pages, cpu_buffer->pages); in ring_buffer_subbuf_order_set()
6750 list_add(&cpu_buffer->reader_page->list, &old_pages); in ring_buffer_subbuf_order_set()
6753 cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next, in ring_buffer_subbuf_order_set()
6755 list_del_init(&cpu_buffer->reader_page->list); in ring_buffer_subbuf_order_set()
6758 cpu_buffer->pages = cpu_buffer->new_pages.next; in ring_buffer_subbuf_order_set()
6759 list_del_init(&cpu_buffer->new_pages); in ring_buffer_subbuf_order_set()
6761 cpu_buffer->head_page in ring_buffer_subbuf_order_set()
6762 = list_entry(cpu_buffer->pages, struct buffer_page, list); in ring_buffer_subbuf_order_set()
6763 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in ring_buffer_subbuf_order_set()
6765 cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update; in ring_buffer_subbuf_order_set()
6766 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_subbuf_order_set()
6768 old_free_data_page = cpu_buffer->free_page; in ring_buffer_subbuf_order_set()
6769 cpu_buffer->free_page = NULL; in ring_buffer_subbuf_order_set()
6771 rb_head_page_activate(cpu_buffer); in ring_buffer_subbuf_order_set()
6773 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_subbuf_order_set()
6782 rb_check_pages(cpu_buffer); in ring_buffer_subbuf_order_set()
6798 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6800 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_subbuf_order_set()
6803 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) { in ring_buffer_subbuf_order_set()
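
ring_buffer_subbuf_order_set() (6656-6803) refuses to change the sub-buffer order while the buffer is mapped (6699) and otherwise follows a two-phase resize: allocate a complete new page set per CPU into cpu_buffer->new_pages outside the reader lock, then, under the lock, splice the old pages (and the old reader page) onto a temporary list, install the new reader/head/tail/commit pages, and drop the cached free_page; the old pages are only freed after the lock is released. A self-contained model of that ordering, with an array standing in for the page list and hypothetical names throughout:

/* Two-phase sub-buffer replacement: allocate everything first, swap
 * under the lock, free outside it. A "ring" here is just an array of
 * sub-buffers; all names are hypothetical.
 */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct rbm_ring {
        pthread_mutex_t reader_lock;
        void **pages;           /* nr_pages sub-buffers */
        int nr_pages;
        size_t subbuf_size;
        void *free_page;        /* cached read page; dropped on resize */
};

static int rbm_set_subbuf_size(struct rbm_ring *ring, size_t new_size, int new_nr)
{
        void **new_pages, **old_pages;
        void *old_free;
        int old_nr, i;

        /* Phase 1: allocate everything before touching the live ring. */
        new_pages = calloc(new_nr, sizeof(*new_pages));
        if (!new_pages)
                return -ENOMEM;
        for (i = 0; i < new_nr; i++) {
                new_pages[i] = malloc(new_size);
                if (!new_pages[i])
                        goto fail;
        }

        /* Phase 2: swap under the reader lock; keep the old set aside. */
        pthread_mutex_lock(&ring->reader_lock);
        old_pages = ring->pages;
        old_nr = ring->nr_pages;
        old_free = ring->free_page;
        ring->pages = new_pages;
        ring->nr_pages = new_nr;
        ring->subbuf_size = new_size;
        ring->free_page = NULL;         /* old size, cannot be reused */
        pthread_mutex_unlock(&ring->reader_lock);

        /* Phase 3: free the old pages only after dropping the lock. */
        for (i = 0; i < old_nr; i++)
                free(old_pages[i]);
        free(old_pages);
        free(old_free);
        return 0;

fail:
        while (i--)
                free(new_pages[i]);
        free(new_pages);
        return -ENOMEM;
}
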
6813 static int rb_alloc_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_alloc_meta_page() argument
6817 if (cpu_buffer->meta_page) in rb_alloc_meta_page()
6824 cpu_buffer->meta_page = page_to_virt(page); in rb_alloc_meta_page()
6829 static void rb_free_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_meta_page() argument
6831 unsigned long addr = (unsigned long)cpu_buffer->meta_page; in rb_free_meta_page()
6834 cpu_buffer->meta_page = NULL; in rb_free_meta_page()
6837 static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_setup_ids_meta_page() argument
6840 struct trace_buffer_meta *meta = cpu_buffer->meta_page; in rb_setup_ids_meta_page()
6841 unsigned int nr_subbufs = cpu_buffer->nr_pages + 1; in rb_setup_ids_meta_page()
6845 subbuf_ids[id] = (unsigned long)cpu_buffer->reader_page->page; in rb_setup_ids_meta_page()
6846 cpu_buffer->reader_page->id = id++; in rb_setup_ids_meta_page()
6848 first_subbuf = subbuf = rb_set_head_page(cpu_buffer); in rb_setup_ids_meta_page()
6861 cpu_buffer->subbuf_ids = subbuf_ids; in rb_setup_ids_meta_page()
6865 meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE; in rb_setup_ids_meta_page()
6868 rb_update_meta_page(cpu_buffer); in rb_setup_ids_meta_page()
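
rb_setup_ids_meta_page() (6837-6868) builds the table user space needs to find sub-buffers in the mapping: nr_subbufs is nr_pages plus the reader page (6841), id 0 is assigned to the reader page (6845-6846), the remaining ids are assigned by walking the ring from the head page (6848), and the result is published through cpu_buffer->subbuf_ids and the shared meta page (6861-6868). A small model of that id assignment, with hypothetical types and the header-size detail of 6865 left out:

/* Model of the id table built for the memory-mapped ring buffer:
 * index 0 is the reader sub-buffer, 1..nr_pages follow ring order.
 * Hypothetical types; the real code publishes nr_subbufs and
 * subbuf_size through the shared trace_buffer_meta page.
 */
#include <stdlib.h>

struct rbm_meta {
        unsigned int nr_subbufs;
        unsigned int subbuf_size;
};

struct rbm_mapped_ring {
        void *reader_page;
        void **ring_pages;              /* nr_pages writer sub-buffers */
        int nr_pages;
        unsigned int subbuf_size;
        struct rbm_meta *meta;
        unsigned long *subbuf_ids;      /* id -> sub-buffer address */
};

static int rbm_setup_ids(struct rbm_mapped_ring *rb)
{
        unsigned int nr_subbufs = rb->nr_pages + 1;     /* + reader page */
        unsigned long *ids;
        unsigned int id = 0;
        int i;

        ids = calloc(nr_subbufs, sizeof(*ids));
        if (!ids)
                return -1;

        ids[id] = (unsigned long)rb->reader_page;       /* id 0: reader */
        id++;

        for (i = 0; i < rb->nr_pages; i++, id++)        /* ring order */
                ids[id] = (unsigned long)rb->ring_pages[i];

        rb->subbuf_ids = ids;
        rb->meta->nr_subbufs = nr_subbufs;
        rb->meta->subbuf_size = rb->subbuf_size;
        return 0;
}
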
6874 struct ring_buffer_per_cpu *cpu_buffer; in rb_get_mapped_buffer() local
6879 cpu_buffer = buffer->buffers[cpu]; in rb_get_mapped_buffer()
6881 mutex_lock(&cpu_buffer->mapping_lock); in rb_get_mapped_buffer()
6883 if (!cpu_buffer->user_mapped) { in rb_get_mapped_buffer()
6884 mutex_unlock(&cpu_buffer->mapping_lock); in rb_get_mapped_buffer()
6888 return cpu_buffer; in rb_get_mapped_buffer()
6891 static void rb_put_mapped_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_put_mapped_buffer() argument
6893 mutex_unlock(&cpu_buffer->mapping_lock); in rb_put_mapped_buffer()
6900 static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer, in __rb_inc_dec_mapped() argument
6905 lockdep_assert_held(&cpu_buffer->mapping_lock); in __rb_inc_dec_mapped()
6908 if (WARN_ON(cpu_buffer->mapped < cpu_buffer->user_mapped)) in __rb_inc_dec_mapped()
6911 if (inc && cpu_buffer->mapped == UINT_MAX) in __rb_inc_dec_mapped()
6914 if (WARN_ON(!inc && cpu_buffer->user_mapped == 0)) in __rb_inc_dec_mapped()
6917 mutex_lock(&cpu_buffer->buffer->mutex); in __rb_inc_dec_mapped()
6918 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in __rb_inc_dec_mapped()
6921 cpu_buffer->user_mapped++; in __rb_inc_dec_mapped()
6922 cpu_buffer->mapped++; in __rb_inc_dec_mapped()
6924 cpu_buffer->user_mapped--; in __rb_inc_dec_mapped()
6925 cpu_buffer->mapped--; in __rb_inc_dec_mapped()
6928 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in __rb_inc_dec_mapped()
6929 mutex_unlock(&cpu_buffer->buffer->mutex); in __rb_inc_dec_mapped()
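
__rb_inc_dec_mapped() (6900-6929) keeps two counters in step under the reader lock: user_mapped counts user-space mappings of the CPU buffer, while mapped also covers in-kernel pins, so mapped must never drop below user_mapped; the function also refuses to overflow on increment (6911) or underflow on decrement (6914). A condensed model of those checks, with hypothetical names and the locking omitted:

/* Model of the paired counters: "mapped" >= "user_mapped" always.
 * Hypothetical names; the real code takes the buffer mutex and the
 * reader lock around the update.
 */
#include <errno.h>
#include <limits.h>

struct rbm_map_state {
        unsigned int mapped;            /* all pins, kernel + user */
        unsigned int user_mapped;       /* user-space mmap() references */
};

static int rbm_inc_dec_mapped(struct rbm_map_state *st, int inc)
{
        if (st->mapped < st->user_mapped)       /* broken invariant */
                return -EINVAL;
        if (inc && st->mapped == UINT_MAX)      /* would overflow */
                return -EBUSY;
        if (!inc && st->user_mapped == 0)       /* nothing to drop */
                return -EINVAL;

        if (inc) {
                st->user_mapped++;
                st->mapped++;
        } else {
                st->user_mapped--;
                st->mapped--;
        }
        return 0;
}
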
6946 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, in __rb_map_vma() argument
6960 subbuf_order = cpu_buffer->buffer->subbuf_order; in __rb_map_vma()
6973 lockdep_assert_held(&cpu_buffer->mapping_lock); in __rb_map_vma()
6975 nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */ in __rb_map_vma()
6991 pages[p++] = virt_to_page(cpu_buffer->meta_page); in __rb_map_vma()
7012 struct page *page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]); in __rb_map_vma()
7037 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, in __rb_map_vma() argument
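
__rb_map_vma() (6946-7012) lays the mapping out with the meta page first (6991) and then the pages of each sub-buffer in id order (7012); the second definition at 7037 is presumably the stub used on configurations where the VMA cannot be populated this way (for example !CONFIG_MMU builds). The hypothetical helper below only models the resulting layout, translating a page offset inside the VMA into a sub-buffer id; it is not the kernel's code.

/* Model of the mmap layout: one meta page, then every sub-buffer
 * (reader first, id order) at 2^subbuf_order pages each.
 */
struct rbm_layout {
        unsigned int nr_subbufs;        /* includes the reader sub-buffer */
        unsigned int subbuf_order;      /* pages per sub-buffer = 1 << order */
};

/* Returns the sub-buffer id for @pgoff, or -1 for the meta page and
 * out-of-range offsets. @page_in_subbuf gets the page within it.
 */
static int rbm_pgoff_to_subbuf(const struct rbm_layout *l,
                               unsigned long pgoff,
                               unsigned long *page_in_subbuf)
{
        unsigned long pages_per_subbuf = 1UL << l->subbuf_order;

        if (pgoff == 0)                 /* first page: the meta page */
                return -1;

        pgoff--;                        /* data starts after the meta page */
        if (pgoff >= (unsigned long)l->nr_subbufs * pages_per_subbuf)
                return -1;              /* beyond the mapping */

        *page_in_subbuf = pgoff % pages_per_subbuf;
        return (int)(pgoff / pages_per_subbuf);
}
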
7047 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_map() local
7054 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_map()
7056 mutex_lock(&cpu_buffer->mapping_lock); in ring_buffer_map()
7058 if (cpu_buffer->user_mapped) { in ring_buffer_map()
7059 err = __rb_map_vma(cpu_buffer, vma); in ring_buffer_map()
7061 err = __rb_inc_dec_mapped(cpu_buffer, true); in ring_buffer_map()
7062 mutex_unlock(&cpu_buffer->mapping_lock); in ring_buffer_map()
7069 err = rb_alloc_meta_page(cpu_buffer); in ring_buffer_map()
7074 subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), GFP_KERNEL); in ring_buffer_map()
7076 rb_free_meta_page(cpu_buffer); in ring_buffer_map()
7081 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_map()
7087 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7088 rb_setup_ids_meta_page(cpu_buffer, subbuf_ids); in ring_buffer_map()
7090 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7092 err = __rb_map_vma(cpu_buffer, vma); in ring_buffer_map()
7094 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7096 cpu_buffer->mapped++; in ring_buffer_map()
7097 cpu_buffer->user_mapped = 1; in ring_buffer_map()
7098 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7100 kfree(cpu_buffer->subbuf_ids); in ring_buffer_map()
7101 cpu_buffer->subbuf_ids = NULL; in ring_buffer_map()
7102 rb_free_meta_page(cpu_buffer); in ring_buffer_map()
7107 mutex_unlock(&cpu_buffer->mapping_lock); in ring_buffer_map()
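
ring_buffer_map() (7047-7107) splits into two paths: if user_mapped is already set it only maps the VMA and bumps the refcount (7058-7061); otherwise it allocates the meta page and the subbuf_ids table, disables resizing, assigns the ids under the reader lock, maps the VMA, and then either commits (mapped++, user_mapped = 1) or frees what it allocated. The sketch below is only about that commit-or-unwind control flow; every helper is a hypothetical stub, and the model simply undoes everything it set up on failure.

/* Sketch of the first-map path: set up, try to map, commit or unwind.
 * Hypothetical names and stubs; only the ordering is the point.
 */
#include <stdlib.h>

struct rbm_cpu_map {
        void *meta_page;
        unsigned long *subbuf_ids;
        int resize_disabled;
        unsigned int mapped, user_mapped;
        int nr_pages;
};

/* Stand-in for the real VMA population step. */
static int stub_map_vma(struct rbm_cpu_map *c) { (void)c; return 0; }

static int rbm_first_map(struct rbm_cpu_map *c)
{
        int err;

        c->meta_page = calloc(1, 4096);         /* shared meta page */
        if (!c->meta_page)
                return -1;

        c->subbuf_ids = calloc(c->nr_pages + 1, sizeof(*c->subbuf_ids));
        if (!c->subbuf_ids) {
                free(c->meta_page);
                c->meta_page = NULL;
                return -1;
        }

        c->resize_disabled++;                   /* no resizing while mapped */

        err = stub_map_vma(c);                  /* the actual mapping step */
        if (!err) {
                c->mapped++;                    /* commit */
                c->user_mapped = 1;
                return 0;
        }

        /* Unwind: this model releases everything it set up above. */
        free(c->subbuf_ids);
        c->subbuf_ids = NULL;
        free(c->meta_page);
        c->meta_page = NULL;
        c->resize_disabled--;
        return err;
}
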
7114 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unmap() local
7121 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unmap()
7123 mutex_lock(&cpu_buffer->mapping_lock); in ring_buffer_unmap()
7125 if (!cpu_buffer->user_mapped) { in ring_buffer_unmap()
7128 } else if (cpu_buffer->user_mapped > 1) { in ring_buffer_unmap()
7129 __rb_inc_dec_mapped(cpu_buffer, false); in ring_buffer_unmap()
7134 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_unmap()
7137 if (!WARN_ON_ONCE(cpu_buffer->mapped < cpu_buffer->user_mapped)) in ring_buffer_unmap()
7138 cpu_buffer->mapped--; in ring_buffer_unmap()
7139 cpu_buffer->user_mapped = 0; in ring_buffer_unmap()
7141 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_unmap()
7143 kfree(cpu_buffer->subbuf_ids); in ring_buffer_unmap()
7144 cpu_buffer->subbuf_ids = NULL; in ring_buffer_unmap()
7145 rb_free_meta_page(cpu_buffer); in ring_buffer_unmap()
7146 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_unmap()
7151 mutex_unlock(&cpu_buffer->mapping_lock); in ring_buffer_unmap()
7158 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_map_get_reader() local
7164 cpu_buffer = rb_get_mapped_buffer(buffer, cpu); in ring_buffer_map_get_reader()
7165 if (IS_ERR(cpu_buffer)) in ring_buffer_map_get_reader()
7166 return (int)PTR_ERR(cpu_buffer); in ring_buffer_map_get_reader()
7168 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_map_get_reader()
7171 if (rb_per_cpu_empty(cpu_buffer)) in ring_buffer_map_get_reader()
7174 reader_size = rb_page_size(cpu_buffer->reader_page); in ring_buffer_map_get_reader()
7181 if (cpu_buffer->reader_page->read < reader_size) { in ring_buffer_map_get_reader()
7182 while (cpu_buffer->reader_page->read < reader_size) in ring_buffer_map_get_reader()
7183 rb_advance_reader(cpu_buffer); in ring_buffer_map_get_reader()
7187 reader = rb_get_reader_page(cpu_buffer); in ring_buffer_map_get_reader()
7192 missed_events = cpu_buffer->lost_events; in ring_buffer_map_get_reader()
7194 if (cpu_buffer->reader_page != cpu_buffer->commit_page) { in ring_buffer_map_get_reader()
7225 cpu_buffer->lost_events = 0; in ring_buffer_map_get_reader()
7231 flush_dcache_folio(virt_to_folio(cpu_buffer->reader_page->page)); in ring_buffer_map_get_reader()
7233 rb_update_meta_page(cpu_buffer); in ring_buffer_map_get_reader()
7235 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_map_get_reader()
7236 rb_put_mapped_buffer(cpu_buffer); in ring_buffer_map_get_reader()
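
ring_buffer_map_get_reader() (7158-7236) is the kernel side of the TRACE_MMAP_IOCTL_GET_READER ioctl: it advances the reader index past what user space already consumed (7181-7183), pulls in a fresh reader sub-buffer with rb_get_reader_page() (7187), accounts lost events (7192, 7225), flushes the dcache for the reader page where required (7231), and republishes the state through the meta page (7233). Assuming the memory-mapped ring buffer uAPI from include/uapi/linux/trace_mmap.h (struct trace_buffer_meta, TRACE_MMAP_IOCTL_GET_READER) and the per-CPU trace_pipe_raw file, a user-space consumer loop might look roughly like this; path, loop count, and error handling are illustrative only:

/* Rough user-space consumer for a memory-mapped per-CPU ring buffer.
 * Assumes the uAPI in <linux/trace_mmap.h>; not a reference
 * implementation.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/trace_mmap.h>

int main(void)
{
        const char *path =
                "/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw";
        int fd = open(path, O_RDONLY | O_NONBLOCK);
        if (fd < 0)
                return 1;

        /* The meta page sits at offset 0 of the mapping. */
        struct trace_buffer_meta *meta =
                mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
        if (meta == MAP_FAILED)
                return 1;

        /* The sub-buffers follow the meta page. */
        size_t data_len = (size_t)meta->subbuf_size * meta->nr_subbufs;
        void *data = mmap(NULL, data_len, PROT_READ, MAP_SHARED,
                          fd, meta->meta_page_size);
        if (data == MAP_FAILED)
                return 1;

        for (int i = 0; i < 10; i++) {
                /* Ask the kernel to publish a fresh reader sub-buffer. */
                if (ioctl(fd, TRACE_MMAP_IOCTL_GET_READER) < 0)
                        break;

                /* The reader sub-buffer is located via its id. */
                void *subbuf = (char *)data +
                               (size_t)meta->subbuf_size * meta->reader.id;

                printf("reader id=%u read=%u lost=%llu (%p)\n",
                       meta->reader.id, meta->reader.read,
                       (unsigned long long)meta->reader.lost_events, subbuf);
                sleep(1);
        }

        munmap(data, data_len);
        munmap(meta, getpagesize());
        close(fd);
        return 0;
}
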