Lines matching refs: rb
(cross-reference hits for the identifier rb; each hit shows the source line number and the enclosing function. The functions all belong to the BPF ring buffer implementation, kernel/bpf/ringbuf.c.)

86 	struct bpf_ringbuf *rb;  member
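
For orientation, the member hit at line 86 is the rb pointer inside struct bpf_ringbuf_map. A condensed sketch of the two structures, reconstructed from the fields referenced throughout this listing; the field order is an assumption, and the real layout page-aligns consumer_pos, producer_pos, and data:

    struct bpf_ringbuf {
        wait_queue_head_t waitq;      /* woken from bpf_ringbuf_notify(), line 160 */
        struct irq_work work;         /* deferred wakeup, lines 174/479/481 */
        u64 mask;                     /* data_sz - 1, line 176 */
        struct page **pages;          /* backing pages, lines 144/221 */
        int nr_pages;                 /* lines 145/222 */
        spinlock_t spinlock;          /* serializes producers, line 171 */
        atomic_t busy;                /* single-consumer guard, lines 172/727/760 */
        unsigned long consumer_pos;   /* page-aligned in the real layout */
        unsigned long producer_pos;   /* page-aligned in the real layout */
        char data[];                  /* double-mapped data area, line 424 */
    };

    struct bpf_ringbuf_map {
        struct bpf_map map;
        struct bpf_ringbuf *rb;       /* the member hit at line 86 */
    };
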
103 struct bpf_ringbuf *rb; in bpf_ringbuf_area_alloc() local
140 rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages, in bpf_ringbuf_area_alloc()
142 if (rb) { in bpf_ringbuf_area_alloc()
144 rb->pages = pages; in bpf_ringbuf_area_alloc()
145 rb->nr_pages = nr_pages; in bpf_ringbuf_area_alloc()
146 return rb; in bpf_ringbuf_area_alloc()
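
The vmap() call at line 140 maps the data pages twice in a row (nr_meta_pages + 2 * nr_data_pages entries), so a record that starts near the end of the ring can spill past it and still be virtually contiguous. A reconstructed sketch of how the page array is filled before that call; only lines 140-146 appear in the listing, the loop and flags are assumptions consistent with them:

    /* Each data page appears twice in pages[]: once in the first data
     * range and once in the mirror range immediately after it. */
    array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);
    pages = bpf_map_area_alloc(array_size, numa_node);
    if (!pages)
        return NULL;

    for (i = 0; i < nr_meta_pages + nr_data_pages; i++) {
        page = alloc_pages_node(numa_node, flags, 0);
        if (!page)
            goto err_free_pages;
        pages[i] = page;
        if (i >= nr_meta_pages)              /* mirror the data pages */
            pages[nr_data_pages + i] = page;
    }

    rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,   /* line 140 */
              VM_MAP | VM_USERMAP, PAGE_KERNEL);
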
158 struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work); in bpf_ringbuf_notify() local
160 wake_up_all(&rb->waitq); in bpf_ringbuf_notify()
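
Lines 158-160 reassemble into the complete wakeup handler. Running it as irq_work means producers in NMI or hard-irq context never call wake_up_all() directly; they only queue the work (lines 479/481):

    static void bpf_ringbuf_notify(struct irq_work *work)
    {
        struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);

        wake_up_all(&rb->waitq);
    }
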
165 struct bpf_ringbuf *rb; in bpf_ringbuf_alloc() local
167 rb = bpf_ringbuf_area_alloc(data_sz, numa_node); in bpf_ringbuf_alloc()
168 if (!rb) in bpf_ringbuf_alloc()
171 spin_lock_init(&rb->spinlock); in bpf_ringbuf_alloc()
172 atomic_set(&rb->busy, 0); in bpf_ringbuf_alloc()
173 init_waitqueue_head(&rb->waitq); in bpf_ringbuf_alloc()
174 init_irq_work(&rb->work, bpf_ringbuf_notify); in bpf_ringbuf_alloc()
176 rb->mask = data_sz - 1; in bpf_ringbuf_alloc()
177 rb->consumer_pos = 0; in bpf_ringbuf_alloc()
178 rb->producer_pos = 0; in bpf_ringbuf_alloc()
180 return rb; in bpf_ringbuf_alloc()
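
rb->mask = data_sz - 1 at line 176 only works because data_sz is a power of two; positions then wrap into the ring with a single AND instead of a modulo. A runnable userspace illustration (the values are made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned long data_sz = 4096;        /* must be a power of two */
        unsigned long mask = data_sz - 1;    /* 0xfff, as at line 176 */
        unsigned long prod_pos = 5000;       /* logical position, never reset */

        /* 5000 & 0xfff == 904: the position wraps into the ring */
        printf("ring offset: %lu\n", prod_pos & mask);
        return 0;
    }
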
207 rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node); in ringbuf_map_alloc()
208 if (!rb_map->rb) { in ringbuf_map_alloc()
216 static void bpf_ringbuf_free(struct bpf_ringbuf *rb) in bpf_ringbuf_free() argument
221 struct page **pages = rb->pages; in bpf_ringbuf_free()
222 int i, nr_pages = rb->nr_pages; in bpf_ringbuf_free()
224 vunmap(rb); in bpf_ringbuf_free()
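
Lines 216-224 show why rb->pages is stashed at allocation time (line 144): the page array must outlive the mapping so the pages can still be freed after vunmap() tears rb down. A reassembled sketch; the free loop itself is an assumption matching the allocation path:

    static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
    {
        /* copy out pages and nr_pages first: once vunmap() runs,
         * rb itself is no longer mapped */
        struct page **pages = rb->pages;
        int i, nr_pages = rb->nr_pages;

        vunmap(rb);
        for (i = 0; i < nr_pages; i++)
            __free_page(pages[i]);
        bpf_map_area_free(pages);
    }
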
235 bpf_ringbuf_free(rb_map->rb); in ringbuf_map_free()
275 return remap_vmalloc_range(vma, rb_map->rb, in ringbuf_map_mmap_kern()
296 return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF); in ringbuf_map_mmap_user()
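
The two mmap paths differ only in which pages userspace may write. For a kernel-producer ring buffer (line 275) a writable mapping is restricted to the consumer_pos page; for a user-producer ring buffer (line 296) userspace must also be able to write producer_pos and the data area. RINGBUF_PGOFF skips the kernel-internal metadata pages that sit in front of consumer_pos. A sketch of the kern variant; the VM_WRITE check is a reconstruction:

    static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma)
    {
        struct bpf_ringbuf_map *rb_map;

        rb_map = container_of(map, struct bpf_ringbuf_map, map);

        if (vma->vm_flags & VM_WRITE) {
            /* allow a writable mapping for the consumer_pos page only */
            if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EPERM;
        }

        return remap_vmalloc_range(vma, rb_map->rb,
                                   vma->vm_pgoff + RINGBUF_PGOFF);  /* line 275 */
    }
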
299 static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb) in ringbuf_avail_data_sz() argument
303 cons_pos = smp_load_acquire(&rb->consumer_pos); in ringbuf_avail_data_sz()
304 prod_pos = smp_load_acquire(&rb->producer_pos); in ringbuf_avail_data_sz()
308 static u32 ringbuf_total_data_sz(const struct bpf_ringbuf *rb) in ringbuf_total_data_sz() argument
310 return rb->mask + 1; in ringbuf_total_data_sz()
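
Because consumer_pos and producer_pos grow monotonically and never wrap, available data is a plain subtraction; the return statement falls between the listed lines (it does not mention rb, so the cross-reference omits it). Reassembled:

    static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
    {
        unsigned long cons_pos, prod_pos;

        cons_pos = smp_load_acquire(&rb->consumer_pos);  /* line 303 */
        prod_pos = smp_load_acquire(&rb->producer_pos);  /* line 304 */
        return prod_pos - cons_pos;
    }

    static u32 ringbuf_total_data_sz(const struct bpf_ringbuf *rb)
    {
        return rb->mask + 1;  /* line 310: mask is data_sz - 1 */
    }
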
319 poll_wait(filp, &rb_map->rb->waitq, pts); in ringbuf_map_poll_kern()
321 if (ringbuf_avail_data_sz(rb_map->rb)) in ringbuf_map_poll_kern()
332 poll_wait(filp, &rb_map->rb->waitq, pts); in ringbuf_map_poll_user()
334 if (ringbuf_avail_data_sz(rb_map->rb) < ringbuf_total_data_sz(rb_map->rb)) in ringbuf_map_poll_user()
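
The two poll callbacks mirror each other: a kernel-producer ring is readable (EPOLLIN) whenever any data is available (line 321), while a user-producer ring is writable (EPOLLOUT) as long as it is not completely full (line 334). A sketch of the kern variant; the epoll flag constants are the standard ones:

    static __poll_t ringbuf_map_poll_kern(struct bpf_map *map, struct file *filp,
                                          struct poll_table_struct *pts)
    {
        struct bpf_ringbuf_map *rb_map;

        rb_map = container_of(map, struct bpf_ringbuf_map, map);
        poll_wait(filp, &rb_map->rb->waitq, pts);    /* line 319 */

        if (ringbuf_avail_data_sz(rb_map->rb))       /* line 321 */
            return EPOLLIN | EPOLLRDNORM;
        return 0;
    }
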
373 static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb, in bpf_ringbuf_rec_pg_off() argument
376 return ((void *)hdr - (void *)rb) >> PAGE_SHIFT; in bpf_ringbuf_rec_pg_off()
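
bpf_ringbuf_rec_pg_off() (line 376) records, in each sample header, how many pages the header sits past the start of rb. That lets the commit path recover the bpf_ringbuf from nothing but a sample pointer. A sketch of the inverse, bpf_ringbuf_restore_from_rec() as used at line 464; the exact header layout is an assumption:

    static struct bpf_ringbuf *
    bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
    {
        unsigned long addr = (unsigned long)(void *)hdr;
        unsigned long off = (unsigned long)hdr->pg_off << PAGE_SHIFT;

        /* round the header address down to its page, then walk back
         * pg_off pages to the start of the bpf_ringbuf */
        return (void *)((addr & PAGE_MASK) - off);
    }
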
391 static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size) in __bpf_ringbuf_reserve() argument
401 if (len > ringbuf_total_data_sz(rb)) in __bpf_ringbuf_reserve()
404 cons_pos = smp_load_acquire(&rb->consumer_pos); in __bpf_ringbuf_reserve()
407 if (!spin_trylock_irqsave(&rb->spinlock, flags)) in __bpf_ringbuf_reserve()
410 spin_lock_irqsave(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
413 prod_pos = rb->producer_pos; in __bpf_ringbuf_reserve()
419 if (new_prod_pos - cons_pos > rb->mask) { in __bpf_ringbuf_reserve()
420 spin_unlock_irqrestore(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
424 hdr = (void *)rb->data + (prod_pos & rb->mask); in __bpf_ringbuf_reserve()
425 pg_off = bpf_ringbuf_rec_pg_off(rb, hdr); in __bpf_ringbuf_reserve()
430 smp_store_release(&rb->producer_pos, new_prod_pos); in __bpf_ringbuf_reserve()
432 spin_unlock_irqrestore(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
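
Stitched together, lines 391-432 form the producer fast path: size check, lock acquisition (trylock only when in NMI context, so a producer interrupted mid-reserve cannot deadlock against itself), overflow check against the consumer, header setup, and a release-store of the new producer position. A condensed reconstruction; the round_up, the in_nmi() test, and the busy-bit header write are assumptions consistent with the listed lines:

    static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
    {
        unsigned long cons_pos, prod_pos, new_prod_pos, flags;
        struct bpf_ringbuf_hdr *hdr;
        u32 len, pg_off;

        len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
        if (len > ringbuf_total_data_sz(rb))             /* line 401 */
            return NULL;

        cons_pos = smp_load_acquire(&rb->consumer_pos);  /* line 404 */

        if (in_nmi()) {
            if (!spin_trylock_irqsave(&rb->spinlock, flags))  /* line 407 */
                return NULL;
        } else {
            spin_lock_irqsave(&rb->spinlock, flags);     /* line 410 */
        }

        prod_pos = rb->producer_pos;                     /* line 413 */
        new_prod_pos = prod_pos + len;

        /* producer may not get more than (ring size - 1) ahead of consumer */
        if (new_prod_pos - cons_pos > rb->mask) {        /* line 419 */
            spin_unlock_irqrestore(&rb->spinlock, flags);
            return NULL;
        }

        hdr = (void *)rb->data + (prod_pos & rb->mask);  /* line 424 */
        pg_off = bpf_ringbuf_rec_pg_off(rb, hdr);        /* line 425 */
        hdr->len = size | BPF_RINGBUF_BUSY_BIT;          /* commit clears the bit */
        hdr->pg_off = pg_off;

        /* pairs with the consumer's smp_load_acquire() */
        smp_store_release(&rb->producer_pos, new_prod_pos);  /* line 430 */

        spin_unlock_irqrestore(&rb->spinlock, flags);
        return (void *)hdr + BPF_RINGBUF_HDR_SZ;
    }
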
445 return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size); in BPF_CALL_3()
460 struct bpf_ringbuf *rb; in bpf_ringbuf_commit() local
464 rb = bpf_ringbuf_restore_from_rec(hdr); in bpf_ringbuf_commit()
475 rec_pos = (void *)hdr - (void *)rb->data; in bpf_ringbuf_commit()
476 cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask; in bpf_ringbuf_commit()
479 irq_work_queue(&rb->work); in bpf_ringbuf_commit()
481 irq_work_queue(&rb->work); in bpf_ringbuf_commit()
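
bpf_ringbuf_commit() is the common tail of the commit and discard helpers: it recovers rb from the sample header (line 464), clears the busy bit with an xchg so the final length becomes visible atomically, and queues the irq_work wakeup (lines 479/481) either unconditionally (BPF_RB_FORCE_WAKEUP) or only when the consumer has caught up to exactly this record. A reconstructed sketch; the bit handling is an assumption consistent with the reserve path above:

    static void bpf_ringbuf_commit(void *sample, u64 flags, bool discard)
    {
        unsigned long rec_pos, cons_pos;
        struct bpf_ringbuf_hdr *hdr;
        struct bpf_ringbuf *rb;
        u32 new_len;

        hdr = sample - BPF_RINGBUF_HDR_SZ;
        rb = bpf_ringbuf_restore_from_rec(hdr);      /* line 464 */
        new_len = hdr->len ^ BPF_RINGBUF_BUSY_BIT;   /* clear the busy bit */
        if (discard)
            new_len |= BPF_RINGBUF_DISCARD_BIT;

        /* publish the final length atomically */
        xchg(&hdr->len, new_len);

        rec_pos = (void *)hdr - (void *)rb->data;    /* line 475 */
        cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;  /* line 476 */

        if (flags & BPF_RB_FORCE_WAKEUP)
            irq_work_queue(&rb->work);               /* line 479 */
        else if (cons_pos == rec_pos && !(flags & BPF_RB_NO_WAKEUP))
            irq_work_queue(&rb->work);               /* line 481 */
    }
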
520 rec = __bpf_ringbuf_reserve(rb_map->rb, size); in BPF_CALL_4()
540 struct bpf_ringbuf *rb; in BPF_CALL_2() local
542 rb = container_of(map, struct bpf_ringbuf_map, map)->rb; in BPF_CALL_2()
546 return ringbuf_avail_data_sz(rb); in BPF_CALL_2()
548 return ringbuf_total_data_sz(rb); in BPF_CALL_2()
550 return smp_load_acquire(&rb->consumer_pos); in BPF_CALL_2()
552 return smp_load_acquire(&rb->producer_pos); in BPF_CALL_2()
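
Lines 540-552 are bpf_ringbuf_query(); the four returns correspond one-to-one to the query flags the helper accepts. Reassembled (only the switch labels are inferred; they are the documented BPF_RB_* query flags):

    BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags)
    {
        struct bpf_ringbuf *rb;

        rb = container_of(map, struct bpf_ringbuf_map, map)->rb;  /* line 542 */

        switch (flags) {
        case BPF_RB_AVAIL_DATA:
            return ringbuf_avail_data_sz(rb);             /* line 546 */
        case BPF_RB_RING_SIZE:
            return ringbuf_total_data_sz(rb);             /* line 548 */
        case BPF_RB_CONS_POS:
            return smp_load_acquire(&rb->consumer_pos);   /* line 550 */
        case BPF_RB_PROD_POS:
            return smp_load_acquire(&rb->producer_pos);   /* line 552 */
        default:
            return 0;
        }
    }
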
585 sample = __bpf_ringbuf_reserve(rb_map->rb, size); in BPF_CALL_4()
643 static int __bpf_user_ringbuf_peek(struct bpf_ringbuf *rb, void **sample, u32 *size) in __bpf_user_ringbuf_peek() argument
650 prod_pos = smp_load_acquire(&rb->producer_pos); in __bpf_user_ringbuf_peek()
655 cons_pos = smp_load_acquire(&rb->consumer_pos); in __bpf_user_ringbuf_peek()
659 hdr = (u32 *)((uintptr_t)rb->data + (uintptr_t)(cons_pos & rb->mask)); in __bpf_user_ringbuf_peek()
671 if (total_len > ringbuf_total_data_sz(rb)) in __bpf_user_ringbuf_peek()
685 smp_store_release(&rb->consumer_pos, cons_pos + total_len); in __bpf_user_ringbuf_peek()
692 *sample = (void *)((uintptr_t)rb->data + in __bpf_user_ringbuf_peek()
693 (uintptr_t)((cons_pos + BPF_RINGBUF_HDR_SZ) & rb->mask)); in __bpf_user_ringbuf_peek()
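
__bpf_user_ringbuf_peek() is the kernel-as-consumer mirror of the reserve path: here the positions and headers are written by userspace, so every header read is an acquire load, and discarded samples are skipped in place (line 685 advances consumer_pos without delivering anything). A condensed reconstruction; the busy/discard bit checks and the error codes are assumptions consistent with the listed lines:

    static int __bpf_user_ringbuf_peek(struct bpf_ringbuf *rb, void **sample, u32 *size)
    {
        u32 hdr_len, sample_len, total_len, flags, *hdr;
        u64 cons_pos, prod_pos;

        prod_pos = smp_load_acquire(&rb->producer_pos);  /* line 650 */
        cons_pos = smp_load_acquire(&rb->consumer_pos);  /* line 655 */
        if (cons_pos >= prod_pos)
            return -ENODATA;

        hdr = (u32 *)((uintptr_t)rb->data + (uintptr_t)(cons_pos & rb->mask));
        hdr_len = smp_load_acquire(hdr);                 /* written by userspace */
        flags = hdr_len & (BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT);
        sample_len = hdr_len & ~flags;

        total_len = round_up(sample_len + BPF_RINGBUF_HDR_SZ, 8);
        if (total_len > ringbuf_total_data_sz(rb))       /* line 671 */
            return -E2BIG;

        if (flags & BPF_RINGBUF_BUSY_BIT)                /* producer not done yet */
            return -EBUSY;

        if (flags & BPF_RINGBUF_DISCARD_BIT) {
            /* consume the discarded sample without delivering it */
            smp_store_release(&rb->consumer_pos, cons_pos + total_len);  /* line 685 */
            return -EAGAIN;
        }

        *sample = (void *)((uintptr_t)rb->data +
                           (uintptr_t)((cons_pos + BPF_RINGBUF_HDR_SZ) & rb->mask));
        *size = sample_len;
        return 0;
    }
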
698 static void __bpf_user_ringbuf_sample_release(struct bpf_ringbuf *rb, size_t size, u64 flags) in __bpf_user_ringbuf_sample_release() argument
707 consumer_pos = rb->consumer_pos; in __bpf_user_ringbuf_sample_release()
709 smp_store_release(&rb->consumer_pos, consumer_pos + rounded_size); in __bpf_user_ringbuf_sample_release()
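
Releasing a sample only moves consumer_pos forward by the 8-byte-rounded record size; the release-store pairs with the acquire load at line 655 so userspace producers see a consistent position. Reassembled; the round_up of the header-inclusive size is an assumption:

    static void __bpf_user_ringbuf_sample_release(struct bpf_ringbuf *rb,
                                                  size_t size, u64 flags)
    {
        u64 consumer_pos;
        u32 rounded_size = round_up(size + BPF_RINGBUF_HDR_SZ, 8);

        consumer_pos = rb->consumer_pos;                 /* line 707 */
        smp_store_release(&rb->consumer_pos,
                          consumer_pos + rounded_size);  /* line 709 */
    }
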
715 struct bpf_ringbuf *rb; in BPF_CALL_4() local
724 rb = container_of(map, struct bpf_ringbuf_map, map)->rb; in BPF_CALL_4()
727 if (!atomic_try_cmpxchg(&rb->busy, &busy, 1)) in BPF_CALL_4()
736 err = __bpf_user_ringbuf_peek(rb, &sample, &size); in BPF_CALL_4()
751 __bpf_user_ringbuf_sample_release(rb, size, flags); in BPF_CALL_4()
760 atomic_set(&rb->busy, 0); in BPF_CALL_4()
763 irq_work_queue(&rb->work); in BPF_CALL_4()
765 irq_work_queue(&rb->work); in BPF_CALL_4()
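
The drain helper at lines 715-765 ties the user-ringbuf pieces together: the atomic_try_cmpxchg on rb->busy (line 727) enforces a single consumer, each peeked sample is handed to the BPF callback and then released, and on the way out the busy flag is dropped before an optional wakeup of blocked producers (lines 763/765). A heavily condensed sketch; the loop bound, the dynptr plumbing around the callback, and the return-value bookkeeping are simplified assumptions:

    BPF_CALL_4(bpf_user_ringbuf_drain, struct bpf_map *, map,
               void *, callback_fn, void *, callback_ctx, u64, flags)
    {
        struct bpf_ringbuf *rb;
        long samples = 0, ret = 0;
        int busy = 0;

        rb = container_of(map, struct bpf_ringbuf_map, map)->rb;  /* line 724 */

        if (!atomic_try_cmpxchg(&rb->busy, &busy, 1))  /* line 727: one consumer */
            return -EBUSY;

        for (;;) {  /* the real loop is bounded by a max-samples cap */
            u32 size;
            void *sample;
            int err = __bpf_user_ringbuf_peek(rb, &sample, &size);  /* line 736 */

            if (err == -ENODATA)
                break;      /* ring fully drained */
            if (err == -EAGAIN)
                continue;   /* a discarded sample was skipped; retry */
            if (err) {
                ret = err;
                break;
            }
            /* hand the sample to the BPF callback here, then release it */
            __bpf_user_ringbuf_sample_release(rb, size, flags);  /* line 751 */
            samples++;
        }

        atomic_set(&rb->busy, 0);          /* line 760 */

        if (flags & BPF_RB_FORCE_WAKEUP)
            irq_work_queue(&rb->work);     /* line 763 */
        else if (!(flags & BPF_RB_NO_WAKEUP) && samples > 0)
            irq_work_queue(&rb->work);     /* line 765 */

        return ret ? ret : samples;
    }
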