/linux/tools/testing/selftests/bpf/prog_tests/
user_ringbuf.c
    239  struct user_ring_buffer *ringbuf;  in test_user_ringbuf_post_misaligned()  (local)
    248  user_ring_buffer__free(ringbuf);  in test_user_ringbuf_post_misaligned()
    264  user_ring_buffer__free(ringbuf);  in test_user_ringbuf_post_producer_wrong_offset()
    280  user_ring_buffer__free(ringbuf);  in test_user_ringbuf_post_larger_than_ringbuf_sz()
    296  err = write_samples(ringbuf, 2);  in test_user_ringbuf_basic()
    303  user_ring_buffer__free(ringbuf);  in test_user_ringbuf_basic()
    328  user_ring_buffer__free(ringbuf);  in test_user_ringbuf_sample_full_ring_buffer()
    352  user_ring_buffer__free(ringbuf);  in test_user_ringbuf_post_alignment_autoadjust()
    370  user_ring_buffer__free(ringbuf);  in test_user_ringbuf_overfill()
    415  user_ring_buffer__free(ringbuf);  in test_user_ringbuf_discards_properly_ignored()
    [all …]
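
The calls above are the user-space producer side of a BPF_MAP_TYPE_USER_RINGBUF map. A minimal sketch of that libbpf flow, assuming a map fd is already at hand (the sample struct and function name below are illustrative, not from the tests):

    #include <errno.h>
    #include <bpf/libbpf.h>

    struct sample { long value; };      /* illustrative payload layout */

    /* Post one sample from user space; a BPF program drains it later with
     * bpf_user_ringbuf_drain(). */
    static int post_one_sample(int user_ringbuf_map_fd, long value)
    {
            struct user_ring_buffer *rb;
            struct sample *s;

            rb = user_ring_buffer__new(user_ringbuf_map_fd, NULL /* opts */);
            if (!rb)
                    return -errno;

            /* Reserve space in the mmap'ed producer ring; NULL means the
             * ring is full or the size is invalid. */
            s = user_ring_buffer__reserve(rb, sizeof(*s));
            if (!s) {
                    user_ring_buffer__free(rb);
                    return -errno;
            }
            s->value = value;
            user_ring_buffer__submit(rb, s);    /* hand it to the kernel side */

            user_ring_buffer__free(rb);
            return 0;
    }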
|
ringbuf.c
     67  static struct ring_buffer *ringbuf;  (variable)
    106  rb_fd = skel->maps.ringbuf.map_fd;  in ringbuf_write_subtest()
    136  ring_buffer__free(ringbuf);  in ringbuf_write_subtest()
    163  rb_fd = skel->maps.ringbuf.map_fd;  in ringbuf_subtest()
    212  ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd,  in ringbuf_subtest()
    266  err = ring_buffer__poll(ringbuf, 0);  in ringbuf_subtest()
    380  ring_buffer__free(ringbuf);  in ringbuf_subtest()
    419  ringbuf = ring_buffer__new(skel_n->maps.ringbuf.map_fd,  in ringbuf_n_subtest()
    441  ring_buffer__free(ringbuf);  in ringbuf_n_subtest()
    480  ringbuf = ring_buffer__new(skel_map_key->maps.ringbuf.map_fd,  in ringbuf_map_key_subtest()
    [all …]
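
These subtests repeat the standard libbpf consumer pattern: build a ring_buffer from the map fd with a per-sample callback, poll it, free it. A minimal sketch, with the callback and fd names chosen here for illustration:

    #include <errno.h>
    #include <stddef.h>
    #include <bpf/libbpf.h>

    /* Invoked once per committed record; data points into the mmap'ed ring,
     * so copy out anything that must outlive the callback. */
    static int handle_sample(void *ctx, void *data, size_t size)
    {
            return 0;       /* a negative return aborts consumption */
    }

    static int consume_ringbuf(int ringbuf_map_fd)
    {
            struct ring_buffer *rb;
            int err;

            rb = ring_buffer__new(ringbuf_map_fd, handle_sample, NULL, NULL);
            if (!rb)
                    return -errno;

            /* Wait up to 100 ms; returns the number of records consumed or a
             * negative error. A timeout of 0 only drains what is pending. */
            err = ring_buffer__poll(rb, 100);

            ring_buffer__free(rb);
            return err;
    }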
|
ringbuf_multi.c
     44  struct ring_buffer *ringbuf = NULL;  in test_ringbuf_multi()  (local)
     84  ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf1),  in test_ringbuf_multi()
     86  if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))  in test_ringbuf_multi()
     90  ring = ring_buffer__ring(ringbuf, 0);  in test_ringbuf_multi()
     94  ring = ring_buffer__ring(ringbuf, 1);  in test_ringbuf_multi()
     97  err = ring_buffer__add(ringbuf, bpf_map__fd(skel->maps.ringbuf2),  in test_ringbuf_multi()
    103  ring = ring_buffer__ring(ringbuf, 0);  in test_ringbuf_multi()
    126  err = ring_buffer__poll(ringbuf, -1);  in test_ringbuf_multi()
    131  err = ring_buffer__poll(ringbuf, 0);  in test_ringbuf_multi()
    145  ring_buffer__free(ringbuf);  in test_ringbuf_multi()
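
ringbuf_multi.c drives several ringbuf maps through a single ring_buffer instance. Roughly, the shape is as follows (the fd parameters and callback are placeholders):

    #include <stddef.h>
    #include <bpf/libbpf.h>

    static int handle_sample(void *ctx, void *data, size_t size) { return 0; }

    static void poll_two_rings(int ringbuf1_fd, int ringbuf2_fd)
    {
            struct ring_buffer *rb;

            rb = ring_buffer__new(ringbuf1_fd, handle_sample, NULL, NULL);
            if (!rb)
                    return;

            /* The second map joins the same epoll set, so one poll call
             * services both rings; ring_buffer__ring(rb, 0) / (rb, 1) expose
             * the individual rings for introspection. */
            if (ring_buffer__add(rb, ringbuf2_fd, handle_sample, NULL))
                    goto out;

            ring_buffer__poll(rb, -1);      /* -1 blocks until some ring has data */
    out:
            ring_buffer__free(rb);
    }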
|
test_ima.c
     68  struct ring_buffer *ringbuf = NULL;  in test_test_ima()  (local)
     80  ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf),  in test_test_ima()
     82  if (!ASSERT_OK_PTR(ringbuf, "ringbuf"))  in test_test_ima()
    108  err = ring_buffer__consume(ringbuf);  in test_test_ima()
    123  err = ring_buffer__consume(ringbuf);  in test_test_ima()
    154  err = ring_buffer__consume(ringbuf);  in test_test_ima()
    181  err = ring_buffer__consume(ringbuf);  in test_test_ima()
    212  err = ring_buffer__consume(ringbuf);  in test_test_ima()
    230  err = ring_buffer__consume(ringbuf);  in test_test_ima()
    238  ring_buffer__free(ringbuf);  in test_test_ima()
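
Unlike the poll-based tests above, test_ima.c drains with ring_buffer__consume(), which never blocks and only processes records that are already committed. A sketch of that trigger-then-check flow (the trigger callback is a placeholder):

    #include <bpf/libbpf.h>

    /* Fire the hook under test, then drain synchronously and report how many
     * samples arrived (negative value on error). */
    static int trigger_and_count(struct ring_buffer *rb, void (*trigger)(void))
    {
            trigger();                          /* e.g. access a measured file */
            return ring_buffer__consume(rb);
    }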
|
unpriv_bpf_disabled.c
     64  struct ring_buffer *ringbuf = NULL;  in test_unpriv_bpf_disabled_positive()  (local)
     83  ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf), process_ringbuf, NULL, NULL);  in test_unpriv_bpf_disabled_positive()
     84  if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))  in test_unpriv_bpf_disabled_positive()
     92  ASSERT_EQ(ring_buffer__consume(ringbuf), 1, "ring_buffer__consume");  in test_unpriv_bpf_disabled_positive()
    138  if (ringbuf)  in test_unpriv_bpf_disabled_positive()
    139  ring_buffer__free(ringbuf);  in test_unpriv_bpf_disabled_positive()
    252  map_fds[5] = bpf_map__fd(skel->maps.ringbuf);  in test_unpriv_bpf_disabled()
|
/linux/tools/testing/selftests/bpf/progs/
dynptr_fail.c
     58  } ringbuf SEC(".maps");
     86  bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);  in ringbuf_missing_release1()
    121  bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);  in missing_release_callback_fn()
    237  bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);  in data_slice_out_of_bounds_ringbuf()
    364  bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);  in data_slice_missing_null_check1()
    486  bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);  in invalid_write3()
    713  __imm_addr(ringbuf)  in dynptr_pruning_overwrite()
    744  __imm_addr(ringbuf)  in dynptr_pruning_stacksafe()
    816  __imm_addr(ringbuf)  in dynptr_pruning_type_confusion()
    856  __imm_addr(ringbuf)  in dynptr_var_off_overwrite()
    [all …]
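
dynptr_fail.c builds programs the verifier must reject, mostly for leaking a reserved ringbuf dynptr. For contrast, a sketch of the balanced pattern the verifier accepts: every bpf_ringbuf_reserve_dynptr() is matched by a submit or a discard on every path (the section name and payload below are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_RINGBUF);
            __uint(max_entries, 4096);
    } ringbuf SEC(".maps");

    SEC("tp/syscalls/sys_enter_nanosleep")
    int balanced_dynptr(void *ctx)
    {
            struct bpf_dynptr ptr;
            long val = 42;

            bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(val), 0, &ptr);
            if (bpf_dynptr_write(&ptr, 0, &val, sizeof(val), 0))
                    bpf_ringbuf_discard_dynptr(&ptr, 0);    /* release on error */
            else
                    bpf_ringbuf_submit_dynptr(&ptr, 0);     /* release on success */
            return 0;
    }

    char _license[] SEC("license") = "GPL";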
|
test_ringbuf.c
     19  } ringbuf SEC(".maps");
     48  sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);  in test_ringbuf()
     63  bpf_ringbuf_output(&ringbuf, sample, sizeof(*sample), flags);  in test_ringbuf()
     71  avail_data = bpf_ringbuf_query(&ringbuf, BPF_RB_AVAIL_DATA);  in test_ringbuf()
     72  ring_size = bpf_ringbuf_query(&ringbuf, BPF_RB_RING_SIZE);  in test_ringbuf()
     73  cons_pos = bpf_ringbuf_query(&ringbuf, BPF_RB_CONS_POS);  in test_ringbuf()
     74  prod_pos = bpf_ringbuf_query(&ringbuf, BPF_RB_PROD_POS);  in test_ringbuf()
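
test_ringbuf.c exercises the reserve/commit path together with bpf_ringbuf_query(). A trimmed-down sketch of that shape (map size, sample layout, and hook point are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_RINGBUF);
            __uint(max_entries, 4096);  /* power-of-2 multiple of the page size */
    } ringbuf SEC(".maps");

    struct sample {
            int pid;
            long avail;
    };

    SEC("tp/syscalls/sys_enter_getpgid")
    int ringbuf_reserve_submit(void *ctx)
    {
            struct sample *s;

            s = bpf_ringbuf_reserve(&ringbuf, sizeof(*s), 0);
            if (!s)
                    return 0;           /* reservation fails when the ring is full */

            s->pid = bpf_get_current_pid_tgid() >> 32;
            /* Queries are advisory: other producers may race with them. */
            s->avail = bpf_ringbuf_query(&ringbuf, BPF_RB_AVAIL_DATA);

            bpf_ringbuf_submit(s, 0);   /* or bpf_ringbuf_discard(s, 0) to drop */
            return 0;
    }

    char _license[] SEC("license") = "GPL";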
|
ringbuf_bench.c
     13  } ringbuf SEC(".maps");
     30  sz = bpf_ringbuf_query(&ringbuf, BPF_RB_AVAIL_DATA);  in get_flags()
     42  sample = bpf_ringbuf_reserve(&ringbuf,  in bench_ringbuf()
     55  if (bpf_ringbuf_output(&ringbuf, &sample_val,  in bench_ringbuf()
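
get_flags() in ringbuf_bench.c uses bpf_ringbuf_query(BPF_RB_AVAIL_DATA) to decide whether a commit should wake the consumer. A sketch of that adaptive-notification idea (the threshold variable and hook are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_RINGBUF);
            __uint(max_entries, 4096);
    } ringbuf SEC(".maps");

    const volatile long wakeup_threshold = 2048;    /* set from user space before load */

    /* Batch notifications: skip the wakeup while enough data is already queued,
     * force one once the backlog drops below the threshold. */
    static __always_inline __u64 pick_wakeup_flag(void)
    {
            long avail = bpf_ringbuf_query(&ringbuf, BPF_RB_AVAIL_DATA);

            return avail >= wakeup_threshold ? BPF_RB_NO_WAKEUP : BPF_RB_FORCE_WAKEUP;
    }

    SEC("tp/syscalls/sys_enter_getpgid")
    int adaptive_output(void *ctx)
    {
            long sample = 42;

            /* The flag only affects this one commit. */
            bpf_ringbuf_output(&ringbuf, &sample, sizeof(sample), pick_wakeup_flag());
            return 0;
    }

    char _license[] SEC("license") = "GPL";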
|
dynptr_success.c
     27  } ringbuf SEC(".maps");
     47  bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(write_data), 0, &ptr);  in test_read_write()
    149  err = bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);  in test_ringbuf()
    224  err = bpf_ringbuf_reserve_dynptr(&ringbuf, bytes, 0, &ptr);  in test_adjust()
    276  if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {  in test_adjust_err()
    333  if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {  in test_zero_size_dynptr()
    384  if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 123, &ptr1) != -EINVAL) {  in test_dynptr_is_null()
    396  if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr2)) {  in test_dynptr_is_null()
    446  if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr3)) {  in test_dynptr_is_rdonly()
|
test_ringbuf_write.c
     11  } ringbuf SEC(".maps");
     29  sample1 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0);  in test_ringbuf_write()
     33  sample2 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0);  in test_ringbuf_write()
|
test_ringbuf_n.c
     22  } ringbuf SEC(".maps");
     36  sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);  in test_ringbuf_n()
|
test_ringbuf_map_key.c
     19  } ringbuf SEC(".maps");
     44  sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);  in test_ringbuf_mem_map_key()
|
ima.c
     17  } ringbuf SEC(".maps");
     44  sample = bpf_ringbuf_reserve(&ringbuf, sizeof(u64), 0);  in ima_test_common()
|
test_unpriv_bpf_disabled.c
     52  } ringbuf SEC(".maps");
     72  bpf_ringbuf_output(&ringbuf, &ringbuf_val, sizeof(ringbuf_val), 0);  in sys_nanosleep_enter()
|
user_ringbuf_fail.c
     25  } ringbuf SEC(".maps");
    205  bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, dynptr);  in try_reinit_dynptr_ringbuf()
|
test_d_path_check_types.c
     12  } ringbuf SEC(".maps");
|
test_kfunc_dynptr_param.c
     24  } ringbuf SEC(".maps");
|
verifier_iterating_callbacks.c
     15  } ringbuf SEC(".maps");
    145  bpf_user_ringbuf_drain(&ringbuf, ringbuf_drain_cb, &loop_ctx, 0);  in unsafe_ringbuf_drain()
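
The drain call above is the kernel-side counterpart of the user ring buffer producer: bpf_user_ringbuf_drain() invokes a callback once per user-space sample. A sketch of a well-formed drain callback (map name, payload, and hook below are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
            __uint(max_entries, 4096);
    } user_ringbuf SEC(".maps");

    long total;     /* accumulator read back from user space */

    /* Return 0 to keep draining, 1 to stop early. */
    static long drain_cb(struct bpf_dynptr *dynptr, void *ctx)
    {
            long val = 0;

            if (bpf_dynptr_read(&val, sizeof(val), dynptr, 0, 0))
                    return 1;
            total += val;
            return 0;
    }

    SEC("tp/syscalls/sys_enter_getpgid")
    int drain_user_samples(void *ctx)
    {
            /* Returns the number of samples drained, or a negative error. */
            bpf_user_ringbuf_drain(&user_ringbuf, drain_cb, NULL, 0);
            return 0;
    }

    char _license[] SEC("license") = "GPL";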
|
map_ptr_kern.c
    660  struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;  in check_ringbuf()  (local)
    663  VERIFY(check(&ringbuf->map, map, 0, 0, page_size));  in check_ringbuf()
|
/linux/tools/testing/selftests/bpf/benchs/
bench_ringbufs.c
    124  struct ring_buffer *ringbuf;  (member)
    154  bpf_map__set_max_entries(skel->maps.ringbuf, args.ringbuf_sz);  in ringbuf_setup_skeleton()
    176  ctx->ringbuf = ring_buffer__new(bpf_map__fd(ctx->skel->maps.ringbuf),  in ringbuf_libbpf_setup()
    178  if (!ctx->ringbuf) {  in ringbuf_libbpf_setup()
    194  while (ring_buffer__poll(ctx->ringbuf, -1) >= 0) {  in ringbuf_libbpf_consumer()
    213  struct ringbuf_custom ringbuf;  (member)
    243  r = &ctx->ringbuf;  in ringbuf_custom_setup()
    244  r->map_fd = bpf_map__fd(ctx->skel->maps.ringbuf);  in ringbuf_custom_setup()
    335  ringbuf_custom_process_ring(&ctx->ringbuf);  in ringbuf_custom_consumer()
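
The benchmark resizes the ringbuf map before loading the skeleton so it can sweep ring sizes. A sketch of that setup step using the plain bpf_object API rather than a generated skeleton (object path and map name are placeholders):

    #include <bpf/libbpf.h>

    /* Resizing must happen between open and load; after load the ring is
     * mmap'ed at its final size. The size must stay a power-of-2 multiple
     * of the page size. */
    static struct bpf_object *open_with_ring_size(const char *path, __u32 ring_sz)
    {
            struct bpf_object *obj;
            struct bpf_map *map;

            obj = bpf_object__open_file(path, NULL);
            if (!obj)
                    return NULL;

            map = bpf_object__find_map_by_name(obj, "ringbuf");
            if (!map || bpf_map__set_max_entries(map, ring_sz) ||
                bpf_object__load(obj)) {
                    bpf_object__close(obj);
                    return NULL;
            }
            return obj;
    }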
|
/linux/Documentation/bpf/
other.rst
      8  ringbuf
|
ringbuf.rst
     42  Additionally, given the performance of BPF ringbuf, many use cases would just
     84  BPF ringbuf provides two sets of APIs to BPF programs:
    194  Another feature that distinguishes BPF ringbuf from perf ring buffer is
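
Line 84 of ringbuf.rst refers to the two BPF-side APIs: copy-based bpf_ringbuf_output() and the zero-copy bpf_ringbuf_reserve()/bpf_ringbuf_submit() pair. A compressed illustration of the difference (map size and hook are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_RINGBUF);
            __uint(max_entries, 4096);
    } ringbuf SEC(".maps");

    SEC("tp/syscalls/sys_enter_getpgid")
    int two_apis(void *ctx)
    {
            long staged = 1;
            long *direct;

            /* 1) Copy-based: stage the record elsewhere, then copy it in. */
            bpf_ringbuf_output(&ringbuf, &staged, sizeof(staged), 0);

            /* 2) Reservation-based: write straight into the ring, then commit
             *    with submit (or drop it with bpf_ringbuf_discard()). */
            direct = bpf_ringbuf_reserve(&ringbuf, sizeof(*direct), 0);
            if (!direct)
                    return 0;
            *direct = 2;
            bpf_ringbuf_submit(direct, 0);
            return 0;
    }

    char _license[] SEC("license") = "GPL";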
|
/linux/tools/lib/bpf/
Build
      3  btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
|
/linux/drivers/gpu/drm/panthor/
panthor_sched.c
    396  struct panthor_kernel_bo *ringbuf;  (member)
    768  } ringbuf;  (member)
    840  panthor_kernel_bo_destroy(queue->ringbuf);  in group_free_queue()
   1350  if (cs_extract >= job->ringbuf.end)  in cs_slot_process_fault_event_locked()
   1353  if (cs_extract < job->ringbuf.start)  in cs_slot_process_fault_event_locked()
   2917  memcpy(queue->ringbuf->kmap + ringbuf_insert,  in queue_run_job()
   2926  job->ringbuf.end = job->ringbuf.start + sizeof(call_instrs);  in queue_run_job()
   2934  queue->iface.input->insert = job->ringbuf.end;  in queue_run_job()
   3054  if (IS_ERR(queue->ringbuf)) {  in group_create_queue()
   3055  ret = PTR_ERR(queue->ringbuf);  in group_create_queue()
   [all …]
|
/linux/kernel/bpf/
Makefile
     12  obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
|