/tools/lib/bpf/
ringbuf.c
    103  tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));  in ring_buffer__add()
    108  tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));  in ring_buffer__add()
    116  rb->rings[rb->ring_cnt] = r;  in ring_buffer__add()
    153  e = &rb->events[rb->ring_cnt];  in ring_buffer__add()
    181  ringbuf_free_ring(rb, rb->rings[i]);  in ring_buffer__free()
    200  rb = calloc(1, sizeof(*rb));  in ring_buffer__new()
    342  cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);  in ring_buffer__poll()
    430  munmap(rb->producer_pos, rb->page_size + 2 * (rb->mask + 1));  in user_ringbuf_unmap_ring()
    505  rb->data = tmp + rb->page_size;  in user_ringbuf_map()
    527  rb = calloc(1, sizeof(*rb));  in user_ring_buffer__new()
    [all …]

libbpf.h
    1366  LIBBPF_API void ring_buffer__free(struct ring_buffer *rb);
    1367  LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd,
    1369  LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
    1370  LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);
    1371  LIBBPF_API int ring_buffer__consume_n(struct ring_buffer *rb, size_t n);
    1372  LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb);
    1385  LIBBPF_API struct ring *ring_buffer__ring(struct ring_buffer *rb,
    1494  LIBBPF_API void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size);
    1537  LIBBPF_API void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb,
    1550  LIBBPF_API void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample);
    [all …]

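The declarations above are the consumer half of libbpf's BPF ring buffer API. A minimal sketch of how they fit together, assuming a BPF_MAP_TYPE_RINGBUF map fd obtained elsewhere (the callback name and print are illustrative, not from the listing):

	#include <stdio.h>
	#include <bpf/libbpf.h>

	static int handle_event(void *ctx, void *data, size_t size)
	{
		/* data points at one sample of `size` bytes; return 0 to keep going */
		printf("got %zu-byte sample\n", size);
		return 0;
	}

	int consume_events(int map_fd)
	{
		struct ring_buffer *rb;
		int err;

		/* One epoll fd internally; further rings can be attached with
		 * ring_buffer__add(), as seen in ringbuf.c above. */
		rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
		if (!rb)
			return -1;

		/* loop until an error (e.g. -EINTR on signal) breaks us out */
		while ((err = ring_buffer__poll(rb, 100 /* timeout, ms */)) >= 0) {
			/* err is the number of samples consumed this cycle */
		}

		ring_buffer__free(rb);
		return err;
	}

ring_buffer__poll() returns the number of samples consumed, 0 on timeout, or a negative error, mirroring the epoll_wait() call visible at ringbuf.c:342.
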
/tools/testing/selftests/bpf/benchs/
run_bench_ringbufs.sh
    10  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
    15  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
    20  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
    43  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do

/tools/testing/selftests/bpf/progs/
verifier_map_in_map.c
    219  void *rb;  member
    226  void *rb;  in __rb_event_reserve()  local
    230  rb = bpf_map_lookup_elem(&rb_in_map, &rb_slot);  in __rb_event_reserve()
    231  if (!rb)  in __rb_event_reserve()
    234  rb_ctx.rb = rb;  in __rb_event_reserve()
    235  bpf_ringbuf_reserve_dynptr(rb, sz, 0, &rb_ctx.dptr);  in __rb_event_reserve()
    242  if (!ctx->rb)  in __rb_event_submit()

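The __rb_event_reserve() hits above use the dynptr flavor of the ring buffer helpers against a ring pulled out of a map-in-map. A sketch of that pattern with illustrative map, section, and payload names (the real program's layout is not shown in the listing):

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	struct ringbuf_map {
		__uint(type, BPF_MAP_TYPE_RINGBUF);
	} rb_inner SEC(".maps");

	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
		__uint(max_entries, 1);
		__type(key, int);
		__array(values, struct ringbuf_map);
	} rb_in_map SEC(".maps") = {
		.values = { [0] = &rb_inner },
	};

	SEC("tp/syscalls/sys_enter_getpid")
	int emit_event(void *ctx)
	{
		struct bpf_dynptr dptr;
		int rb_slot = 0;
		void *rb, *data;

		rb = bpf_map_lookup_elem(&rb_in_map, &rb_slot);
		if (!rb)
			return 0;

		if (bpf_ringbuf_reserve_dynptr(rb, 8, 0, &dptr)) {
			/* even a failed reserve initializes the dynptr,
			 * and the verifier requires it be released */
			bpf_ringbuf_discard_dynptr(&dptr, 0);
			return 0;
		}
		data = bpf_dynptr_data(&dptr, 0, 8);
		if (data)
			*(__u64 *)data = 42;
		bpf_ringbuf_submit_dynptr(&dptr, 0);
		return 0;
	}

	char _license[] SEC("license") = "GPL";
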
refcounted_kptr.c
    144  struct bpf_rb_node *rb;  in __read_from_tree()  local
    151  if (!rb) {  in __read_from_tree()
    164  rb = bpf_rbtree_remove(root, rb);  in __read_from_tree()
    166  if (!rb)  in __read_from_tree()
    509  struct bpf_rb_node *rb;  in BPF_PROG()  local
    520  if (!rb)  in BPF_PROG()
    523  rb = bpf_rbtree_remove(&root, rb);  in BPF_PROG()
    524  if (!rb)  in BPF_PROG()
    554  if (!rb)  in BPF_PROG()
    557  rb = bpf_rbtree_remove(&root, rb);  in BPF_PROG()
    [all …]

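bpf_rbtree_remove() here is one of the BPF rbtree kfuncs. A condensed sketch of the add/first/remove lifecycle, assuming the kfunc declarations, container_of, __contains, and __hidden from the selftests' bpf_experimental.h/bpf_misc.h headers (struct and section names below are illustrative):

	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>
	#include "bpf_experimental.h"	/* bpf_obj_new, bpf_rbtree_*, __contains */
	#include "bpf_misc.h"

	struct node_data {
		long key;
		struct bpf_rb_node node;
	};

	#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
	private(A) struct bpf_spin_lock glock;
	private(A) struct bpf_rb_root groot __contains(node_data, node);

	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
	{
		struct node_data *na = container_of(a, struct node_data, node);
		struct node_data *nb = container_of(b, struct node_data, node);

		return na->key < nb->key;
	}

	SEC("tc")
	long add_and_remove(void *ctx)
	{
		struct node_data *n = bpf_obj_new(typeof(*n));
		struct bpf_rb_node *rb;

		if (!n)
			return 1;
		n->key = 42;

		bpf_spin_lock(&glock);
		bpf_rbtree_add(&groot, &n->node, less);	/* tree now owns n */
		rb = bpf_rbtree_first(&groot);
		if (!rb) {
			bpf_spin_unlock(&glock);
			return 1;
		}
		rb = bpf_rbtree_remove(&groot, rb);	/* may return NULL */
		bpf_spin_unlock(&glock);
		if (rb)	/* removal transfers ownership back to us */
			bpf_obj_drop(container_of(rb, struct node_data, node));
		return 0;
	}

	char _license[] SEC("license") = "GPL";
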
test_ringbuf_multi.c
    61  void *rb;  in test_ringbuf()  local
    66  rb = bpf_map_lookup_elem(&ringbuf_arr, &target_ring);  in test_ringbuf()
    67  if (!rb) {  in test_ringbuf()
    72  sample = bpf_ringbuf_reserve(rb, sizeof(*sample), 0);  in test_ringbuf()

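A sketch of the reserve/submit pattern behind test_ringbuf(): pick the target ring out of an array of ring buffer maps, reserve a sample, fill it in, submit. Map names mirror the snippet above; the sample layout and attach point are illustrative:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	struct sample {
		int pid;
		int value;
	};

	struct ringbuf_map {
		__uint(type, BPF_MAP_TYPE_RINGBUF);
	} ringbuf1 SEC(".maps");

	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
		__uint(max_entries, 4);
		__type(key, int);
		__array(values, struct ringbuf_map);
	} ringbuf_arr SEC(".maps") = {
		.values = { [0] = &ringbuf1 },
	};

	int target_ring = 0;	/* which slot to publish into */

	SEC("tp/syscalls/sys_enter_getpgid")
	int test_ringbuf(void *ctx)
	{
		struct sample *sample;
		void *rb;

		rb = bpf_map_lookup_elem(&ringbuf_arr, &target_ring);
		if (!rb)
			return 0;

		sample = bpf_ringbuf_reserve(rb, sizeof(*sample), 0);
		if (!sample)	/* ring full */
			return 0;

		sample->pid = bpf_get_current_pid_tgid() >> 32;
		sample->value = 42;
		bpf_ringbuf_submit(sample, 0);
		return 0;
	}

	char _license[] SEC("license") = "GPL";
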
/tools/testing/selftests/perf_events/
mmap.c
    47  FIXTURE_VARIANT_ADD(perf_mmap, rb)  in FIXTURE_VARIANT_ADD()  argument
    85  struct perf_event_mmap_page *rb;  in FIXTURE_SETUP()  local
    116  if (rb == MAP_FAILED) {  in FIXTURE_SETUP()
    130  rb->aux_offset = AUX_OFFS;  in FIXTURE_SETUP()
    131  rb->aux_size = AUX_SIZE;  in FIXTURE_SETUP()
    137  munmap(rb, RB_SIZE);  in FIXTURE_SETUP()
    145  munmap(rb, RB_SIZE);  in FIXTURE_SETUP()
    162  ASSERT_NE(rb, MAP_FAILED);  in FIXTURE_SETUP()
    165  self->ptr = rb;  in FIXTURE_SETUP()
    172  rb->aux_offset = AUX_OFFS;  in FIXTURE_SETUP()
    [all …]

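The fixture above programs an AUX area through the perf control page. A sketch of that flow under assumed sizes (RB_SIZE/AUX_OFFS/AUX_SIZE values here are illustrative; perf_event_open() attr setup and most error handling are trimmed): map the ring buffer, advertise the AUX region via aux_offset/aux_size, then map the AUX region itself.

	#include <sys/mman.h>
	#include <linux/perf_event.h>

	#define RB_SIZE   (2 * 4096UL)	/* control page + one data page */
	#define AUX_OFFS  RB_SIZE	/* AUX area follows the data pages */
	#define AUX_SIZE  (4 * 4096UL)	/* power-of-two number of pages */

	static void *map_rb_and_aux(int fd, void **aux)
	{
		struct perf_event_mmap_page *rb;

		rb = mmap(NULL, RB_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (rb == MAP_FAILED)
			return NULL;

		/* tell the kernel where the AUX area will live before mapping it */
		rb->aux_offset = AUX_OFFS;
		rb->aux_size = AUX_SIZE;

		*aux = mmap(NULL, AUX_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
			    fd, AUX_OFFS);
		if (*aux == MAP_FAILED) {
			munmap(rb, RB_SIZE);
			return NULL;
		}
		return rb;
	}
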
/tools/include/linux/
rbtree_augmented.h
    79  RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \
    81  while (rb != stop) { \
    82  RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
    85  rb = rb_parent(&node->RBFIELD); \
    155  #define rb_color(rb) __rb_color((rb)->__rb_parent_color)  argument
    156  #define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color)  argument
    157  #define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color)  argument
    159  static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)  in rb_set_parent()  argument
    161  rb->__rb_parent_color = rb_color(rb) + (unsigned long)p;  in rb_set_parent()
    164  static inline void rb_set_parent_color(struct rb_node *rb,  in rb_set_parent_color()  argument
    [all …]

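These helpers all lean on one trick: struct rb_node is word-aligned, so the low bits of the parent pointer are free and bit 0 stores the node's colour. A standalone sketch of that encoding (local redefinitions mirror the header; they are not the header itself):

	#include <assert.h>

	#define RB_RED   0UL
	#define RB_BLACK 1UL

	struct rb_node {
		unsigned long __rb_parent_color;	/* parent pointer + colour */
		struct rb_node *rb_right;
		struct rb_node *rb_left;
	} __attribute__((aligned(sizeof(long))));

	#define rb_parent(r)  ((struct rb_node *)((r)->__rb_parent_color & ~3UL))
	#define rb_color(r)   ((r)->__rb_parent_color & 1UL)

	int main(void)
	{
		struct rb_node parent, node;

		/* rb_set_parent() computes colour + (unsigned long)p: adding
		 * works because the pointer's low bits are guaranteed zero */
		node.__rb_parent_color = (unsigned long)&parent + RB_RED;
		assert(rb_parent(&node) == &parent);
		assert(rb_color(&node) == RB_RED);

		node.__rb_parent_color += RB_BLACK;	/* as in rb_set_black() */
		assert(rb_color(&node) == RB_BLACK);
		return 0;
	}
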
interval_tree_generic.h
    154  struct rb_node *rb = node->ITRB.rb_right, *prev; \
    164  if (rb) { \
    165  ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \
    173  rb = rb_parent(&node->ITRB); \
    174  if (!rb) \
    177  node = rb_entry(rb, ITSTRUCT, ITRB); \
    178  rb = node->ITRB.rb_right; \
    179  } while (prev == rb); \

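Those backslash-continued lines come from the iter_next body that INTERVAL_TREE_DEFINE() stamps out for a caller-supplied node type. A sketch of a typical instantiation (struct name, field names, and the my_it prefix are illustrative):

	#include <linux/interval_tree_generic.h>

	struct my_interval {
		struct rb_node rb;		/* ITRB: embedded rb_node */
		unsigned long start;		/* ITSTART */
		unsigned long last;		/* ITLAST, inclusive */
		unsigned long __subtree_last;	/* ITSUBTREE: augmentation */
	};

	#define START(n) ((n)->start)
	#define LAST(n)  ((n)->last)

	/* Generates my_it_insert(), my_it_remove(), my_it_iter_first() and
	 * my_it_iter_next(); the last contains the rb_right/parent walk
	 * quoted above. */
	INTERVAL_TREE_DEFINE(struct my_interval, rb, unsigned long,
			     __subtree_last, START, LAST, static, my_it)
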
/tools/testing/selftests/powerpc/include/
reg.h
    109  #define VSX_XX1(xs, ra, rb) (((xs) & 0x1f) << 21 | ((ra) << 16) | \  argument
    110  ((rb) << 11) | (((xs) >> 5)))
    111  #define STXVD2X(xs, ra, rb) .long (0x7c000798 | VSX_XX1((xs), (ra), (rb)))  argument
    112  #define LXVD2X(xs, ra, rb) .long (0x7c000698 | VSX_XX1((xs), (ra), (rb)))  argument

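A worked expansion of these macros: VSX register numbers are 6 bits wide, so VSX_XX1() places the low 5 bits of xs at bit 21 and the high bit (xs >> 5) in the instruction's low bit, the XX1 form's split register field. reg.h emits the word via an assembler .long; here it is computed in C purely for illustration:

	#include <stdint.h>
	#include <stdio.h>

	#define VSX_XX1(xs, ra, rb) ((((xs) & 0x1f) << 21) | ((ra) << 16) | \
				     ((rb) << 11) | ((xs) >> 5))
	#define STXVD2X(xs, ra, rb) (0x7c000798 | VSX_XX1((xs), (ra), (rb)))

	int main(void)
	{
		/* stxvd2x vs32, r1, r2: xs=32 -> 5-bit field 0, split bit 1,
		 * so VSX_XX1 = 0x10000 | 0x1000 | 1 = 0x11001 */
		uint32_t insn = STXVD2X(32, 1, 2);

		printf("0x%08x\n", insn);	/* prints 0x7c011799 */
		return 0;
	}
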
/tools/perf/arch/x86/tests/
amd-ibs-period.c
    156  data_head = rb->data_head;  in rb_read()
    158  data_tail = rb->data_tail;  in rb_read()
    165  rb->data_tail += size;  in rb_read()
    176  rb->data_tail = data_head;  in rb_skip()
    178  rb->data_tail += size;  in rb_skip()
    309  void *rb;  in __ibs_config_test()  local
    331  if (rb == MAP_FAILED) {  in __ibs_config_test()
    493  void *rb;  in __ibs_period_constraint_test()  local
    518  if (rb == MAP_FAILED) {  in __ibs_period_constraint_test()
    794  void *rb;  in __ibs_l3missonly_test()  local
    [all …]

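rb_read()/rb_skip() follow the standard perf mmap consumption protocol: load data_head with acquire ordering, consume samples, then publish progress through data_tail with release ordering. A sketch of that protocol (wrap-around copying across the ring's end is elided for brevity):

	#include <stdint.h>
	#include <string.h>
	#include <linux/perf_event.h>

	static int rb_read(struct perf_event_mmap_page *rb, void *dest, size_t size)
	{
		/* acquire pairs with the kernel's store to data_head: don't
		 * read sample bytes until the head covering them is seen */
		uint64_t head = __atomic_load_n(&rb->data_head, __ATOMIC_ACQUIRE);
		uint64_t tail = rb->data_tail;	/* we are the only tail writer */

		if (head - tail < size)
			return -1;	/* not enough data yet */

		/* data area starts at data_offset; data_size is a power of two */
		memcpy(dest, (char *)rb + rb->data_offset +
			     (tail & (rb->data_size - 1)), size);

		/* publishing data_tail releases the space back to the kernel */
		__atomic_store_n(&rb->data_tail, tail + size, __ATOMIC_RELEASE);
		return 0;
	}
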
/tools/tracing/rtla/src/
timerlat_bpf.c
    98  struct ring_buffer *rb;  in timerlat_bpf_wait()  local
    101  rb = ring_buffer__new(bpf_map__fd(bpf->maps.signal_stop_tracing),  in timerlat_bpf_wait()
    103  retval = ring_buffer__poll(rb, timeout * 1000);  in timerlat_bpf_wait()
    104  ring_buffer__free(rb);  in timerlat_bpf_wait()

/tools/perf/util/
block-range.c
    15  struct rb_node *rb;  in block_range__debug()  local
    18  for (rb = rb_first(&block_ranges.root); rb; rb = rb_next(rb)) {  in block_range__debug()
    19  struct block_range *entry = rb_entry(rb, struct block_range, node);  in block_range__debug()

lock-contention.h
    29  struct rb_node rb;	/* used for sorting */  member
    102  struct rb_node rb;  member

/tools/perf/
builtin-lock.c
    96  p = container_of(*rb, struct thread_stat, rb);  in thread_stat_insert()
    100  rb = &(*rb)->rb_left;  in thread_stat_insert()
    102  rb = &(*rb)->rb_right;  in thread_stat_insert()
    107  rb_link_node(&new->rb, parent, rb);  in thread_stat_insert()
    387  p = container_of(*rb, struct lock_stat, rb);  in combine_lock_stats()
    414  rb = &(*rb)->rb_left;  in combine_lock_stats()
    416  rb = &(*rb)->rb_right;  in combine_lock_stats()
    419  rb_link_node(&st->rb, parent, rb);  in combine_lock_stats()
    435  rb = &(*rb)->rb_left;  in insert_to()
    437  rb = &(*rb)->rb_right;  in insert_to()
    [all …]

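These hits are the canonical kernel rbtree insertion idiom: walk down from the root keeping a pointer to the link slot, then rb_link_node() + rb_insert_color(). A sketch of thread_stat_insert() reconstructed around an embedded rb_node (as in lock-contention.h above); the tid key is illustrative:

	#include <linux/kernel.h>	/* container_of */
	#include <linux/rbtree.h>

	struct thread_stat {
		struct rb_node rb;	/* embedded node, used for sorting */
		int tid;
	};

	static void thread_stat_insert(struct rb_root *root, struct thread_stat *new)
	{
		struct rb_node **rb = &root->rb_node;
		struct rb_node *parent = NULL;

		while (*rb) {
			struct thread_stat *p = container_of(*rb, struct thread_stat, rb);

			parent = *rb;
			if (new->tid < p->tid)
				rb = &(*rb)->rb_left;
			else
				rb = &(*rb)->rb_right;
		}

		rb_link_node(&new->rb, parent, rb);	/* hang off the found slot */
		rb_insert_color(&new->rb, root);	/* rebalance and recolour */
	}
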
/tools/memory-model/
linux-kernel.cat
    131  * out. They have been moved into the definitions of rcu-link and rb.
    168  (* rb orders instructions just as pb does *)
    169  let rb = prop ; rcu-fence ; hb* ; pb* ; [Marked]
    171  irreflexive rb as rcu
    178  * let xb = hb | pb | rb
    192  let xbstar = (hb | pb | rb)*

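The "irreflexive rb" axiom (named rcu above) is what forbids the canonical RCU message-passing outcome: a read-side critical section that observes a write made after a grace period must also observe every write made before it. A sketch of a litmus test in LKMM's C dialect that this axiom should rule out (herd7 with linux-kernel.cat should report the exists clause unreachable; the test name is illustrative):

	C RCU-mp

	{}

	P0(int *x, int *y)
	{
		WRITE_ONCE(*x, 1);
		synchronize_rcu();
		WRITE_ONCE(*y, 1);
	}

	P1(int *x, int *y)
	{
		int r0;
		int r1;

		rcu_read_lock();
		r0 = READ_ONCE(*y);
		r1 = READ_ONCE(*x);
		rcu_read_unlock();
	}

	exists (1:r0=1 /\ 1:r1=0)
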
/tools/lib/
rbtree.c
    59  static inline void rb_set_black(struct rb_node *rb)  in rb_set_black()  argument
    61  rb->__rb_parent_color += RB_BLACK;  in rb_set_black()

/tools/testing/selftests/net/mptcp/
mptcp_connect.c
    663  ssize_t rb = sizeof(rbuf);  in copyfd_io_poll()  local
    667  if (rb + total_rlen > cfg_truncate)  in copyfd_io_poll()
    668  rb = cfg_truncate - total_rlen;  in copyfd_io_poll()
    669  len = read(peerfd, rbuf, rb);  in copyfd_io_poll()

/tools/testing/selftests/net/
tls.c
    1419  char rb[8001];  in test_mutliproc()  local
    1424  res = recv(self->cfd, rb,  in test_mutliproc()
    1425  left > sizeof(rb) ? sizeof(rb) : left, 0);  in test_mutliproc()

/tools/testing/vma/
vma_internal.h
    389  struct rb_node rb;  member

/tools/memory-model/Documentation/
explanation.txt
    30  22. RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-order, rcu-fence, and rb
    1513  RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-order, rcu-fence, and rb
    1694  Finally, the LKMM defines the RCU-before (rb) relation in terms of
    1697  details; the end result is that E ->rb F implies E must execute
    1701  Guarantee by requiring that the rb relation does not contain a cycle.
    2265  if they can be connected by a sequence of hb, pb, and rb links