| /tools/testing/selftests/bpf/progs/ |
| rbtree.c |
|    65  bpf_rbtree_add(root, &n->node, less);  in __add_three()
|    66  bpf_rbtree_add(root, &m->node, less);  in __add_three()
|    75  bpf_rbtree_add(root, &n->node, less);  in __add_three()
|   109  bpf_rbtree_add(&groot, &n->node, less);  in rbtree_add_and_remove()
|   110  bpf_rbtree_add(&groot, &m->node, less);  in rbtree_add_and_remove()
|   151  bpf_rbtree_add(&groot_array[i], &nodes[i][j]->node, less);  in rbtree_add_and_remove_array()
|   153  bpf_rbtree_add(&groot_array_one[0], &nodes[2][j]->node, less);  in rbtree_add_and_remove_array()
|   214  bpf_rbtree_add(&groot, &n->node, less);  in rbtree_first_and_remove()
|   215  bpf_rbtree_add(&groot, &m->node, less);  in rbtree_first_and_remove()
|   216  bpf_rbtree_add(&groot, &o->node, less);  in rbtree_first_and_remove()
|   [all …]
|
| rbtree_fail.c |
|    20  static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)  in less() function
|    41  bpf_rbtree_add(&groot, &n->node, less);  in rbtree_api_nolock_add()
|    56  bpf_rbtree_add(&groot, &n->node, less);  in rbtree_api_nolock_remove()
|    89  bpf_rbtree_add(&groot, &n->node, less);  in rbtree_api_remove_unadded_node()
|   147  bpf_rbtree_add(&groot, &n->node, less);  in rbtree_api_add_to_multiple_trees()
|   150  bpf_rbtree_add(&groot2, &n->node, less);  in rbtree_api_add_to_multiple_trees()
|   172  bpf_rbtree_add(&groot, res, less);  in rbtree_api_use_unchecked_remove_retval()
|   192  bpf_rbtree_add(&groot, &n->node, less);  in rbtree_api_add_release_unlock_escape()
|   238  bpf_rbtree_add(&groot, &node_a->node, less);  in less__bad_fn_call_add()
|
| refcounted_kptr_fail.c |
|    23  static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)  in less() function
|    45  bpf_rbtree_add(&groot, &n->node, less);  in rbtree_refcounted_node_ref_escapes()
|    90  bpf_rbtree_add(&groot, &n->node, less);  in rbtree_refcounted_node_ref_escapes_owning_input()
|   111  bpf_rbtree_add(&groot, &n->node, less);  in BPF_PROG()
|
| refcounted_kptr.c |
|    51  static bool less(struct bpf_rb_node *node_a, const struct bpf_rb_node *node_b)  in less() function
|    88  if (bpf_rbtree_add(root, &n->r, less)) {  in __insert_in_tree_and_list()
|   131  if (bpf_rbtree_add(root, &m->r, less)) {  in __stash_map_insert_tree()
|   468  bpf_rbtree_add(&broot, &n->r, less);  in rbtree_wrong_owner_remove_fail_b()
|   518  bpf_rbtree_add(&root, &n->r, less);  in BPF_PROG()
|   552  bpf_rbtree_add(&root, &n->r, less);  in BPF_PROG()
|
| local_kptr_stash.c |
|    78  static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)  in less() function
|   176  bpf_rbtree_add(&res->r, &n->node, less);  in stash_local_with_root()
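Every hit in this directory follows the same idiom: a less() comparator over bpf_rb_node, passed to bpf_rbtree_add() while holding the tree's spin lock (the nolock variants above live in the _fail suite precisely because the verifier rejects them). A minimal sketch of that pattern, assuming the usual selftest scaffolding; the node layout, names, and SEC placement below are illustrative rather than copied from any one file:

    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include "bpf_experimental.h"

    /* the selftests define private() locally like this */
    #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

    struct node_data {
            long key;
            struct bpf_rb_node node;
    };

    private(A) struct bpf_spin_lock glock;
    private(A) struct bpf_rb_root groot __contains(node_data, node);

    static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
    {
            struct node_data *na = container_of(a, struct node_data, node);
            struct node_data *nb = container_of(b, struct node_data, node);

            return na->key < nb->key;
    }

    SEC("tc")
    long add_one(void *ctx)
    {
            struct node_data *n = bpf_obj_new(typeof(*n));

            if (!n)
                    return 1;
            n->key = 42;

            bpf_spin_lock(&glock);
            bpf_rbtree_add(&groot, &n->node, less);  /* ownership passes to the tree */
            bpf_spin_unlock(&glock);
            return 0;
    }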
|
| /tools/build/tests/ex/empty2/ |
| README | 2  to test proper nesting into Build-less directories.
|
| /tools/include/linux/ |
| rbtree.h |
|   179  bool (*less)(struct rb_node *, const struct rb_node *))  in rb_add_cached()
|   187  if (less(node, parent)) {  in rb_add_cached()
|   207  bool (*less)(struct rb_node *, const struct rb_node *))  in rb_add()
|   214  if (less(node, parent))  in rb_add()
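Both rb_add() and rb_add_cached() take the same less callback; the cached variant additionally maintains the leftmost-node cache of an rb_root_cached. A minimal sketch of wiring a caller-defined type into rb_add() (the entry type and key field are invented for illustration):

    #include <linux/rbtree.h>

    struct entry {
            unsigned long key;
            struct rb_node node;
    };

    static bool entry_less(struct rb_node *a, const struct rb_node *b)
    {
            return rb_entry(a, struct entry, node)->key <
                   rb_entry(b, struct entry, node)->key;
    }

    static void entry_insert(struct rb_root *tree, struct entry *e)
    {
            /* walks the tree with entry_less(), then links and rebalances */
            rb_add(&e->node, tree, entry_less);
    }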
|
| /tools/testing/selftests/net/packetdrill/ |
| tcp_slow_start_slow-start-ack-per-2pkt-send-5pkt.pkt | 3  // less than the current cwnd, and not big enough to bump up cwnd.
|
| tcp_slow_start_slow-start-ack-per-2pkt-send-6pkt.pkt | 3  // less than the current cwnd, but still big enough that in slow
|
| /tools/testing/selftests/bpf/ |
| bpf_experimental.h |
|   120  bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
|   124  #define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)  argument
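The macro only pins the kfunc's two trailing implementation-detail arguments, so per the #define above these two calls are equivalent:

    bpf_rbtree_add(&groot, &n->node, less);
    bpf_rbtree_add_impl(&groot, &n->node, less, NULL, 0);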
|
| /tools/sched_ext/include/scx/ |
| common.bpf.h |
|   308  bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
|   310  #define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)  argument
|
| /tools/power/pm-graph/config/ |
| example.cfg |
|   102  # merge loops of the same call if each is less than maxgap apart (def: 100us)
|   106  # merge loops of the same call if each is less than maxlen in length (def: 5ms)
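In example.cfg these comments annotate the call-loop merge thresholds. A sample configuration sketch, assuming the adjacent options are named callloop-maxgap and callloop-maxlen as the comments suggest (the values are invented):

    # merge same-call loops closer together than this (default 100us)
    callloop-maxgap: 0.0001
    # merge same-call loops shorter than this (default 5ms)
    callloop-maxlen: 0.005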
|
| /tools/perf/Documentation/ |
| perf-amd-ibs.txt |
|    48  plethora of events, counting mode (less interference), up to 6 parallel
|    94  Latency value which is a multiple of 128 incurs a little less profiling
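The second hit concerns the IBS load-latency filter: picking a threshold that is a multiple of 128 cycles keeps the filtering cheap. A hypothetical invocation (the event syntax follows the AMD IBS documentation; the threshold and workload are illustrative):

    # sample IBS ops, keeping only loads with latency >= 128 cycles
    perf record -e ibs_op/ldlat=128/ -a -- sleep 5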
|
| itrace.txt | 25  q  quicker (less detailed) decoding
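The flag is simply appended to the usual --itrace option string; for example (a hypothetical session):

    # synthesize branch events, using the quicker (less detailed) decode mode
    perf script --itrace=bq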
|
| perf-mem.txt | 78  - Latency value which is a multiple of 128 incurs a little less profiling
|
| perf-script-perl.txt | 46  handler function; some of the less common ones aren't - those are
|
| perf-c2c.txt | 63  - Latency value which is a multiple of 128 incurs a little less profiling
|
| perf-intel-pt.txt |
|    157  the values are less accurate because the timing is less accurate.
|    173  that becomes less significant if the number of cycles is large. It may also be
|    983  q  quicker (less detailed) decoding
|   1158  but much less detailed. Specifically, with the q option, the decoder does not
|   1187  less detail. The decoder decodes only extended PSB (PSB+) packets, getting the
|
| perf-record.txt |
|   596  Also at some cases executing less output write syscalls with bigger data size
|   597  can take less time than executing more output write syscalls with smaller data
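This passage reads like the --mmap-flush trade-off discussion (an assumption; the snippet is too short to be sure). If so, raising the flush threshold batches trace data into fewer, larger writes, e.g. (illustrative invocation):

    # extract at least 16M from the mmap buffer per write syscall
    perf record --mmap-flush=16M -e cycles -- ./workload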
|
| /tools/scripts/ |
| utilities.mak | 195  # To compare if a 3 component version is less than another, first use was to
|
| syscall.tbl | 9  # and implement the less feature-full calls in user space.
|
| /tools/net/sunrpc/xdrgen/ |
| README | 33  2. rpcgen-generated code is believed to be less efficient than code
|
| /tools/power/pm-graph/ |
| README |
|   417  # merge loops of the same call if each is less than maxgap apart (def: 100us)
|   421  # merge loops of the same call if each is less than maxlen in length (def: 5ms)
|
| /tools/memory-model/Documentation/ |
| recipes.txt | 481  first. Preserving order requires nothing less than full barriers:
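This line introduces the store-buffering recipe. A minimal sketch of the two-CPU pattern it refers to (names invented): each CPU stores to one variable, then loads the other, and removing either smp_mb() admits the outcome r0 == 0 && r1 == 0.

    int x, y, r0, r1;

    void cpu0(void)                 /* runs concurrently with cpu1() */
    {
            WRITE_ONCE(x, 1);
            smp_mb();               /* order the store to x before the load from y */
            r0 = READ_ONCE(y);
    }

    void cpu1(void)
    {
            WRITE_ONCE(y, 1);
            smp_mb();               /* order the store to y before the load from x */
            r1 = READ_ONCE(x);
    }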
|
| access-marking.txt | 121  that provides the compiler much less scope for mischievous optimizations.
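A standard illustration of that reduced scope (a sketch, not taken from the file): with a plain access the compiler may hoist the load out of the loop entirely, while READ_ONCE() forces a fresh load each iteration and forbids fusing, tearing, and invented loads.

    /* Plain access: the load may be hoisted, so the loop can spin
     * forever even after another CPU sets flag. */
    while (!flag)
            cpu_relax();

    /* Marked access: a fresh load every iteration. */
    while (!READ_ONCE(flag))
            cpu_relax();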
|