/tools/testing/selftests/bpf/prog_tests/
  cgroup_hierarchical_stats.c
     54  } cgroups[] = {  (variable)
     64  #define N_CGROUPS ARRAY_SIZE(cgroups)
    133  fd = create_and_get_cgroup(cgroups[i].path);  in setup_cgroups()
    137  cgroups[i].fd = fd;  in setup_cgroups()
    138  cgroups[i].id = get_cgroup_id(cgroups[i].path);  in setup_cgroups()
    147  close(cgroups[i].fd);  in cleanup_cgroups()
    175  if (join_parent_cgroup(cgroups[i].path))  in attach_processes()
    220  attach_counters[i] = get_attach_counter(cgroups[i].id,  in check_attach_counters()
    221  cgroups[i].name);  in check_attach_counters()
    288  err = setup_cgroup_iter(*skel, cgroups[i].fd, cgroups[i].name);  in setup_progs()
    [all …]
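The selftest above builds its hierarchy through the cgroup_helpers.h API from tools/testing/selftests/bpf. A minimal sketch of that setup/teardown pattern, assuming those helpers; the "/test" cgroup path is a made-up example:

    /* Sketch only: relies on tools/testing/selftests/bpf/cgroup_helpers.h;
     * the "/test" cgroup path is hypothetical. */
    #include <unistd.h>
    #include <stdio.h>
    #include "cgroup_helpers.h"

    int main(void)
    {
            int fd;
            unsigned long long id;

            if (setup_cgroup_environment())       /* prepare a private cgroup2 root */
                    return 1;

            fd = create_and_get_cgroup("/test");  /* mkdir and open the cgroup dir */
            if (fd < 0)
                    goto out;

            id = get_cgroup_id("/test");          /* kernfs node id, as seen by BPF */
            printf("cgroup id: %llu\n", id);
            close(fd);
    out:
            cleanup_cgroup_environment();         /* tear down everything we made */
            return 0;
    }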
|
/tools/cgroup/
  memcg_shrinker.py
     11  cgroups = {}
     17  cgroups[ino] = path
     20  return cgroups
     44  cgroups = scan_cgroups("/sys/fs/cgroup/")
     58  cg = cgroups[ino]
|
/tools/testing/selftests/bpf/progs/
  percpu_alloc_cgrp_local_storage.c
     30  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,  in BPF_PROG()
     56  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);  in BPF_PROG()
     89  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);  in BPF_PROG()
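The entries above exercise BPF cgroup local storage. A minimal sketch of the bpf_cgrp_storage_get() pattern these selftests use, assuming vmlinux.h and libbpf's helper headers; the map and program names are illustrative:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    struct {
            __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
            __uint(map_flags, BPF_F_NO_PREALLOC);
            __type(key, int);
            __type(value, long);
    } cgrp_storage SEC(".maps");

    SEC("tp_btf/sys_enter")
    int BPF_PROG(count_enter, struct pt_regs *regs, long id)
    {
            struct task_struct *task = bpf_get_current_task_btf();
            long *ptr;

            /* Per-cgroup counter keyed by the task's cgroup v2 membership. */
            ptr = bpf_cgrp_storage_get(&cgrp_storage, task->cgroups->dfl_cgrp,
                                       0, BPF_LOCAL_STORAGE_GET_F_CREATE);
            if (ptr)
                    __sync_fetch_and_add(ptr, 1);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";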
|
  cgrp_ls_recursion.c
     59  __on_update(task->cgroups->dfl_cgrp);  in BPF_PROG()
     92  __on_enter(regs, id, task->cgroups->dfl_cgrp);  in BPF_PROG()
|
  rcu_read_lock.c
     34  struct css_set *cgroups;  in get_cgroup_id()  (local)
     42  cgroups = task->cgroups;  in get_cgroup_id()
     43  if (!cgroups)  in get_cgroup_id()
     45  cgroup_id = cgroups->dfl_cgrp->kn->id;  in get_cgroup_id()
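rcu_read_lock.c wraps that css_set walk in the bpf_rcu_read_lock()/bpf_rcu_read_unlock() kfuncs. A condensed sketch of the pattern, assuming vmlinux.h and bpf_helpers.h for the struct definitions and the __ksym attribute:

    void bpf_rcu_read_lock(void) __ksym;
    void bpf_rcu_read_unlock(void) __ksym;

    static __u64 task_dfl_cgroup_id(struct task_struct *task)
    {
            struct css_set *cgroups;
            __u64 cgroup_id = 0;

            bpf_rcu_read_lock();
            cgroups = task->cgroups;  /* RCU-protected; only valid in this section */
            if (cgroups)
                    cgroup_id = cgroups->dfl_cgrp->kn->id;
            bpf_rcu_read_unlock();
            return cgroup_id;
    }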
|
  cgrp_ls_sleepable.c
     86  __no_rcu_lock(task->cgroups->dfl_cgrp);  in no_rcu_lock()
    118  cgrp = task->cgroups->dfl_cgrp;  in yes_rcu_lock()
|
  cgrp_ls_tp_btf.c
     86  __on_enter(regs, id, task->cgroups->dfl_cgrp);  in BPF_PROG()
    124  __on_exit(regs, id, task->cgroups->dfl_cgrp);  in BPF_PROG()
|
  profiler.inc.h
    255  struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);  in populate_cgroup_info()
    266  BPF_CORE_READ(task, cgroups, subsys[i]);  in populate_cgroup_info()
    627  struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);  in raw_tracepoint__sched_process_exit()
|
/tools/perf/util/
  cgroup.c
    562  down_write(&env->cgroups.lock);  in cgroup__findnew()
    563  cgrp = __cgroup__findnew(&env->cgroups.tree, id, true, path);  in cgroup__findnew()
    564  up_write(&env->cgroups.lock);  in cgroup__findnew()
    577  down_read(&env->cgroups.lock);  in cgroup__find()
    578  cgrp = __cgroup__findnew(&env->cgroups.tree, id, false, NULL);  in cgroup__find()
    579  up_read(&env->cgroups.lock);  in cgroup__find()
    588  down_write(&env->cgroups.lock);  in perf_env__purge_cgroups()
    589  while (!RB_EMPTY_ROOT(&env->cgroups.tree)) {  in perf_env__purge_cgroups()
    590  node = rb_first(&env->cgroups.tree);  in perf_env__purge_cgroups()
    593  rb_erase(node, &env->cgroups.tree);  in perf_env__purge_cgroups()
    [all …]
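The cgroup.c hits show the locking split perf uses around its cgroup rb-tree: cgroup__find() only takes the read side, while cgroup__findnew() takes the write side because it may insert. A simplified illustration of that split, substituting pthread_rwlock_t and a linked list for perf's rwsem and rb-tree (all names hypothetical):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct cg_node {
            uint64_t id;
            struct cg_node *next;
    };

    static struct cg_node *cg_list;
    static pthread_rwlock_t cg_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Caller must hold cg_lock (read or write side). */
    static struct cg_node *__cg_find(uint64_t id)
    {
            for (struct cg_node *n = cg_list; n; n = n->next)
                    if (n->id == id)
                            return n;
            return NULL;
    }

    /* Lookup only: the read lock suffices, readers run in parallel. */
    struct cg_node *cg_find(uint64_t id)
    {
            pthread_rwlock_rdlock(&cg_lock);
            struct cg_node *n = __cg_find(id);
            pthread_rwlock_unlock(&cg_lock);
            return n;
    }

    /* Lookup-or-insert: needs the write lock since it may modify the list. */
    struct cg_node *cg_findnew(uint64_t id)
    {
            pthread_rwlock_wrlock(&cg_lock);
            struct cg_node *n = __cg_find(id);
            if (!n) {
                    n = calloc(1, sizeof(*n));
                    if (n) {
                            n->id = id;
                            n->next = cg_list;
                            cg_list = n;
                    }
            }
            pthread_rwlock_unlock(&cg_lock);
            return n;
    }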
|
  bpf-trace-summary.c
     23  static struct rb_root cgroups = RB_ROOT;  (variable)
     54  read_all_cgroups(&cgroups);  in trace_prepare_bpf_summary()
    339  struct cgroup *cgrp = __cgroup__find(&cgroups, data->key);  in print_cgroup_stat()
    448  if (!RB_EMPTY_ROOT(&cgroups)) {  in trace_cleanup_bpf_summary()
    451  rbtree_postorder_for_each_entry_safe(cgrp, tmp, &cgroups, node)  in trace_cleanup_bpf_summary()
    454  cgroups = RB_ROOT;  in trace_cleanup_bpf_summary()
|
  cgroup.h
     31  int evlist__expand_cgroup(struct evlist *evlist, const char *cgroups, bool open_cgroup);
|
  lock-contention.h
    151  struct rb_root cgroups;  (member)
|
  bpf_lock_contention.c
    384  read_all_cgroups(&con->cgroups);  in lock_contention_prepare()
    622  struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);  in lock_contention_get_name()
    840  while (!RB_EMPTY_ROOT(&con->cgroups)) {  in lock_contention_finish()
    841  struct rb_node *node = rb_first(&con->cgroups);  in lock_contention_finish()
    844  rb_erase(node, &con->cgroups);  in lock_contention_finish()
|
  env.h
    123  } cgroups;  (member)
|
/tools/perf/Documentation/
  perf-bench.txt
    128  --cgroups=::
    129  Names of cgroups for sender and receiver, separated by a comma.
    131  Note that perf doesn't create nor delete the cgroups, so users should
    132  make sure that the cgroups exist and are accessible before use.
    154  (executing 1000000 pipe operations between cgroups)
|
  perf-lock.txt
    217  Show lock contention only in the given cgroups (comma separated list).
|
  perf-trace.txt
     75  Look for cgroups to set at the /sys/fs/cgroup/perf_event directory, then
     89  Multiple cgroups:
|
/tools/perf/tests/shell/
  record_bpf_filter.sh
    154  -a --all-cgroups --synth=cgroup -o "${perfdata}" true 2> /dev/null
|
  record.sh
    216  if ! perf record -aB --synth=cgroup --all-cgroups -o "${perfdata}" ${testprog} 2> /dev/null
|
/tools/perf/util/bpf_skel/
  off_cpu.bpf.c
    155  return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);  in get_cgroup_id()
    166  cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_subsys_id], cgroup);  in get_cgroup_id()
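off_cpu.bpf.c resolves the task's cgroup id with CO-RE reads, one form per cgroup version; syscall_summary.bpf.c and bperf_cgroup.bpf.c below use the same v1 read. A sketch of both forms, assuming vmlinux.h; the subsys_id argument stands in for the perf_event subsystem index that the real code resolves at load time:

    #include "vmlinux.h"
    #include <bpf/bpf_core_read.h>

    static __u64 task_cgroup_id(struct task_struct *t, bool v2, int subsys_id)
    {
            struct cgroup *cgrp;

            if (v2)  /* cgroup v2: the task's cgroup on the default hierarchy */
                    return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);

            /* cgroup v1: go through the controller's css to reach the cgroup */
            cgrp = BPF_CORE_READ(t, cgroups, subsys[subsys_id], cgroup);
            return BPF_CORE_READ(cgrp, kn, id);
    }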
|
  syscall_summary.bpf.c
     62  cgrp = BPF_CORE_READ(task, cgroups, subsys[perf_subsys_id], cgroup);  in get_current_cgroup_id()
|
  bperf_cgroup.bpf.c
     97  cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_subsys_id], cgroup);  in get_cgroup_v1_idx()
|
/tools/perf/util/bpf_skel/vmlinux/
  vmlinux.h
    107  struct css_set *cgroups;  (member)
|
/tools/testing/selftests/mm/
  charge_reserved_hugetlb.sh
    506  echo Test normal case, multiple cgroups.
    552  echo Test normal case with write, multiple cgroups.
|
/tools/bpf/bpftool/Documentation/
  bpftool-cgroup.rst
     63  Iterate over all cgroups in *CGROUP_ROOT* and list all attached programs.
|