
Searched refs:cgroups (results 1–25 of 31, sorted by relevance)


/tools/testing/selftests/bpf/prog_tests/
  cgroup_hierarchical_stats.c
     54: } cgroups[] = {  (variable)
     64: #define N_CGROUPS ARRAY_SIZE(cgroups)
    133: fd = create_and_get_cgroup(cgroups[i].path);  (in setup_cgroups())
    137: cgroups[i].fd = fd;  (in setup_cgroups())
    138: cgroups[i].id = get_cgroup_id(cgroups[i].path);  (in setup_cgroups())
    147: close(cgroups[i].fd);  (in cleanup_cgroups())
    175: if (join_parent_cgroup(cgroups[i].path))  (in attach_processes())
    220: attach_counters[i] = get_attach_counter(cgroups[i].id,  (in check_attach_counters())
    221:                                         cgroups[i].name);
    288: err = setup_cgroup_iter(*skel, cgroups[i].fd, cgroups[i].name);  (in setup_progs())
    [all …]
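
The hits above exercise the selftest cgroup helpers from tools/testing/selftests/bpf/cgroup_helpers.h. A minimal sketch of the same setup pattern, assuming those helpers behave as in the selftests and using a hypothetical two-entry table (not the test's actual one):

    #include <stddef.h>
    #include "cgroup_helpers.h"

    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

    /* Hypothetical table mirroring the shape of the test's cgroups[] array. */
    static struct {
            const char *path;       /* path relative to the cgroup root */
            unsigned long long id;  /* kernfs inode id, used as a map key */
            int fd;
    } cgroups[] = {
            { .path = "/parent" },
            { .path = "/parent/child" },
    };

    /* Assumes setup_cgroup_environment() has already run. */
    static int setup_cgroups(void)
    {
            for (size_t i = 0; i < ARRAY_SIZE(cgroups); i++) {
                    int fd = create_and_get_cgroup(cgroups[i].path);

                    if (fd < 0)
                            return fd;
                    cgroups[i].fd = fd;
                    cgroups[i].id = get_cgroup_id(cgroups[i].path);
            }
            return 0;
    }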
/tools/cgroup/
  memcg_shrinker.py
     11: cgroups = {}
     17: cgroups[ino] = path
     20: return cgroups
     44: cgroups = scan_cgroups("/sys/fs/cgroup/")
     58: cg = cgroups[ino]
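
memcg_shrinker.py builds a dictionary from cgroup inode number to cgroup path, because the kernel's shrinker interfaces identify a memory cgroup by the inode of its cgroupfs directory. The script itself is Python; below is a standalone C sketch of the same scan that just prints the ino/path pairs a real tool would store in a map:

    #define _XOPEN_SOURCE 500
    #include <ftw.h>
    #include <stdio.h>
    #include <sys/stat.h>

    /* Print "inode path" for every directory under the cgroup v2 mount;
     * on cgroupfs, a directory's inode number identifies the cgroup. */
    static int visit(const char *path, const struct stat *st,
                     int type, struct FTW *ftw)
    {
            if (type == FTW_D)
                    printf("%llu %s\n", (unsigned long long)st->st_ino, path);
            return 0;
    }

    int main(void)
    {
            return nftw("/sys/fs/cgroup", visit, 16, FTW_PHYS);
    }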
/tools/testing/selftests/bpf/progs/
  percpu_alloc_cgrp_local_storage.c
     30: e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,  (in BPF_PROG())
     56: e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);  (in BPF_PROG())
     89: e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);  (in BPF_PROG())
  cgrp_ls_recursion.c
     59: __on_update(task->cgroups->dfl_cgrp);  (in BPF_PROG())
     92: __on_enter(regs, id, task->cgroups->dfl_cgrp);  (in BPF_PROG())
  rcu_read_lock.c
     34: struct css_set *cgroups;  (local, in get_cgroup_id())
     42: cgroups = task->cgroups;  (in get_cgroup_id())
     43: if (!cgroups)  (in get_cgroup_id())
     45: cgroup_id = cgroups->dfl_cgrp->kn->id;  (in get_cgroup_id())
  cgrp_ls_sleepable.c
     86: __no_rcu_lock(task->cgroups->dfl_cgrp);  (in no_rcu_lock())
    118: cgrp = task->cgroups->dfl_cgrp;  (in yes_rcu_lock())
  cgrp_ls_tp_btf.c
     86: __on_enter(regs, id, task->cgroups->dfl_cgrp);  (in BPF_PROG())
    124: __on_exit(regs, id, task->cgroups->dfl_cgrp);  (in BPF_PROG())
  profiler.inc.h
    255: struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);  (in populate_cgroup_info())
    266: BPF_CORE_READ(task, cgroups, subsys[i]);  (in populate_cgroup_info())
    627: struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);  (in raw_tracepoint__sched_process_exit())
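
The recurring expression in these programs is task->cgroups->dfl_cgrp, the task's cgroup on the default (v2) hierarchy. task->cgroups is RCU-protected, so tracing programs either wrap the dereference in the bpf_rcu_read_lock()/bpf_rcu_read_unlock() kfuncs (what rcu_read_lock.c tests) or go through BPF_CORE_READ() (as profiler.inc.h does). A minimal hypothetical program showing the locked variant, not taken from any of the files above:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    void bpf_rcu_read_lock(void) __ksym;
    void bpf_rcu_read_unlock(void) __ksym;

    SEC("tp_btf/sys_enter")
    int BPF_PROG(on_enter, struct pt_regs *regs, long id)
    {
            struct task_struct *task = bpf_get_current_task_btf();
            struct css_set *cgroups;
            __u64 cgroup_id = 0;

            bpf_rcu_read_lock();            /* task->cgroups is RCU-protected */
            cgroups = task->cgroups;
            if (cgroups)
                    cgroup_id = cgroups->dfl_cgrp->kn->id;
            bpf_rcu_read_unlock();

            bpf_printk("dfl cgroup id: %llu", cgroup_id);
            return 0;
    }

    char _license[] SEC("license") = "GPL";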
/tools/perf/util/
  cgroup.c
    562: down_write(&env->cgroups.lock);  (in cgroup__findnew())
    563: cgrp = __cgroup__findnew(&env->cgroups.tree, id, true, path);  (in cgroup__findnew())
    564: up_write(&env->cgroups.lock);  (in cgroup__findnew())
    577: down_read(&env->cgroups.lock);  (in cgroup__find())
    578: cgrp = __cgroup__findnew(&env->cgroups.tree, id, false, NULL);  (in cgroup__find())
    579: up_read(&env->cgroups.lock);  (in cgroup__find())
    588: down_write(&env->cgroups.lock);  (in perf_env__purge_cgroups())
    589: while (!RB_EMPTY_ROOT(&env->cgroups.tree)) {  (in perf_env__purge_cgroups())
    590: node = rb_first(&env->cgroups.tree);  (in perf_env__purge_cgroups())
    593: rb_erase(node, &env->cgroups.tree);  (in perf_env__purge_cgroups())
    [all …]
  bpf-trace-summary.c
     23: static struct rb_root cgroups = RB_ROOT;  (variable)
     54: read_all_cgroups(&cgroups);  (in trace_prepare_bpf_summary())
    339: struct cgroup *cgrp = __cgroup__find(&cgroups, data->key);  (in print_cgroup_stat())
    448: if (!RB_EMPTY_ROOT(&cgroups)) {  (in trace_cleanup_bpf_summary())
    451: rbtree_postorder_for_each_entry_safe(cgrp, tmp, &cgroups, node)  (in trace_cleanup_bpf_summary())
    454: cgroups = RB_ROOT;  (in trace_cleanup_bpf_summary())
  cgroup.h
     31: int evlist__expand_cgroup(struct evlist *evlist, const char *cgroups, bool open_cgroup);
  lock-contention.h
    151: struct rb_root cgroups;  (member)
  bpf_lock_contention.c
    384: read_all_cgroups(&con->cgroups);  (in lock_contention_prepare())
    622: struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);  (in lock_contention_get_name())
    840: while (!RB_EMPTY_ROOT(&con->cgroups)) {  (in lock_contention_finish())
    841: struct rb_node *node = rb_first(&con->cgroups);  (in lock_contention_finish())
    844: rb_erase(node, &con->cgroups);  (in lock_contention_finish())
  env.h
    123: } cgroups;  (member)
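
perf keeps the cgroups it has seen in an rb-tree keyed by id, with a reader/writer lock around it: lookups that may insert (cgroup__findnew) take the lock for writing, pure lookups (cgroup__find) for reading. A minimal sketch of that locking pattern, using a pthread rwlock in place of perf's rwsem wrapper and a hypothetical __find_or_insert() in place of __cgroup__findnew():

    #include <pthread.h>

    struct cgroup_tree {
            pthread_rwlock_t lock;
            void *root;     /* stand-in for a struct rb_root */
    };

    /* Hypothetical lookup that also inserts a node when create != 0. */
    void *__find_or_insert(void *root, unsigned long long id, int create);

    void *cgroup_findnew(struct cgroup_tree *t, unsigned long long id)
    {
            void *cgrp;

            pthread_rwlock_wrlock(&t->lock);        /* may modify the tree */
            cgrp = __find_or_insert(t->root, id, 1);
            pthread_rwlock_unlock(&t->lock);
            return cgrp;
    }

    void *cgroup_find(struct cgroup_tree *t, unsigned long long id)
    {
            void *cgrp;

            pthread_rwlock_rdlock(&t->lock);        /* read-only lookup */
            cgrp = __find_or_insert(t->root, id, 0);
            pthread_rwlock_unlock(&t->lock);
            return cgrp;
    }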
/tools/perf/Documentation/
  perf-bench.txt
    128: --cgroups=::
    129: Names of cgroups for sender and receiver, separated by a comma.
    131: Note that perf doesn't create nor delete the cgroups, so users should
    132: make sure that the cgroups exist and are accessible before use.
    154: (executing 1000000 pipe operations between cgroups)
  perf-lock.txt
    217: Show lock contention only in the given cgroups (comma separated list).
  perf-trace.txt
     75: Look for cgroups to set at the /sys/fs/cgroup/perf_event directory, then
     89: Multiple cgroups:
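
Per the perf-bench.txt excerpt, perf bench does not create the cgroups itself. A hypothetical invocation, assuming a cgroup v2 mount at /sys/fs/cgroup and made-up cgroup names c1 and c2 (on cgroupfs, mkdir creates the cgroups):

    # mkdir /sys/fs/cgroup/c1 /sys/fs/cgroup/c2
    # perf bench sched pipe --cgroups=c1,c2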
/tools/perf/tests/shell/
  record_bpf_filter.sh
    154: -a --all-cgroups --synth=cgroup -o "${perfdata}" true 2> /dev/null
  record.sh
    216: if ! perf record -aB --synth=cgroup --all-cgroups -o "${perfdata}" ${testprog} 2> /dev/null
/tools/perf/util/bpf_skel/
  off_cpu.bpf.c
    155: return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);  (in get_cgroup_id())
    166: cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_subsys_id], cgroup);  (in get_cgroup_id())
  syscall_summary.bpf.c
     62: cgrp = BPF_CORE_READ(task, cgroups, subsys[perf_subsys_id], cgroup);  (in get_current_cgroup_id())
  bperf_cgroup.bpf.c
     97: cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_subsys_id], cgroup);  (in get_cgroup_v1_idx())
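
These skeletons share one idiom: BPF_CORE_READ() walks from the task either to the v2 default-hierarchy cgroup (cgroups->dfl_cgrp) or, for cgroup v1, through the controller's css slot (cgroups->subsys[...]->cgroup). A hedged sketch of a helper combining both paths; subsys_id stands in for the perf_subsys_id the real programs resolve at load time:

    #include "vmlinux.h"
    #include <bpf/bpf_core_read.h>

    /* Return the kernfs id of the task's cgroup. subsys_id < 0 means
     * "use the v2 default hierarchy"; otherwise it indexes the v1
     * controller's css in task->cgroups->subsys[]. */
    static __u64 task_cgroup_id(struct task_struct *t, int subsys_id)
    {
            struct cgroup *cgrp;

            if (subsys_id < 0)
                    return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);

            cgrp = BPF_CORE_READ(t, cgroups, subsys[subsys_id], cgroup);
            return BPF_CORE_READ(cgrp, kn, id);
    }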
/tools/perf/util/bpf_skel/vmlinux/
  vmlinux.h
    107: struct css_set *cgroups;  (member)
/tools/testing/selftests/mm/
  charge_reserved_hugetlb.sh
    506: echo Test normal case, multiple cgroups.
    552: echo Test normal case with write, multiple cgroups.
/tools/bpf/bpftool/Documentation/
  bpftool-cgroup.rst
     63: Iterate over all cgroups in *CGROUP_ROOT* and list all attached programs.
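
That line documents bpftool's cgroup tree subcommand, which walks a cgroup hierarchy and lists the programs attached at each level. A usage sketch, assuming the usual cgroup v2 mount point:

    # bpftool cgroup tree /sys/fs/cgroup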
