/linux-6.3-rc2/kernel/cgroup/

freezer.c
     24  while ((cgrp = cgroup_parent(cgrp))) {    in cgroup_propagate_frozen()
     30  cgrp->nr_descendants) {    in cgroup_propagate_frozen()
     64  cgrp->freezer.nr_frozen_tasks == __cgroup_task_count(cgrp);    in cgroup_update_frozen()
    109  struct cgroup *cgrp;    in cgroup_enter_frozen() (local)
    117  cgroup_inc_frozen_cnt(cgrp);    in cgroup_enter_frozen()
    118  cgroup_update_frozen(cgrp);    in cgroup_enter_frozen()
    133  struct cgroup *cgrp;    in cgroup_leave_frozen() (local)
    138  cgroup_dec_frozen_cnt(cgrp);    in cgroup_leave_frozen()
    139  cgroup_update_frozen(cgrp);    in cgroup_leave_frozen()
    213  if (cgrp->nr_descendants == cgrp->freezer.nr_frozen_descendants)    in cgroup_do_freeze()
    [all …]

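freezer.c implements the cgroup2 freezer: cgroup_do_freeze() and cgroup_update_frozen() count frozen tasks and propagate the frozen state up the hierarchy through cgroup_propagate_frozen(). From userspace the machinery is driven by writing to the cgroup.freeze interface file; a minimal sketch (the cgroup path is illustrative):

/* Freeze a cgroup2 group from userspace; writing "0" thaws it again. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/fs/cgroup/mygroup/cgroup.freeze", O_WRONLY);

	if (fd < 0) {
		perror("open cgroup.freeze");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}

Once every task in the subtree has actually entered the frozen state, the "frozen" key in that cgroup's cgroup.events file flips to 1.
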
rstat.c
     62  rstatc->updated_next = cgrp;    in cgroup_rstat_updated()
     68  prstatc->updated_children = cgrp;    in cgroup_rstat_updated()
     70  cgrp = parent;    in cgroup_rstat_updated()
    289  if (!cgrp->rstat_cpu) {    in cgroup_rstat_init()
    291  if (!cgrp->rstat_cpu)    in cgroup_rstat_init()
    310  cgroup_rstat_flush(cgrp);    in cgroup_rstat_exit()
    321  free_percpu(cgrp->rstat_cpu);    in cgroup_rstat_exit()
    322  cgrp->rstat_cpu = NULL;    in cgroup_rstat_exit()
    383  delta = cgrp->bstat;    in cgroup_base_stat_flush()
    499  if (cgroup_parent(cgrp)) {    in cgroup_base_stat_cputime_show()
    [all …]

cgroup.c
    361  return cgrp->dom_cgrp != cgrp;    in cgroup_is_threaded()
    427  while ((cgrp = cgroup_parent(cgrp))) {    in cgroup_is_valid_domain()
    541  cgrp = cgroup_parent(cgrp);    in cgroup_e_css_by_mask()
    575  cgrp = cgroup_parent(cgrp);    in cgroup_e_css()
    607  cgrp = cgroup_parent(cgrp);    in cgroup_get_e_css()
    854  cgrp = cgroup_parent(cgrp);    in cgroup_update_populated()
   1185  link->cgrp = cgrp;    in link_css_set()
   1359  struct cgroup *cgrp = &root->cgrp;    in cgroup_destroy_root() (local)
   2011  cgrp->self.cgroup = cgrp;    in init_cgroup_housekeeping()
   2013  cgrp->dom_cgrp = cgrp;    in init_cgroup_housekeeping()
    [all …]

cgroup-internal.h
     27  #define TRACE_CGROUP_PATH(type, cgrp, ...) \    (argument)
     33  cgroup_path(cgrp, trace_cgroup_path, \
     35  trace_cgroup_##type(cgrp, trace_cgroup_path, \
     96  struct cgroup *cgrp;    (member)
    186  return !(cgrp->self.flags & CSS_ONLINE);    in cgroup_is_dead()
    222  bool cgroup_on_dfl(const struct cgroup *cgrp);
    223  bool cgroup_is_thread_root(struct cgroup *cgrp);
    224  bool cgroup_is_threaded(struct cgroup *cgrp);
    267  int cgroup_task_count(const struct cgroup *cgrp);
    272  int cgroup_rstat_init(struct cgroup *cgrp);
    [all …]

cgroup-v1.c
    318  l->owner = cgrp;    in cgroup_pidlist_find_create()
    493  struct cgroup *cgrp;    in __cgroup1_procs_write() (local)
    500  if (!cgrp)    in __cgroup1_procs_write()
    548  struct cgroup *cgrp;    in cgroup_release_agent_write() (local)
    563  if (!cgrp)    in cgroup_release_agent_write()
    702  struct cgroup *cgrp;    in cgroupstats_build() (local)
    718  if (!cgrp || !cgroup_tryget(cgrp)) {    in cgroupstats_build()
    747  cgroup_put(cgrp);    in cgroupstats_build()
    753  if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&    in cgroup1_check_for_release()
    754  !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))    in cgroup1_check_for_release()
    [all …]

/linux-6.3-rc2/drivers/gpu/drm/nouveau/nvkm/engine/fifo/

cgrp.c
    178  nvkm_chid_put(runl->cgid, cgrp->id, &cgrp->lock);    in nvkm_cgrp_del()
    182  kfree(cgrp);    in nvkm_cgrp_del()
    190  if (!cgrp)    in nvkm_cgrp_unref()
    200  if (cgrp)    in nvkm_cgrp_ref()
    203  return cgrp;    in nvkm_cgrp_ref()
    211  if (!cgrp)    in nvkm_cgrp_put()
    224  if (!(cgrp = *pcgrp = kmalloc(sizeof(*cgrp), GFP_KERNEL)))    in nvkm_cgrp_new()
    227  cgrp->func = runl->fifo->func->cgrp.func;    in nvkm_cgrp_new()
    228  strscpy(cgrp->name, name, sizeof(cgrp->name));    in nvkm_cgrp_new()
    231  cgrp->hw = hw;    in nvkm_cgrp_new()
    [all …]

chan.c
     44  struct nvkm_cgrp *cgrp = chan->cgrp;    in nvkm_chan_cctx_bind() (local)
     56  if (cgrp->hw)    in nvkm_chan_cctx_bind()
     66  if (cgrp->hw)    in nvkm_chan_cctx_bind()
     96  struct nvkm_cgrp *cgrp = chan->cgrp;    in nvkm_chan_cctx_get() (local)
    166  struct nvkm_cgrp *cgrp = chan->cgrp;    in nvkm_chan_remove_locked() (local)
    198  struct nvkm_cgrp *cgrp = chan->cgrp;    in nvkm_chan_insert() (local)
    211  list_add_tail(&cgrp->head, &cgrp->runl->cgrps);    in nvkm_chan_insert()
    392  if (!cgrp) {    in nvkm_chan_new_()
    399  cgrp = chan->cgrp;    in nvkm_chan_new_()
    401  if (cgrp->runl != runl || cgrp->vmm != vmm) {    in nvkm_chan_new_()
    [all …]

gk110.c
     38  struct nvkm_cgrp *cgrp = chan->cgrp;    in gk110_chan_preempt() (local)
     40  if (cgrp->hw) {    in gk110_chan_preempt()
     41  cgrp->func->preempt(cgrp);    in gk110_chan_preempt()
     61  gk110_cgrp_preempt(struct nvkm_cgrp *cgrp)    in gk110_cgrp_preempt() (argument)
     63  nvkm_wr32(cgrp->runl->fifo->engine.subdev.device, 0x002634, 0x01000000 | cgrp->id);    in gk110_cgrp_preempt()
     72  gk110_runl_insert_cgrp(struct nvkm_cgrp *cgrp, struct nvkm_memory *memory, u64 offset)    in gk110_runl_insert_cgrp() (argument)
     74  nvkm_wo32(memory, offset + 0, (cgrp->chan_nr << 26) | (128 << 18) |    in gk110_runl_insert_cgrp()
     75  (3 << 14) | 0x00002000 | cgrp->id);    in gk110_runl_insert_cgrp()
    123  .cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp },

runl.c
     48  cgrp = chan->cgrp;    in nvkm_engn_cgrp_get()
     53  WARN_ON(!cgrp);    in nvkm_engn_cgrp_get()
     54  return cgrp;    in nvkm_engn_cgrp_get()
    103  if (!cgrp) {    in nvkm_runl_rc()
    157  struct nvkm_cgrp *cgrp;    in nvkm_runl_rc_engn() (local)
    162  if (!cgrp) {    in nvkm_runl_rc_engn()
    167  nvkm_runl_rc_cgrp(cgrp);    in nvkm_runl_rc_engn()
    231  struct nvkm_cgrp *cgrp;    in nvkm_runl_cgrp_get_cgid() (local)
    236  cgrp = cgid->data[id];    in nvkm_runl_cgrp_get_cgid()
    237  if (likely(cgrp)) {    in nvkm_runl_cgrp_get_cgid()
    [all …]

ucgrp.c
     33  struct nvkm_cgrp *cgrp;    (member)
     40  struct nvkm_cgrp *cgrp = nvkm_ucgrp(oclass->parent)->cgrp;    in nvkm_ucgrp_chan_new() (local)
     42  return nvkm_uchan_new(cgrp->runl->fifo, cgrp, oclass, argv, argc, pobject);    in nvkm_ucgrp_chan_new()
     48  struct nvkm_cgrp *cgrp = nvkm_ucgrp(object)->cgrp;    in nvkm_ucgrp_sclass() (local)
     49  struct nvkm_fifo *fifo = cgrp->runl->fifo;    in nvkm_ucgrp_sclass()
     70  nvkm_cgrp_unref(&ucgrp->cgrp);    in nvkm_ucgrp_dtor()
    115  ret = nvkm_cgrp_new(runl, args->v0.name, vmm, true, &ucgrp->cgrp);    in nvkm_ucgrp_new()
    120  args->v0.cgid = ucgrp->cgrp->id;    in nvkm_ucgrp_new()

/linux-6.3-rc2/tools/testing/selftests/bpf/progs/

cgrp_kfunc_failure.c
     24  status = cgrps_kfunc_map_insert(cgrp);    in insert_lookup_cgrp()
     38  v = insert_lookup_cgrp(cgrp);    in BPF_PROG()
     69  acquired = bpf_cgroup_acquire(cgrp);    in BPF_PROG()
    109  acquired = bpf_cgroup_acquire(cgrp);    in BPF_PROG()
    123  kptr = bpf_cgroup_kptr_get(&cgrp);    in BPF_PROG()
    174  v = insert_lookup_cgrp(cgrp);    in BPF_PROG()
    194  v = insert_lookup_cgrp(cgrp);    in BPF_PROG()
    213  v = insert_lookup_cgrp(cgrp);    in BPF_PROG()
    218  bpf_cgroup_release(v->cgrp);    in BPF_PROG()
    248  local.cgrp = NULL;    in BPF_PROG()
    [all …]

cgrp_kfunc_success.c
     40  acquired = bpf_cgroup_acquire(cgrp);    in BPF_PROG()
     54  status = cgrps_kfunc_map_insert(cgrp);    in BPF_PROG()
     71  status = cgrps_kfunc_map_insert(cgrp);    in BPF_PROG()
     77  v = cgrps_kfunc_map_value_lookup(cgrp);    in BPF_PROG()
     83  kptr = bpf_kptr_xchg(&v->cgrp, NULL);    in BPF_PROG()
    104  status = cgrps_kfunc_map_insert(cgrp);    in BPF_PROG()
    110  v = cgrps_kfunc_map_value_lookup(cgrp);    in BPF_PROG()
    116  kptr = bpf_cgroup_kptr_get(&v->cgrp);    in BPF_PROG()
    135  self = bpf_cgroup_ancestor(cgrp, cgrp->level);    in BPF_PROG()
    141  if (self->self.id != cgrp->self.id) {    in BPF_PROG()
    [all …]

cgrp_ls_sleepable.c
     28  struct cgroup *cgrp = ctx->cgroup;    in cgroup_iter() (local)
     31  if (cgrp == NULL)    in cgroup_iter()
     34  ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,    in cgroup_iter()
     37  cgroup_id = cgrp->kn->id;    in cgroup_iter()
     45  struct cgroup *cgrp;    in no_rcu_lock() (local)
     53  cgrp = task->cgroups->dfl_cgrp;    in no_rcu_lock()
     54  ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,    in no_rcu_lock()
     57  cgroup_id = cgrp->kn->id;    in no_rcu_lock()
     65  struct cgroup *cgrp;    in yes_rcu_lock() (local)
     73  cgrp = task->cgroups->dfl_cgrp;    in yes_rcu_lock()
    [all …]

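cgrp_ls_sleepable.c exercises BPF cgroup local storage (BPF_MAP_TYPE_CGRP_STORAGE) from a sleepable cgroup iterator and from task-based hooks with and without bpf_rcu_read_lock(). A minimal sketch of the storage pattern from the iterator side; the map and program names are illustrative, not taken from the selftest:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

/* one counter slot attached to each cgroup that the iterator visits */
struct {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, __u64);
} cgrp_visits SEC(".maps");

SEC("iter.s/cgroup")
int count_cgroup_visits(struct bpf_iter__cgroup *ctx)
{
	struct cgroup *cgrp = ctx->cgroup;
	__u64 *val;

	if (!cgrp)	/* the final iteration pass carries no cgroup */
		return 0;

	/* look up (or create) this cgroup's storage slot and bump it */
	val = bpf_cgrp_storage_get(&cgrp_visits, cgrp, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (val)
		__sync_fetch_and_add(val, 1);
	return 0;
}

The slot is created on first use because of BPF_LOCAL_STORAGE_GET_F_CREATE and is torn down automatically when the cgroup itself is destroyed.
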
cgrp_kfunc_common.h
     13  struct cgroup __kptr_ref * cgrp;    (member)
     26  struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
     28  static inline struct __cgrps_kfunc_map_value *cgrps_kfunc_map_value_lookup(struct cgroup *cgrp)    in cgrps_kfunc_map_value_lookup() (argument)
     33  status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);    in cgrps_kfunc_map_value_lookup()
     40  static inline int cgrps_kfunc_map_insert(struct cgroup *cgrp)    in cgrps_kfunc_map_insert() (argument)
     47  status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);    in cgrps_kfunc_map_insert()
     51  local.cgrp = NULL;    in cgrps_kfunc_map_insert()
     62  acquired = bpf_cgroup_acquire(cgrp);    in cgrps_kfunc_map_insert()
     63  old = bpf_kptr_xchg(&v->cgrp, acquired);    in cgrps_kfunc_map_insert()

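cgrp_kfunc_common.h packages the referenced-kptr pattern the two tests above rely on: take a reference with bpf_cgroup_acquire(), move it into a map value with bpf_kptr_xchg(), and release whatever pointer was displaced. A condensed sketch of that flow, assuming the 6.3-era kfunc signatures shown in the listing and an illustrative tp_btf hook on cgroup_mkdir:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

struct map_value {
	struct cgroup __kptr_ref *cgrp;		/* referenced cgroup pointer owned by the map */
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, u64);			/* cgroup id */
	__type(value, struct map_value);
} stashed_cgrps SEC(".maps");

/* kfuncs exported by the kernel for trusted cgroup pointers */
struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(stash_cgroup, struct cgroup *cgrp, const char *path)
{
	struct map_value local = {}, *v;
	struct cgroup *acquired, *old;
	u64 id = cgrp->kn->id;

	/* reserve a slot keyed by cgroup id, then install the kptr into it */
	bpf_map_update_elem(&stashed_cgrps, &id, &local, BPF_NOEXIST);
	v = bpf_map_lookup_elem(&stashed_cgrps, &id);
	if (!v)
		return 0;

	acquired = bpf_cgroup_acquire(cgrp);	/* reference we now own */
	old = bpf_kptr_xchg(&v->cgrp, acquired);/* ownership moves into the map */
	if (old)				/* drop whatever was stored before */
		bpf_cgroup_release(old);
	return 0;
}

The verifier tracks ownership across the xchg; leaking the acquired pointer, releasing it twice, or storing an unreferenced pointer are the patterns cgrp_kfunc_failure.c expects to be rejected.
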
cgroup_hierarchical_stats.c
     40  extern void cgroup_rstat_updated(struct cgroup *cgrp, int cpu) __ksym;
     41  extern void cgroup_rstat_flush(struct cgroup *cgrp) __ksym;
     43  static uint64_t cgroup_id(struct cgroup *cgrp)    in cgroup_id() (argument)
     45  return cgrp->kn->id;    in cgroup_id()
     83  int BPF_PROG(flusher, struct cgroup *cgrp, struct cgroup *parent, int cpu)    in BPF_PROG() (argument)
     87  __u64 cg_id = cgroup_id(cgrp);    in BPF_PROG()
    133  int BPF_PROG(dumper, struct bpf_iter_meta *meta, struct cgroup *cgrp)    in BPF_PROG() (argument)
    137  __u64 cg_id = cgrp ? cgroup_id(cgrp) : 0;    in BPF_PROG()
    144  cgroup_rstat_flush(cgrp);    in BPF_PROG()

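cgroup_rstat_updated() and cgroup_rstat_flush() are exposed to BPF as kfuncs so a program can push its own per-cgroup counters through the rstat flush tree. A rough sketch of the flush side, assuming the same fentry hook on the kernel's empty bpf_rstat_flush() helper that this selftest uses; the map layout and names are illustrative:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

struct counter {
	u64 state;	/* value accumulated on this cgroup itself */
	u64 pending;	/* not yet propagated to the parent */
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 4096);
	__type(key, u64);		/* cgroup id */
	__type(value, struct counter);
} counters SEC(".maps");

static u64 cgroup_id(struct cgroup *cgrp)
{
	return cgrp->kn->id;
}

/* Invoked for each (cgroup, parent, cpu) with updated stats during an
 * rstat flush; push this cgroup's pending delta up to its parent. */
SEC("fentry/bpf_rstat_flush")
int BPF_PROG(flusher, struct cgroup *cgrp, struct cgroup *parent, int cpu)
{
	u64 cg_id = cgroup_id(cgrp);
	u64 parent_id;
	struct counter *c, *p;

	c = bpf_map_lookup_elem(&counters, &cg_id);
	if (!c || !c->pending)
		return 0;

	c->state += c->pending;
	if (parent) {
		parent_id = cgroup_id(parent);
		p = bpf_map_lookup_elem(&counters, &parent_id);
		if (p)
			p->pending += c->pending;
	}
	c->pending = 0;
	return 0;
}

On the update side a program bumps pending and calls cgroup_rstat_updated(cgrp, cpu) so the cgroup is queued on the per-cpu updated tree; a reader such as the dumper iterator above calls cgroup_rstat_flush(cgrp) before reporting, which is what triggers these flusher invocations.
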
cgroup_iter.c
     12  static inline u64 cgroup_id(struct cgroup *cgrp)    in cgroup_id() (argument)
     14  return cgrp->kn->id;    in cgroup_id()
     21  struct cgroup *cgrp = ctx->cgroup;    in cgroup_id_printer() (local)
     24  if (cgrp == NULL) {    in cgroup_id_printer()
     33  BPF_SEQ_PRINTF(seq, "%8llu\n", cgroup_id(cgrp));    in cgroup_id_printer()
     35  if (terminal_cgroup == cgroup_id(cgrp))    in cgroup_id_printer()

/linux-6.3-rc2/tools/perf/util/

cgroup.c
    151  if (!cgrp)    in add_cgroup()
    167  counter->cgrp = cgrp;    in add_cgroup()
    181  if (cgrp && refcount_dec_and_test(&cgrp->refcnt)) {    in cgroup__put()
    392  cgrp = counter->cgrp;    in parse_cgroups()
    394  counter->cgrp = cgrp;    in parse_cgroups()
    471  evsel->cgrp = cgroup__get(cgrp);    in evlist__expand_cgroup()
    534  cgrp = malloc(sizeof(*cgrp));    in __cgroup__findnew()
    540  free(cgrp);    in __cgroup__findnew()
    551  return cgrp;    in __cgroup__findnew()
    562  return cgrp;    in cgroup__findnew()
    [all …]

bpf_counter_cgroup.c
     48  struct cgroup *cgrp, *leader_cgrp;    in bperf_load_program() (local)
    109  cgrp = NULL;    in bperf_load_program()
    113  if (cgrp == NULL || evsel->cgrp == leader_cgrp) {    in bperf_load_program()
    114  leader_cgrp = evsel->cgrp;    in bperf_load_program()
    115  evsel->cgrp = NULL;    in bperf_load_program()
    130  evsel->cgrp = leader_cgrp;    in bperf_load_program()
    133  if (evsel->cgrp == cgrp)    in bperf_load_program()
    136  cgrp = evsel->cgrp;    in bperf_load_program()
    138  if (read_cgroup_id(cgrp) < 0) {    in bperf_load_program()
    145  err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY);    in bperf_load_program()
    [all …]

/linux-6.3-rc2/include/linux/

cgroup.h
    328  return cgrp->kn->id;    in cgroup_id()
    353  css_get(&cgrp->self);    in cgroup_get()
    363  css_put(&cgrp->self);    in cgroup_put()
    517  if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)    in cgroup_is_descendant()
    561  return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +    in cgroup_is_populated()
    568  return kernfs_ino(cgrp->kn);    in cgroup_ino()
    718  struct cgroup *cgrp;    in cgroup_account_cputime() (local)
    722  cgrp = task_dfl_cgroup(task);    in cgroup_account_cputime()
    723  if (cgroup_parent(cgrp))    in cgroup_account_cputime()
    731  struct cgroup *cgrp;    in cgroup_account_cputime_field() (local)
    [all …]

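cgroup.h provides the inline helpers the rest of the kernel builds on: cgroup_id() (the kernfs node id), cgroup_get()/cgroup_put() wrapping css_get()/css_put() on cgrp->self, cgroup_parent(), cgroup_is_descendant() and cgroup_is_populated(). A small, hypothetical in-kernel sketch of the usual task-to-cgroup walk (the helper itself is made up for illustration):

#include <linux/cgroup.h>
#include <linux/sched.h>

/* Hypothetical helper: report how deep @task's default-hierarchy cgroup
 * sits below @ancestor, or -1 if it is not a descendant at all. */
static int task_cgroup_depth_below(struct task_struct *task,
				   struct cgroup *ancestor)
{
	struct cgroup *cgrp;
	int depth = 0;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);		/* cgroup on the v2 hierarchy */
	if (!cgroup_is_descendant(cgrp, ancestor)) {
		rcu_read_unlock();
		return -1;
	}
	while (cgrp && cgrp != ancestor) {	/* walk towards the root */
		cgrp = cgroup_parent(cgrp);
		depth++;
	}
	rcu_read_unlock();
	return depth;
}
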
psi.h
     34  static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)    in cgroup_psi() (argument)
     36  return cgroup_ino(cgrp) == 1 ? &psi_system : cgrp->psi;    in cgroup_psi()
     39  int psi_cgroup_alloc(struct cgroup *cgrp);
     40  void psi_cgroup_free(struct cgroup *cgrp);
     53  static inline int psi_cgroup_alloc(struct cgroup *cgrp)    in psi_cgroup_alloc() (argument)
     57  static inline void psi_cgroup_free(struct cgroup *cgrp)    in psi_cgroup_free() (argument)

/linux-6.3-rc2/include/trace/events/

cgroup.h
     58  TP_ARGS(cgrp, path),
     70  __entry->level = cgrp->level;
     82  TP_ARGS(cgrp, path)
     89  TP_ARGS(cgrp, path)
     96  TP_ARGS(cgrp, path)
    103  TP_ARGS(cgrp, path)
    110  TP_ARGS(cgrp, path)
    117  TP_ARGS(cgrp, path)
    170  TP_ARGS(cgrp, path, val),
    197  TP_ARGS(cgrp, path, val)
    [all …]

/linux-6.3-rc2/kernel/bpf/

cgroup.c
     71  struct cgroup *cgrp;    in __cgroup_bpf_run_lsm_sock() (local)
     81  if (likely(cgrp))    in __cgroup_bpf_run_lsm_sock()
     93  struct cgroup *cgrp;    in __cgroup_bpf_run_lsm_socket() (local)
    103  if (likely(cgrp))    in __cgroup_bpf_run_lsm_socket()
    114  struct cgroup *cgrp;    in __cgroup_bpf_run_lsm_current() (local)
    122  if (likely(cgrp))    in __cgroup_bpf_run_lsm_current()
    194  cgroup_get(cgrp);    in cgroup_bpf_offline()
    324  cgroup_put(cgrp);    in cgroup_bpf_release()
    424  p = cgrp;    in compute_effective_progs()
   1137  if (IS_ERR(cgrp))    in cgroup_bpf_prog_attach()
    [all …]

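kernel/bpf/cgroup.c is the attach side of cgroup BPF: cgroup_bpf_prog_attach() and compute_effective_progs() build the effective program arrays for a cgroup and its descendants, the __cgroup_bpf_run_lsm_*() helpers execute them, and cgroup_get()/cgroup_put() pin the cgroup while programs remain attached. For orientation, a minimal program of the kind attached through this path, using the standard cgroup_skb hook (the program name is illustrative):

/* Attachable to a cgroup fd with
 * bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0),
 * which ends up in cgroup_bpf_prog_attach() shown above. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("cgroup_skb/egress")
int allow_all_egress(struct __sk_buff *skb)
{
	/* 1 = allow the packet; returning 0 would drop egress traffic for
	 * every task in the attached cgroup, subject to the attach flags. */
	return 1;
}
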
/linux-6.3-rc2/tools/testing/selftests/bpf/

test_sockmap.c
   1432  test_exec(cgrp, opt);    in test_send_one()
   1437  test_exec(cgrp, opt);    in test_send_one()
   1442  test_exec(cgrp, opt);    in test_send_one()
   1451  test_exec(cgrp, opt);    in test_send_many()
   1456  test_exec(cgrp, opt);    in test_send_many()
   1464  test_exec(cgrp, opt);    in test_send_large()
   1479  test_send(opt, cgrp);    in test_txmsg_pass()
   1485  test_send(opt, cgrp);    in test_txmsg_redir()
   1499  test_send(opt, cgrp);    in test_txmsg_drop()
   1506  test_send(opt, cgrp);    in test_txmsg_ingress_redir()
    [all …]

/linux-6.3-rc2/block/

blk-cgroup-fc-appid.c
     13  struct cgroup *cgrp;    in blkcg_set_fc_appid() (local)
     21  cgrp = cgroup_get_from_id(cgrp_id);    in blkcg_set_fc_appid()
     22  if (IS_ERR(cgrp))    in blkcg_set_fc_appid()
     23  return PTR_ERR(cgrp);    in blkcg_set_fc_appid()
     24  css = cgroup_get_e_css(cgrp, &io_cgrp_subsys);    in blkcg_set_fc_appid()
     40  cgroup_put(cgrp);    in blkcg_set_fc_appid()

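blkcg_set_fc_appid() shows the standard way to go from a user-supplied cgroup id to a controller's effective css: cgroup_get_from_id() (which returns an ERR_PTR on failure), cgroup_get_e_css() against io_cgrp_subsys, then paired css_put()/cgroup_put(). A stripped-down, hypothetical sketch of that shape with the FC app-id specifics elided:

#include <linux/cgroup.h>
#include <linux/err.h>

/* Hypothetical helper mirroring the lookup/teardown in blkcg_set_fc_appid(). */
static int act_on_io_css_by_id(u64 cgrp_id)
{
	struct cgroup_subsys_state *css;
	struct cgroup *cgrp;
	int ret = 0;

	cgrp = cgroup_get_from_id(cgrp_id);	/* takes a reference, ERR_PTR on failure */
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	css = cgroup_get_e_css(cgrp, &io_cgrp_subsys);	/* effective io css, with a reference */
	if (!css) {
		ret = -ENOENT;
		goto out_cgrp;
	}

	/* ... use the css here; the real code goes on via css_to_blkcg(css) ... */

	css_put(css);
out_cgrp:
	cgroup_put(cgrp);
	return ret;
}
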
/linux-6.3-rc2/tools/perf/util/bpf_skel/

bperf_cgroup.bpf.c
     65  static inline __u64 get_cgroup_v1_ancestor_id(struct cgroup *cgrp, int level)    in get_cgroup_v1_ancestor_id() (argument)
     68  struct cgroup___new *cgrp_new = (void *)cgrp;    in get_cgroup_v1_ancestor_id()
     74  struct cgroup___old *cgrp_old = (void *)cgrp;    in get_cgroup_v1_ancestor_id()
     83  struct cgroup *cgrp;    in get_cgroup_v1_idx() (local)
     97  cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_subsys_id], cgroup);    in get_cgroup_v1_idx()
     98  level = BPF_CORE_READ(cgrp, level);    in get_cgroup_v1_idx()
    107  cgrp_id = get_cgroup_v1_ancestor_id(cgrp, i);    in get_cgroup_v1_idx()
    153  __u32 key, cgrp;    in bperf_cgroup_count() (local)
    193  cgrp = cgrp_idx[c];    in bperf_cgroup_count()
    196  key = cgrp * num_events + idx;    in bperf_cgroup_count()

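bperf_cgroup.bpf.c has to handle cgroup v1 setups where the perf_event controller sits on its own hierarchy: it resolves the perf_event subsystem index from the kernel's cgroup_subsys_id enum at load time and then walks task->cgroups->subsys[id]->cgroup with CO-RE reads. A reduced sketch of that lookup, assuming CONFIG_CGROUP_PERF so perf_event_cgrp_id exists in the target kernel's BTF (the section and program name are illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

char _license[] SEC("license") = "GPL";

SEC("tracepoint/sched/sched_switch")
int report_perf_cgroup_level(void *ctx)
{
	struct task_struct *p = (void *)bpf_get_current_task();
	struct cgroup *cgrp;
	int subsys_id, level;

	/* resolve the perf_event subsystem index against the running kernel */
	subsys_id = bpf_core_enum_value(enum cgroup_subsys_id, perf_event_cgrp_id);

	/* task -> css_set -> perf_event css -> cgroup, all via CO-RE reads */
	cgrp = BPF_CORE_READ(p, cgroups, subsys[subsys_id], cgroup);
	level = BPF_CORE_READ(cgrp, level);

	bpf_printk("perf_event cgroup level: %d", level);
	return 0;
}

The real skeleton caches the resolved index in a global, and the cgroup___new/cgroup___old casts listed above handle a layout change in how ancestor cgroups are recorded across kernel versions.
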