/kernel/trace/

trace_probe.c
     473  if (ctx->btf)    in query_btf_context()
     483  ctx->btf = btf;    in query_btf_context()
     510  if (ctx->btf) {    in clear_btf_context()
     512  ctx->btf = NULL;    in clear_btf_context()
     674  if (!ctx->btf) {    in parse_btf_arg()
     733  typestr = fetch_type_from_btf_type(btf, ctx->last_type, ctx);    in find_fetch_type_from_btf_type()
     743  if ((ctx->last_bitsize % 8 == 0) && ctx->last_bitoffs == 0)    in parse_btf_bitfield()
     754  code->lshift = 64 - (ctx->last_bitsize + ctx->last_bitoffs);    in parse_btf_bitfield()
     763  ctx->btf = NULL;    in clear_btf_context()
    1508  ctx);    in traceprobe_parse_probe_arg_body()
    [all …]
|
trace_fprobe.c
    1211  struct traceprobe_parse_context *ctx)    in trace_fprobe_create_internal() argument
    1302  ctx->flags |= TPARG_FL_RETURN;    in trace_fprobe_create_internal()
    1304  ctx->flags |= TPARG_FL_FENTRY;    in trace_fprobe_create_internal()
    1306  ctx->funcname = NULL;    in trace_fprobe_create_internal()
    1311  ctx->flags |= TPARG_FL_TPOINT;    in trace_fprobe_create_internal()
    1322  if (!ctx->funcname)    in trace_fprobe_create_internal()
    1323  ctx->funcname = symbol;    in trace_fprobe_create_internal()
    1330  abuf, MAX_BTF_ARGS_LEN, ctx);    in trace_fprobe_create_internal()
    1359  ctx->offset = 0;    in trace_fprobe_create_internal()
    1403  ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);    in trace_fprobe_create_cb()
    [all …]
|
bpf_trace.c
     147  ctx, bpf_prog_run);    in trace_call_bpf()
     720  .data = ctx,    in bpf_event_output()
    1036  return ((u64 *)ctx)[-2];    in BPF_CALL_1()
    1146  return ctx->event->bpf_cookie;    in BPF_CALL_1()
    1198  u64 nr_args = ((u64 *)ctx)[-1];    in BPF_CALL_3()
    1202  *value = ((u64 *)ctx)[n];    in BPF_CALL_3()
    1218  u64 nr_args = ((u64 *)ctx)[-1];    in BPF_CALL_2()
    1220  *value = ((u64 *)ctx)[nr_args];    in BPF_CALL_2()
    1232  BPF_CALL_1(get_func_arg_cnt, void *, ctx)    in BPF_CALL_1() argument
    1235  return ((u64 *)ctx)[-1];    in BPF_CALL_1()
    [all …]
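The `((u64 *)ctx)[n]` hits above belong to the get_func_arg / get_func_ret / get_func_arg_cnt helpers for fentry/fexit programs, which treat the raw context as an array of argument slots saved by the BPF trampoline. A minimal sketch of that layout assumption (simplified, not the kernel code verbatim; the sketch_* names are illustrative):

```c
#include <linux/types.h>
#include <linux/errno.h>

/*
 * For fentry/fexit programs the trampoline saves the traced function's
 * arguments as consecutive u64 slots starting at ctx, stores the argument
 * count in the slot just below (index -1), and, for fexit, places the
 * return value right after the arguments.
 */
static u64 sketch_get_func_arg_cnt(void *ctx)
{
	return ((u64 *)ctx)[-1];		/* nr_args saved by the trampoline */
}

static int sketch_get_func_arg(void *ctx, u32 n, u64 *value)
{
	u64 nr_args = ((u64 *)ctx)[-1];

	if (n >= nr_args)
		return -EINVAL;
	*value = ((u64 *)ctx)[n];		/* n-th saved argument */
	return 0;
}

static int sketch_get_func_ret(void *ctx, u64 *value)
{
	u64 nr_args = ((u64 *)ctx)[-1];

	*value = ((u64 *)ctx)[nr_args];		/* return value follows the args */
	return 0;
}
```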
|
/kernel/

auditsc.c
     266  ctx->first_trees = ctx->trees;    in grow_tree_refs()
     346  if (ctx) {    in audit_compare_uid()
     476  if (ctx && rule->prio <= ctx->prio)    in audit_filter_rules()
     555  if (ctx && ctx->return_valid != AUDITSC_INVALID)    in audit_filter_rules()
     656  if (ctx && ctx->sockaddr)    in audit_filter_rules()
     719  if (!ctx || ctx->type != AUDIT_IPC)    in audit_filter_rules()
     996  ctx->current_state = ctx->state;    in audit_reset_context()
    1001  memset(ctx->argv, 0, sizeof(ctx->argv));    in audit_reset_context()
    1015  ctx->uid = ctx->euid = ctx->suid = ctx->fsuid = KUIDT_INIT(0);    in audit_reset_context()
    1016  ctx->gid = ctx->egid = ctx->sgid = ctx->fsgid = KGIDT_INIT(0);    in audit_reset_context()
    [all …]
|
audit.c
    1494  sig_data, struct_size(sig_data, ctx,    in audit_receive_msg()
    1801  ab->ctx = ctx;    in audit_buffer_alloc()
    1838  if (!ctx || !auditsc_get_stamp(ctx, t, serial)) {    in audit_get_stamp()
    1913  ab = audit_buffer_alloc(ctx, gfp_mask, type);    in audit_log_start()
    1919  audit_get_stamp(ab->ctx, &t, &serial);    in audit_log_start()
    1921  if (ctx)    in audit_log_start()
    1922  ctx->dummy = 0;    in audit_log_start()
    2183  struct lsm_context ctx;    in audit_log_task_context() local
    2197  audit_log_format(ab, " subj=%s", ctx.context);    in audit_log_task_context()
    2198  security_release_secctx(&ctx);    in audit_log_task_context()
    [all …]
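The audit_buffer_alloc()/audit_log_start() hits above show how the audit context is threaded into a record buffer. As a hedged caller-side sketch (the record type, function name and field are illustrative, not taken from the listing), emitting a record against a task's audit context looks roughly like this:

```c
#include <linux/audit.h>
#include <linux/gfp.h>

/*
 * Hedged sketch: start a record on the given audit context, append one
 * formatted field, and close the record.  AUDIT_KERNEL is used here only
 * as a stand-in record type; a real caller picks the type matching its
 * event.  audit_log_start() returns NULL when auditing is disabled or the
 * buffer cannot be allocated.
 */
static void log_example_event(struct audit_context *context, int value)
{
	struct audit_buffer *ab;

	ab = audit_log_start(context, GFP_KERNEL, AUDIT_KERNEL);
	if (!ab)
		return;

	audit_log_format(ab, "example value=%d", value);
	audit_log_end(ab);
}
```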
|
workqueue.c
    5245  if (ctx) {    in apply_wqattrs_cleanup()
    5254  kfree(ctx);    in apply_wqattrs_cleanup()
    5274  ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL);    in apply_wqattrs_prepare()
    5289  if (!ctx->dfl_pwq)    in apply_wqattrs_prepare()
    5295  ctx->pwq_tbl[cpu] = ctx->dfl_pwq;    in apply_wqattrs_prepare()
    5319  ctx->wq = wq;    in apply_wqattrs_prepare()
    5320  return ctx;    in apply_wqattrs_prepare()
    5336  copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);    in apply_wqattrs_commit()
    5340  ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,    in apply_wqattrs_commit()
    5342  ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq);    in apply_wqattrs_commit()
    [all …]
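Here ctx is the internal apply_wqattrs_ctx transaction: apply_wqattrs_prepare() allocates it and builds the new pool workqueues, apply_wqattrs_commit() installs them, and apply_wqattrs_cleanup() drops the replaced state. Callers never touch the ctx; they go through apply_workqueue_attrs() on an unbound workqueue. A hedged sketch of that caller-side path (pin_wq_to_cpu0 and the CPU choice are illustrative):

```c
#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/*
 * Hedged sketch: restrict an unbound (WQ_UNBOUND) workqueue to CPU 0 via
 * the public attrs API.  Internally apply_workqueue_attrs() performs the
 * prepare/commit/cleanup ctx steps seen in the hits above.
 */
static int pin_wq_to_cpu0(struct workqueue_struct *wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	cpumask_clear(attrs->cpumask);
	cpumask_set_cpu(0, attrs->cpumask);

	ret = apply_workqueue_attrs(wq, attrs);
	free_workqueue_attrs(attrs);
	return ret;
}
```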
|
/kernel/bpf/

crypto.c
     179  ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);    in bpf_crypto_ctx_create()
     180  if (!ctx) {    in bpf_crypto_ctx_create()
     185  ctx->type = type;    in bpf_crypto_ctx_create()
     207  ctx->siv_len = type->ivsize(ctx->tfm) + type->statesize(ctx->tfm);    in bpf_crypto_ctx_create()
     211  return ctx;    in bpf_crypto_ctx_create()
     216  kfree(ctx);    in bpf_crypto_ctx_create()
     228  ctx->type->free_tfm(ctx->tfm);    in crypto_free_cb()
     230  kfree(ctx);    in crypto_free_cb()
     247  return ctx;    in bpf_crypto_ctx_acquire()
     298  err = decrypt ? ctx->type->decrypt(ctx->tfm, psrc, pdst, src_len, piv)    in bpf_crypto_crypt()
    [all …]
|
cgroup.c
     105  args = (u64 *)ctx;    in __cgroup_bpf_run_lsm_sock()
     127  args = (u64 *)ctx;    in __cgroup_bpf_run_lsm_socket()
    1914  ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);    in __cgroup_bpf_run_filter_sysctl()
    1928  memcpy(ctx.new_val, *buf, ctx.new_len);    in __cgroup_bpf_run_filter_sysctl()
    1973  ctx->optval_end = ctx->optval + max_optlen;    in sockopt_alloc_buf()
    1981  ctx->optval_end = ctx->optval + max_optlen;    in sockopt_alloc_buf()
    2041  } else if (ctx.optlen > max_optlen || ctx.optlen < -1) {    in __cgroup_bpf_run_filter_setsockopt()
    2076  memcpy(p, ctx.optval, ctx.optlen);    in __cgroup_bpf_run_filter_setsockopt()
    2148  (ctx.optlen > max_optlen || ctx.optlen < 0)) {    in __cgroup_bpf_run_filter_getsockopt()
    2161  copy_to_sockptr(optval, ctx.optval, ctx.optlen)) {    in __cgroup_bpf_run_filter_getsockopt()
    [all …]
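The optval/optval_end/optlen bookkeeping above is what a cgroup sockopt program observes through struct bpf_sockopt. A hedged BPF-side sketch of that view (the program name and the one-byte peek are illustrative, not the kernel's own program):

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

/*
 * Hedged sketch: the kernel code above sizes the [optval, optval_end)
 * window this program may inspect or rewrite.  Returning 1 lets the
 * setsockopt() call proceed unchanged.
 */
SEC("cgroup/setsockopt")
int inspect_setsockopt(struct bpf_sockopt *ctx)
{
	__u8 *optval = ctx->optval;
	__u8 *optval_end = ctx->optval_end;

	if (optval + 1 <= optval_end) {		/* bounds check for the verifier */
		__u8 first_byte = optval[0];

		(void)first_byte;		/* ... inspect or rewrite here ... */
	}
	return 1;
}
```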
|
link_iter.c
      46  struct bpf_iter__bpf_link ctx;    in DEFINE_BPF_ITER_FUNC() local
      51  ctx.meta = &meta;    in DEFINE_BPF_ITER_FUNC()
      52  ctx.link = v;    in DEFINE_BPF_ITER_FUNC()
      56  ret = bpf_iter_run_prog(prog, &ctx);    in DEFINE_BPF_ITER_FUNC()
|
prog_iter.c
      46  struct bpf_iter__bpf_prog ctx;    in DEFINE_BPF_ITER_FUNC() local
      51  ctx.meta = &meta;    in DEFINE_BPF_ITER_FUNC()
      52  ctx.prog = v;    in DEFINE_BPF_ITER_FUNC()
      56  ret = bpf_iter_run_prog(prog, &ctx);    in DEFINE_BPF_ITER_FUNC()
|
kmem_cache_iter.c
     155  struct bpf_iter__kmem_cache ctx = {    in kmem_cache_iter_seq_stop() local
     164  if (prog && !ctx.s)    in kmem_cache_iter_seq_stop()
     165  bpf_iter_run_prog(prog, &ctx);    in kmem_cache_iter_seq_stop()
     182  struct bpf_iter__kmem_cache ctx = {    in kmem_cache_iter_seq_show() local
     192  ret = bpf_iter_run_prog(prog, &ctx);    in kmem_cache_iter_seq_show()
|
task_iter.c
     179  struct bpf_iter__task ctx;    in DEFINE_BPF_ITER_FUNC() local
     187  ctx.meta = &meta;    in DEFINE_BPF_ITER_FUNC()
     188  ctx.task = task;    in DEFINE_BPF_ITER_FUNC()
     347  struct bpf_iter__task_file ctx;    in DEFINE_BPF_ITER_FUNC() local
     356  ctx.meta = &meta;    in DEFINE_BPF_ITER_FUNC()
     357  ctx.task = info->task;    in DEFINE_BPF_ITER_FUNC()
     358  ctx.fd = info->fd;    in DEFINE_BPF_ITER_FUNC()
     359  ctx.file = file;    in DEFINE_BPF_ITER_FUNC()
     618  ctx.meta = &meta;    in DEFINE_BPF_ITER_FUNC()
     619  ctx.task = info->task;    in DEFINE_BPF_ITER_FUNC()
    [all …]
|
map_iter.c
      46  struct bpf_iter__bpf_map ctx;    in DEFINE_BPF_ITER_FUNC() local
      51  ctx.meta = &meta;    in DEFINE_BPF_ITER_FUNC()
      52  ctx.map = v;    in DEFINE_BPF_ITER_FUNC()
      56  ret = bpf_iter_run_prog(prog, &ctx);    in DEFINE_BPF_ITER_FUNC()
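All of the *_iter.c hits above follow the same shape: the seq_file show callback builds a per-element context, attaches the iterator metadata, and hands both to the attached BPF program. A hedged reconstruction of that shape for the bpf_map iterator (simplified from the hits; error handling trimmed, the local struct definition mirrors what the iterator sources declare):

```c
#include <linux/bpf.h>
#include <linux/seq_file.h>

/* Per-element context handed to the attached program (as in map_iter.c). */
struct bpf_iter__bpf_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
};

/*
 * Hedged sketch of the seq_show path: point ctx at the iterator metadata
 * and the current element, then run the attached program.  in_stop is true
 * for the final call, where the element is NULL.
 */
static int __bpf_map_seq_show(struct seq_file *seq, void *v, bool in_stop)
{
	struct bpf_iter__bpf_map ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int ret = 0;

	ctx.meta = &meta;
	ctx.map = v;
	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (prog)
		ret = bpf_iter_run_prog(prog, &ctx);

	return ret;
}
```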
|
/kernel/events/

core.c
     302  WARN_ON_ONCE(&cpuctx->ctx != ctx);    in event_function()
    1372  ctx = READ_ONCE(event->ctx);    in perf_event_ctx_lock_nested()
    1380  if (event->ctx != ctx) {    in perf_event_ctx_lock_nested()
    1569  WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp);    in __update_context_time()
    2525  if (ctx == &cpuctx->ctx)    in __perf_remove_from_context()
    3506  __ctx_time_update(cpuctx, ctx, ctx == &cpuctx->ctx);    in ctx_sched_out()
    4964  ctx = &cpuctx->ctx;    in find_get_context()
    5046  epc->ctx = ctx;    in find_get_pmu_context()
    5090  epc->ctx = ctx;    in find_get_pmu_context()
   14676  ctx = &cpuctx->ctx;    in perf_event_exit_cpu_context()
    [all …]
|
callchain.c
     224  struct perf_callchain_entry_ctx ctx;    in get_perf_callchain() local
     231  ctx.entry = entry;    in get_perf_callchain()
     232  ctx.max_stack = max_stack;    in get_perf_callchain()
     233  ctx.nr = entry->nr = init_nr;    in get_perf_callchain()
     234  ctx.contexts = 0;    in get_perf_callchain()
     235  ctx.contexts_maxed = false;    in get_perf_callchain()
     239  perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);    in get_perf_callchain()
     240  perf_callchain_kernel(&ctx, regs);    in get_perf_callchain()
     256  perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);    in get_perf_callchain()
     259  perf_callchain_user(&ctx, regs);    in get_perf_callchain()
|
/kernel/cgroup/

cgroup-v1.c
     565  ctx = of->priv;    in cgroup_release_agent_write()
    1027  if (ctx->name)    in cgroup1_parse_param()
    1058  if (!ctx->subsys_mask && !ctx->none && !ctx->name)    in check_cgroupfs_options()
    1073  if (!ctx->subsys_mask && !ctx->name)    in check_cgroupfs_options()
    1085  if (ctx->subsys_mask && ctx->none)    in check_cgroupfs_options()
    1106  if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)    in cgroup1_reconfigure()
    1115  (ctx->name && strcmp(ctx->name, root->name))) {    in cgroup1_reconfigure()
    1117  ctx->flags, ctx->name ?: "", root->flags, root->name);    in cgroup1_reconfigure()
    1203  if (ctx->name) {    in cgroup1_root_to_use()
    1213  if ((ctx->subsys_mask || ctx->none) &&    in cgroup1_root_to_use()
    [all …]
|
cgroup.c
    2100  if (ctx->name)    in init_cgroup_root()
    2216  ctx->kfc.root = ctx->root->kf_root;    in cgroup_do_get_tree()
    2235  cgrp = cset_cgroup_from_root(ctx->ns->root_cset, ctx->root);    in cgroup_do_get_tree()
    2263  kfree(ctx->name);    in cgroup_fs_context_free()
    2267  kfree(ctx);    in cgroup_fs_context_free()
    2308  if (!ctx)    in cgroup_init_fs_context()
    4134  ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);    in cgroup_file_open()
    4135  if (!ctx)    in cgroup_file_open()
    4140  of->priv = ctx;    in cgroup_file_open()
    4148  kfree(ctx);    in cgroup_file_open()
    [all …]
|
/kernel/locking/

test-ww_mutex.c
      61  struct ww_acquire_ctx ctx;    in __test_mutex() local
      95  ww_acquire_fini(&ctx);    in __test_mutex()
     126  struct ww_acquire_ctx ctx;    in test_aa() local
     174  ww_acquire_fini(&ctx);    in test_aa()
     200  WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);    in test_abba_work()
     215  ww_acquire_fini(&ctx);    in test_abba_work()
     242  WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);    in test_abba()
     257  ww_acquire_fini(&ctx);    in test_abba()
     312  ww_acquire_fini(&ctx);    in test_cycle_work()
     473  ww_acquire_fini(&ctx);    in stress_inorder_work()
    [all …]
|
mutex.c
     860  tmp = ctx->deadlock_inject_interval;    in ww_mutex_deadlock_injection()
     868  ctx->contending_lock = lock;    in ww_mutex_deadlock_injection()
     886  0, _RET_IP_, ctx);    in ww_mutex_lock()
     887  if (!ret && ctx && ctx->acquired > 1)    in ww_mutex_lock()
     901  0, _RET_IP_, ctx);    in ww_mutex_lock_interruptible()
     903  if (!ret && ctx && ctx->acquired > 1)    in ww_mutex_lock_interruptible()
    1068  _RET_IP_, ctx);    in __ww_mutex_lock_slowpath()
    1073  struct ww_acquire_ctx *ctx)    in __ww_mutex_lock_interruptible_slowpath() argument
    1076  _RET_IP_, ctx);    in __ww_mutex_lock_interruptible_slowpath()
    1124  if (ctx)    in ww_mutex_lock()
    [all …]
|
ww_mutex.h
     190  DEBUG_LOCKS_WARN_ON(ww->ctx);    in ww_mutex_lock_acquired()
     218  ww->ctx = ww_ctx;    in ww_mutex_lock_acquired()
     401  ww_mutex_lock_acquired(lock, ctx);    in ww_mutex_set_context_fastpath()
     466  struct ww_acquire_ctx *ctx)    in __ww_mutex_check_kill() argument
     472  if (ctx->acquired == 0)    in __ww_mutex_check_kill()
     475  if (!ctx->is_wait_die) {    in __ww_mutex_check_kill()
     476  if (ctx->wounded)    in __ww_mutex_check_kill()
     586  if (lock->ctx) {    in __ww_mutex_unlock()
     590  if (lock->ctx->acquired > 0)    in __ww_mutex_unlock()
     591  lock->ctx->acquired--;    in __ww_mutex_unlock()
    [all …]
|
ww_rt_mutex.c
      48  if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))    in __ww_rt_mutex_lock()
      79  ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)    in ww_mutex_lock() argument
      81  return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);    in ww_mutex_lock()
      86  ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)    in ww_mutex_lock_interruptible() argument
      88  return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);    in ww_mutex_lock_interruptible()
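Across mutex.c, ww_mutex.h, ww_rt_mutex.c and test-ww_mutex.c the ctx is a struct ww_acquire_ctx that ties a group of wound/wait mutex acquisitions together so deadlocks resolve into a -EDEADLK back-off. A hedged caller-side sketch of the usual acquire/back-off/retry shape (lock_pair and example_ww_class are illustrative names, not from the listing):

```c
#include <linux/ww_mutex.h>
#include <linux/minmax.h>

static DEFINE_WW_CLASS(example_ww_class);

/* Hedged sketch: take two ww_mutexes, backing off and retrying on -EDEADLK. */
static int lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &example_ww_class);

	ret = ww_mutex_lock(a, &ctx);
	if (ret)
		goto fini;

	while ((ret = ww_mutex_lock(b, &ctx)) == -EDEADLK) {
		/*
		 * We must back off: drop what we hold, wait for the
		 * contended lock uninterruptibly, then retake the other
		 * one (which may again ask us to back off).
		 */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		swap(a, b);
	}
	if (ret) {
		ww_mutex_unlock(a);
		goto fini;
	}

	ww_acquire_done(&ctx);
	/* ... both locks held; do the work ... */
	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
fini:
	ww_acquire_fini(&ctx);
	return ret;
}
```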
|
/kernel/kcsan/

core.c
     221  ctx->disable_scoped++;    in kcsan_check_scoped_accesses()
     226  ctx->disable_scoped--;    in kcsan_check_scoped_accesses()
     264  return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;    in is_atomic()
     394  return ctx->disable_scoped ? NULL : &ctx->reorder_access;    in get_reorder_access()
     431  ctx->disable_scoped++;    in set_reorder_access()
     439  ctx->disable_scoped--;    in set_reorder_access()
     483  if (ctx->access_mask && !find_reorder_access(ctx, ptr, size, type, ip))    in kcsan_found_watchpoint()
     580  ctx->disable_scoped++;    in kcsan_setup_watchpoint()
     705  ctx->disable_scoped--;    in kcsan_setup_watchpoint()
     926  ctx->disable_count--;    in kcsan_begin_scoped_access()
    [all …]
|
/kernel/bpf/preload/iterators/

iterators.bpf.c
      79  int dump_bpf_map(struct bpf_iter__bpf_map *ctx)    in dump_bpf_map() argument
      81  struct seq_file *seq = ctx->meta->seq;    in dump_bpf_map()
      82  __u64 seq_num = ctx->meta->seq_num;    in dump_bpf_map()
      83  struct bpf_map *map = ctx->map;    in dump_bpf_map()
      99  int dump_bpf_prog(struct bpf_iter__bpf_prog *ctx)    in dump_bpf_prog() argument
     101  struct seq_file *seq = ctx->meta->seq;    in dump_bpf_prog()
     102  __u64 seq_num = ctx->meta->seq_num;    in dump_bpf_prog()
     103  struct bpf_prog *prog = ctx->prog;    in dump_bpf_prog()
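This is the program side of the iterator pattern: each SEC("iter/...") program receives the ctx built by the kernel-side show callback. A hedged, self-contained sketch in the same shape (the printed fields and the use of vmlinux.h are illustrative; BPF_SEQ_PRINTF comes from libbpf's bpf_tracing.h):

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

/*
 * Hedged sketch: print one line per BPF map via the bpf_map iterator.
 * ctx->meta->seq is the seq_file backing the iterator fd; ctx->map is
 * NULL on the final (stop) invocation, so bail out then.
 */
SEC("iter/bpf_map")
int dump_bpf_map_example(struct bpf_iter__bpf_map *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct bpf_map *map = ctx->map;

	if (!map)
		return 0;

	BPF_SEQ_PRINTF(seq, "%4u %-16s %10u\n",
		       map->id, map->name, map->max_entries);
	return 0;
}
```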
|
iterators.lskel-big-endian.h
       9  struct bpf_loader_ctx ctx;    member
      80  skel->ctx.sz = (void *)&skel->links - (void *)skel;    in iterators_bpf__open()
     399  opts.ctx = (struct bpf_loader_ctx *)skel;    in iterators_bpf__load()
|
iterators.lskel-little-endian.h
       9  struct bpf_loader_ctx ctx;    member
      80  skel->ctx.sz = (void *)&skel->links - (void *)skel;    in iterators_bpf__open()
      93  opts.ctx = (struct bpf_loader_ctx *)skel;    in iterators_bpf__load()
|