/tools/testing/selftests/bpf/progs/

task_kfunc_failure.c
     34  struct task_struct *acquired;  in BPF_PROG()
     55  struct task_struct *acquired, *stack_task = (struct task_struct *)&clone_flags;  in BPF_PROG()
     71  struct task_struct *acquired;  in BPF_PROG()
     86  struct task_struct *acquired;  in BPF_PROG()
    106  struct task_struct *acquired;  in BPF_PROG()
    121  struct task_struct *acquired;  in BPF_PROG()
    135  struct task_struct *kptr;  in BPF_PROG()
    155  struct task_struct *acquired;  in BPF_PROG()
    184  struct task_struct *acquired = (struct task_struct *)&clone_flags;  in BPF_PROG()
    240  struct task_struct *acquired;  in BPF_PROG()
    [all …]
|
task_kfunc_success.c
     21  struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
     23  struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak;
     25  struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak;
     41  struct task_struct *acquired = NULL;  in test_acquire_release()
     69  struct task_struct *acquired = NULL;  in BPF_PROG()
    146  struct task_struct *kptr, *acquired;  in BPF_PROG()
    230  struct task_struct *kptr;  in BPF_PROG()
    285  struct task_struct *acquired;  in lookup_compare_pid()
    320  struct task_struct *acquired;  in is_pid_lookup_valid()
    357  struct task_struct *acquired;  in BPF_PROG()
    [all …]
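The success tests above all follow the same reference-counting contract: a task obtained from a tp_btf argument is pinned with the bpf_task_acquire() kfunc and must be dropped with bpf_task_release(). A minimal sketch of that pattern (program name and layout are illustrative, not taken from the listed file):

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(demo_acquire_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	/* bpf_task_acquire() can return NULL (refcount already zero). */
	acquired = bpf_task_acquire(task);
	if (!acquired)
		return 0;

	/* ... use the referenced task ... */
	bpf_task_release(acquired);
	return 0;
}

char _license[] SEC("license") = "GPL";
```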
|
verifier_global_ptr_args.c
     12  extern struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
     13  extern void bpf_task_release(struct task_struct *p) __ksym __weak;
     33  struct task_struct *t1 = bpf_get_current_task_btf();  in trusted_task_arg_nullable()
     34  struct task_struct *t2 = bpf_task_acquire(t1);  in trusted_task_arg_nullable()
     79  struct task_struct *t = bpf_get_current_task_btf();  in trusted_task_arg_nonnull_fail2()
     80  struct task_struct *nullable;  in trusted_task_arg_nonnull_fail2()
    100  struct task_struct *t = bpf_get_current_task_btf();  in trusted_task_arg_nonnull()
    142  struct task_struct *t = bpf_get_current_task_btf();  in flavor_ptr_nonnull()
    164  struct task_struct *owned;  in subprog_trusted_acq_rel()
    235  subprog_untrusted(bpf_core_cast(0, struct task_struct));  in anything_to_untrusted()
    [all …]
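verifier_global_ptr_args.c exercises pointer-argument annotations on global subprograms. As a hedged sketch (the __arg_trusted macro below is assumed to mirror the selftests' bpf_misc.h definition, and the includes from the first sketch above are assumed), a trusted task pointer can be passed straight into a global subprog:

```c
/* Assumption: matches the decl-tag spelling used by bpf_misc.h. */
#define __arg_trusted __attribute__((btf_decl_tag("arg:trusted")))

/* Global (non-static) subprog: the tag tells the verifier the argument
 * is a trusted kernel pointer, so direct field reads are allowed. */
__weak int subprog_read_pid(struct task_struct *task __arg_trusted)
{
	return task->pid;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(demo_trusted_arg, struct task_struct *task, u64 clone_flags)
{
	/* tp_btf arguments are trusted, so this call verifies. */
	subprog_read_pid(task);
	return 0;
}
```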
|
task_kfunc_common.h
     13  struct task_struct __kptr * task;
     23  struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
     24  void bpf_task_release(struct task_struct *p) __ksym;
     25  struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
     26  struct task_struct *bpf_task_from_vpid(s32 vpid) __ksym;
     30  static inline struct __tasks_kfunc_map_value *tasks_kfunc_map_value_lookup(struct task_struct *p)  in tasks_kfunc_map_value_lookup()
     42  static inline int tasks_kfunc_map_insert(struct task_struct *p)  in tasks_kfunc_map_insert()
     46  struct task_struct *acquired, *old;  in tasks_kfunc_map_insert()
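The common header wires these kfuncs to a map whose value embeds a struct task_struct __kptr. A sketch of the insert path it implements (map name, key type, and includes are assumptions for illustration): zero-fill a slot, then move an acquired reference into it with bpf_kptr_xchg(), releasing whatever was displaced.

```c
struct __tasks_kfunc_map_value {
	struct task_struct __kptr *task;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, struct __tasks_kfunc_map_value);
	__uint(max_entries, 1);
} tasks_map SEC(".maps");

static inline int demo_map_insert(struct task_struct *p)
{
	struct __tasks_kfunc_map_value local = {}, *v;
	struct task_struct *acquired, *old;
	int key = 0;

	/* Reserve a zeroed slot for the kptr. */
	if (bpf_map_update_elem(&tasks_map, &key, &local, BPF_NOEXIST))
		return -1;

	v = bpf_map_lookup_elem(&tasks_map, &key);
	if (!v)
		return -1;

	acquired = bpf_task_acquire(p);
	if (!acquired)
		return -1;

	/* Swap the reference into the map; release any old kptr. */
	old = bpf_kptr_xchg(&v->task, acquired);
	if (old)
		bpf_task_release(old);
	return 0;
}
```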
|
struct_ops_kptr_return_fail__wrong_type.c
      9  void bpf_task_release(struct task_struct *p) __ksym;
     16  struct task_struct *BPF_PROG(kptr_return_fail__wrong_type, int dummy,  in BPF_PROG()
     17  struct task_struct *task, struct cgroup *cgrp)  in BPF_PROG()
     19  struct task_struct *ret;  in BPF_PROG()
     21  ret = (struct task_struct *)bpf_cgroup_acquire(cgrp);  in BPF_PROG()
|
test_task_under_cgroup.c
     11  long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym;
     13  struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
     14  void bpf_task_release(struct task_struct *p) __ksym;
     21  int BPF_PROG(tp_btf_run, struct task_struct *task, u64 clone_flags)  in BPF_PROG()
     24  struct task_struct *acquired;  in BPF_PROG()
     55  struct task_struct *task;  in BPF_PROG()
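A sketch of what the tp_btf program in this test does, using the kfunc declarations listed above: pin the new task, resolve an ancestor cgroup from an ID supplied by user space, and test membership with bpf_task_under_cgroup(). Global variable names are illustrative.

```c
struct cgroup *bpf_cgrp_from_id(u64 cgid) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

u64 cgid;          /* set by user space before attaching */
int remote_pid;    /* result reported back to user space */

SEC("tp_btf/task_newtask")
int BPF_PROG(demo_under_cgroup, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;
	struct cgroup *cgrp;

	acquired = bpf_task_acquire(task);
	if (!acquired)
		return 0;

	cgrp = bpf_cgrp_from_id(cgid);
	if (cgrp) {
		if (bpf_task_under_cgroup(acquired, cgrp))
			remote_pid = acquired->pid;
		bpf_cgroup_release(cgrp);
	}
	bpf_task_release(acquired);
	return 0;
}
```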
|
struct_ops_refcounted_fail__global_subprog.c
      8  extern void bpf_task_release(struct task_struct *p) __ksym;
     12  struct task_struct *task = (struct task_struct *)ctx[1];  in subprog_release()
     29  struct task_struct *task = (struct task_struct *)ctx[1];  in refcounted_fail__global_subprog()
|
iters_testmod.c
     15  struct task_struct *cur_task = bpf_get_current_task_btf();  in iter_next_trusted()
     35  struct task_struct *cur_task = bpf_get_current_task_btf();  in iter_next_trusted_or_null()
     53  struct task_struct *cur_task = bpf_get_current_task_btf();  in iter_next_rcu()
     55  struct task_struct *task_ptr;  in iter_next_rcu()
     73  struct task_struct *cur_task = bpf_get_current_task_btf();  in iter_next_rcu_or_null()
     75  struct task_struct *task_ptr;  in iter_next_rcu_or_null()
     91  struct task_struct *cur_task = bpf_get_current_task_btf();  in iter_next_rcu_not_trusted()
     93  struct task_struct *task_ptr;  in iter_next_rcu_not_trusted()
|
cpumask_failure.c
     36  int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags)  in BPF_PROG()
     49  int BPF_PROG(test_alloc_double_release, struct task_struct *task, u64 clone_flags)  in BPF_PROG()
     64  int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_flags)  in BPF_PROG()
     77  int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags)  in BPF_PROG()
     87  int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_flags)  in BPF_PROG()
    111  int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags)  in BPF_PROG()
    121  int BPF_PROG(test_global_mask_out_of_rcu, struct task_struct *task, u64 clone_flags)  in BPF_PROG()
    155  int BPF_PROG(test_global_mask_no_null_check, struct task_struct *task, u64 clone_flags)  in BPF_PROG()
    208  int BPF_PROG(test_invalid_nested_array, struct task_struct *task, u64 clone_flags)  in BPF_PROG()
    228  int BPF_PROG(test_populate_invalid_destination, struct task_struct *task, u64 clone_flags)  in BPF_PROG()
    [all …]
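Each failure case above violates one cpumask lifetime rule. For contrast, a sketch of the correct usage those rules imply (assuming the same includes as the first sketch): every mask allocated with bpf_cpumask_create() is released exactly once.

```c
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(demo_cpumask_ok, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask;

	mask = bpf_cpumask_create();
	if (!mask)	/* allocation can fail; the NULL check is mandatory */
		return 0;

	bpf_cpumask_set_cpu(0, mask);

	/* Skipping this release is the test_alloc_no_release failure;
	 * calling it twice is test_alloc_double_release. */
	bpf_cpumask_release(mask);
	return 0;
}
```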
|
struct_ops_kptr_return_fail__invalid_scalar.c
      9  void bpf_task_release(struct task_struct *p) __ksym;
     16  struct task_struct *BPF_PROG(kptr_return_fail__invalid_scalar, int dummy,  in BPF_PROG()
     17  struct task_struct *task, struct cgroup *cgrp)  in BPF_PROG()
     20  return (struct task_struct *)1;  in BPF_PROG()
|
struct_ops_kptr_return_fail__nonzero_offset.c
      9  void bpf_task_release(struct task_struct *p) __ksym;
     16  struct task_struct *BPF_PROG(kptr_return_fail__nonzero_offset, int dummy,  in BPF_PROG()
     17  struct task_struct *task, struct cgroup *cgrp)  in BPF_PROG()
     19  return (struct task_struct *)&task->jobctl;  in BPF_PROG()
|
rcu_read_lock.c
     27  struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
     33  struct task_struct *task;  in get_cgroup_id()
     54  struct task_struct *task, *real_parent;  in task_succ()
     83  struct task_struct *task, *real_parent;  in no_lock()
     95  struct task_struct *task, *real_parent;  in two_regions()
    114  struct task_struct *task, *real_parent;  in non_sleepable_1()
    130  struct task_struct *task, *real_parent;  in non_sleepable_2()
    177  struct task_struct *task;  in miss_lock()
    191  struct task_struct *task;  in miss_unlock()
    203  struct task_struct *task, *real_parent;  in non_sleepable_rcu_mismatch()
    [all …]
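The pattern under test here: task->real_parent is RCU-protected, so it may only be dereferenced inside a bpf_rcu_read_lock()/bpf_rcu_read_unlock() pair (the miss_lock/miss_unlock variants get this wrong on purpose). A sketch, with an arch-specific attach point as an assumption:

```c
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

int parent_pid;

SEC("fentry.s/__x64_sys_getpgid")	/* x86-64 syscall symbol; assumption */
int demo_rcu(void *ctx)
{
	struct task_struct *task, *real_parent;

	task = bpf_get_current_task_btf();

	bpf_rcu_read_lock();
	real_parent = task->real_parent;   /* valid only inside the RCU section */
	if (real_parent)
		parent_pid = real_parent->pid;
	bpf_rcu_read_unlock();
	return 0;
}
```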
|
test_overhead.c
     10  struct task_struct;
     13  int BPF_KPROBE(prog1, struct task_struct *tsk, const char *buf, bool exec)  in BPF_KPROBE()
     31  int BPF_PROG(prog4, struct task_struct *tsk, const char *buf, bool exec)  in BPF_PROG()
     37  int BPF_PROG(prog5, struct task_struct *tsk, const char *buf, bool exec)  in BPF_PROG()
|
struct_ops_kptr_return_fail__local_kptr.c
     10  void bpf_task_release(struct task_struct *p) __ksym;
     17  struct task_struct *BPF_PROG(kptr_return_fail__local_kptr, int dummy,  in BPF_PROG()
     18  struct task_struct *task, struct cgroup *cgrp)  in BPF_PROG()
     20  struct task_struct *t;  in BPF_PROG()
|
struct_ops_kptr_return.c
      8  void bpf_task_release(struct task_struct *p) __ksym;
     15  struct task_struct *BPF_PROG(kptr_return, int dummy,  in BPF_PROG()
     16  struct task_struct *task, struct cgroup *cgrp)  in BPF_PROG()
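This is the success counterpart to the four *_fail variants above: a struct_ops callback whose return type is a referenced kptr must return either NULL or a pointer the program holds a reference on. A hedged sketch against a hypothetical struct_ops hook (the real test targets a bpf_testmod operation; the SEC name below is an assumption):

```c
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;

SEC("struct_ops/test_return_ref_kptr")	/* hypothetical op name */
struct task_struct *BPF_PROG(demo_kptr_return, int dummy,
			     struct task_struct *task, struct cgroup *cgrp)
{
	/* Returning a scalar, a wrong-typed pointer, a nonzero offset, or
	 * a program-local object would all be rejected by the verifier. */
	return bpf_task_acquire(task);
}
```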
|
test_subprogs.c
     57  return BPF_CORE_READ((struct task_struct *)(void *)t, tgid);  in get_task_tgid()
     71  struct task_struct *t = (void *)bpf_get_current_task();  in prog1()
     83  struct task_struct *t = (void *)bpf_get_current_task();  in prog2()
    101  struct task_struct *t = (void *)bpf_get_current_task();  in prog3()
    117  struct task_struct *t = (void *)bpf_get_current_task();  in prog4()
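These subprogs read task fields through a raw (untyped) pointer, which is why they go through BPF_CORE_READ() instead of a direct dereference. A condensed sketch of the shape prog1..prog4 share:

```c
#include <bpf/bpf_core_read.h>

static __noinline int get_task_tgid(uintptr_t t)
{
	/* CO-RE-relocated probe read; safe on an untyped task pointer. */
	return BPF_CORE_READ((struct task_struct *)(void *)t, tgid);
}

SEC("raw_tp/sys_enter")
int demo_subprog_caller(void *ctx)
{
	struct task_struct *t = (void *)bpf_get_current_task();

	if (!t)
		return 1;
	return get_task_tgid((uintptr_t)t) ? 0 : 1;
}
```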
|
/tools/testing/selftests/sched_ext/

maximal.bpf.c
     39  void BPF_STRUCT_OPS(maximal_running, struct task_struct *p)  in BPF_STRUCT_OPS()
     48  bool BPF_STRUCT_OPS(maximal_yield, struct task_struct *from,  in BPF_STRUCT_OPS()
     49  struct task_struct *to)  in BPF_STRUCT_OPS()
     54  bool BPF_STRUCT_OPS(maximal_core_sched_before, struct task_struct *a,  in BPF_STRUCT_OPS()
     55  struct task_struct *b)  in BPF_STRUCT_OPS()
     63  void BPF_STRUCT_OPS(maximal_set_cpumask, struct task_struct *p,  in BPF_STRUCT_OPS()
     84  s32 BPF_STRUCT_OPS(maximal_init_task, struct task_struct *p,  in BPF_STRUCT_OPS()
     90  void BPF_STRUCT_OPS(maximal_enable, struct task_struct *p)  in BPF_STRUCT_OPS()
     93  void BPF_STRUCT_OPS(maximal_exit_task, struct task_struct *p,  in BPF_STRUCT_OPS()
     97  void BPF_STRUCT_OPS(maximal_disable, struct task_struct *p)  in BPF_STRUCT_OPS()
    [all …]
|
maybe_null.bpf.c
     12  void BPF_STRUCT_OPS(maybe_null_running, struct task_struct *p)  in BPF_STRUCT_OPS()
     15  void BPF_STRUCT_OPS(maybe_null_success_dispatch, s32 cpu, struct task_struct *p)  in BPF_STRUCT_OPS()
     21  bool BPF_STRUCT_OPS(maybe_null_success_yield, struct task_struct *from,  in BPF_STRUCT_OPS()
     22  struct task_struct *to)  in BPF_STRUCT_OPS()
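What the maybe_null tests pin down: some sched_ext callback arguments are nullable (for example, .dispatch is invoked with p == NULL when the CPU has nothing running), and the verifier rejects a dereference without a NULL check. A sketch, assuming the scx common BPF headers for BPF_STRUCT_OPS:

```c
u64 last_pid;

void BPF_STRUCT_OPS(demo_dispatch, s32 cpu, struct task_struct *p)
{
	/* Dropping this check is exactly the maybe_null_fail_* mistake. */
	if (p)
		last_pid = p->pid;
}
```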
|
select_cpu_vtime.bpf.c
     28  static inline u64 task_vtime(const struct task_struct *p)  in task_vtime()
     38  s32 BPF_STRUCT_OPS(select_cpu_vtime_select_cpu, struct task_struct *p,  in BPF_STRUCT_OPS()
     54  void BPF_STRUCT_OPS(select_cpu_vtime_dispatch, s32 cpu, struct task_struct *p)  in BPF_STRUCT_OPS()
     60  void BPF_STRUCT_OPS(select_cpu_vtime_running, struct task_struct *p)  in BPF_STRUCT_OPS()
     66  void BPF_STRUCT_OPS(select_cpu_vtime_stopping, struct task_struct *p,  in BPF_STRUCT_OPS()
     72  void BPF_STRUCT_OPS(select_cpu_vtime_enable, struct task_struct *p)  in BPF_STRUCT_OPS()
|
init_enable_count.bpf.c
     18  s32 BPF_STRUCT_OPS_SLEEPABLE(cnt_init_task, struct task_struct *p,  in BPF_STRUCT_OPS_SLEEPABLE()
     31  void BPF_STRUCT_OPS(cnt_exit_task, struct task_struct *p)  in BPF_STRUCT_OPS()
     36  void BPF_STRUCT_OPS(cnt_enable, struct task_struct *p)  in BPF_STRUCT_OPS()
     41  void BPF_STRUCT_OPS(cnt_disable, struct task_struct *p)  in BPF_STRUCT_OPS()
|
exit.bpf.c
     20  s32 BPF_STRUCT_OPS(exit_select_cpu, struct task_struct *p,  in BPF_STRUCT_OPS()
     31  void BPF_STRUCT_OPS(exit_enqueue, struct task_struct *p, u64 enq_flags)  in BPF_STRUCT_OPS()
     39  void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)  in BPF_STRUCT_OPS()
     47  void BPF_STRUCT_OPS(exit_enable, struct task_struct *p)  in BPF_STRUCT_OPS()
     53  s32 BPF_STRUCT_OPS(exit_init_task, struct task_struct *p,  in BPF_STRUCT_OPS()
|
maybe_null_fail_yld.bpf.c
     12  void BPF_STRUCT_OPS(maybe_null_running, struct task_struct *p)  in BPF_STRUCT_OPS()
     15  bool BPF_STRUCT_OPS(maybe_null_fail_yield, struct task_struct *from,  in BPF_STRUCT_OPS()
     16  struct task_struct *to)  in BPF_STRUCT_OPS()
|
/tools/bpf/runqslower/

runqslower.bpf.c
     28  static int trace_enqueue(struct task_struct *t)  in trace_enqueue()
     49  struct task_struct *p = (void *)ctx[0];  in handle__sched_wakeup()
     58  struct task_struct *p = (void *)ctx[0];  in handle__sched_wakeup_new()
     69  struct task_struct *prev = (struct task_struct *)ctx[1];  in handle__sched_switch()
     70  struct task_struct *next = (struct task_struct *)ctx[2];  in handle__sched_switch()
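runqslower's handlers show the raw-tracepoint calling convention used throughout these listings: a tp_btf program receives a u64 *ctx array whose slots mirror the tracepoint prototype, so for sched_switch (preempt, prev, next) the tasks sit in slots 1 and 2:

```c
SEC("tp_btf/sched_switch")
int handle__sched_switch(u64 *ctx)
{
	/* TP_PROTO(bool preempt, struct task_struct *prev,
	 *          struct task_struct *next) */
	struct task_struct *prev = (struct task_struct *)ctx[1];
	struct task_struct *next = (struct task_struct *)ctx[2];

	/* ... stamp prev's enqueue time, report next's queueing delay ... */
	return 0;
}
```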
|
/tools/perf/util/bpf_skel/

kwork_top.bpf.c
    175  struct task_struct *prev, *next;  in on_switch()
    177  prev = (struct task_struct *)ctx[1];  in on_switch()
    178  next = (struct task_struct *)ctx[2];  in on_switch()
    199  struct task_struct *task;  in on_irq_handler_entry()
    211  task = (struct task_struct *)bpf_get_current_task();  in on_irq_handler_entry()
    234  struct task_struct *task;  in on_irq_handler_exit()
    247  task = (struct task_struct *)bpf_get_current_task();  in on_irq_handler_exit()
    271  struct task_struct *task;  in on_softirq_entry()
    283  task = (struct task_struct *)bpf_get_current_task();  in on_softirq_entry()
    306  struct task_struct *task;  in on_softirq_exit()
    [all …]
|
/tools/sched_ext/

scx_simple.bpf.c
     55  s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)  in BPF_STRUCT_OPS()
     69  void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags)  in BPF_STRUCT_OPS()
     90  void BPF_STRUCT_OPS(simple_dispatch, s32 cpu, struct task_struct *prev)  in BPF_STRUCT_OPS()
     95  void BPF_STRUCT_OPS(simple_running, struct task_struct *p)  in BPF_STRUCT_OPS()
    110  void BPF_STRUCT_OPS(simple_stopping, struct task_struct *p, bool runnable)  in BPF_STRUCT_OPS()
    127  void BPF_STRUCT_OPS(simple_enable, struct task_struct *p)  in BPF_STRUCT_OPS()
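scx_simple is the canonical minimal scheduler: enqueue everything on a shared dispatch queue and let the default consumption path run it. A sketch of the core callback; note the dispatch kfunc was renamed across kernel versions (scx_bpf_dispatch() in older trees, scx_bpf_dsq_insert() in newer ones), so the exact call below is an assumption.

```c
/* Assumes the scx common headers (tools/sched_ext/include/scx/common.bpf.h). */
void BPF_STRUCT_OPS(demo_enqueue, struct task_struct *p, u64 enq_flags)
{
	/* Queue p on the built-in global DSQ with the default slice. */
	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}

SCX_OPS_DEFINE(demo_ops,
	       .enqueue = (void *)demo_enqueue,
	       .name    = "demo");
```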
|