/kernel/bpf/

mprog.c:
    25  tuple->link = link;  in bpf_mprog_link()
    203  (!tuple->link || tuple->link == cp->link))  in bpf_mprog_pos_before()
    219  (!tuple->link || tuple->link == cp->link))  in bpf_mprog_pos_after()
    233  .link = link,  in bpf_mprog_attach()
    236  .link = link,  in bpf_mprog_attach()
    312  link = cp->link;  in bpf_mprog_fetch()
    319  if (link && !tuple->link)  in bpf_mprog_fetch()
    322  WARN_ON_ONCE(tuple->link && tuple->link != link);  in bpf_mprog_fetch()
    324  tuple->link = link;  in bpf_mprog_fetch()
    335  .link = link,  in bpf_mprog_detach()
    [all …]

tcx.c:
    112  if (tuple.link)  in tcx_uninstall()
    113  tcx_link(tuple.link)->dev = NULL;  in tcx_uninstall()
    154  ret = bpf_mprog_attach(entry, &entry_new, link->prog, link, NULL, flags,  in tcx_link_prog_attach()
    186  ret = bpf_mprog_detach(entry, &entry_new, link->prog, link, 0, 0, 0);  in tcx_link_release()
    218  if (oprog && link->prog != oprog) {  in tcx_link_update()
    222  oprog = link->prog;  in tcx_link_update()
    234  link->prog->aux->id, 0);  in tcx_link_update()
    237  oprog = xchg(&link->prog, nprog);  in tcx_link_update()
    248  kfree(tcx_link(link));  in tcx_link_dealloc()
    263  link->attach_type,  in tcx_link_fdinfo()
    [all …]

bpf_iter.c:
    17  struct bpf_link link;  (member)
    394  container_of(link, struct bpf_iter_link, link);  in bpf_iter_link_release()
    403  container_of(link, struct bpf_iter_link, link);  in bpf_iter_link_dealloc()
    439  container_of(link, struct bpf_iter_link, link);  in bpf_iter_link_show_fdinfo()
    455  container_of(link, struct bpf_iter_link, link);  in bpf_iter_link_fill_link_info()
    551  link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);  in bpf_iter_link_attach()
    552  if (!link)  in bpf_iter_link_attach()
    559  err = bpf_link_prime(&link->link, &link_primer);  in bpf_iter_link_attach()
    561  kfree(link);  in bpf_iter_link_attach()
    600  prog = link->link.prog;  in prepare_seq_file()
    [all …]
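The bpf_iter.c hits (and the net_namespace.c, trampoline.c, cgroup.c and syscall.c ones below) repeat one pattern: a subsystem embeds the generic struct bpf_link in its own wrapper and recovers the wrapper inside the link-ops callbacks with container_of(). A minimal sketch of that pattern, assuming kernel context; struct my_iter_link and its priv field are hypothetical, not kernel definitions:

```c
#include <linux/bpf.h>
#include <linux/slab.h>

/* Illustrative wrapper: the generic link is embedded as a member named "link". */
struct my_iter_link {
	struct bpf_link link;
	void *priv;		/* subsystem-specific state (hypothetical) */
};

/* Link-ops callbacks receive only the generic struct bpf_link pointer and
 * recover the wrapper with container_of(), exactly as the hits above do. */
static void my_iter_link_dealloc(struct bpf_link *link)
{
	struct my_iter_link *iter_link =
		container_of(link, struct my_iter_link, link);

	kfree(iter_link);
}
```

The trampoline.c and syscall.c hits show the same idea one level deeper: bpf_tracing_link itself embeds a bpf_link, so the wrapper is recovered with container_of(link, struct bpf_shim_tramp_link, link.link).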
net_namespace.c:
    13  struct bpf_link link;  (member)
    66  struct bpf_netns_link *link)  in link_index()  (argument)
    72  if (pos == link)  in link_index()
    104  container_of(link, struct bpf_netns_link, link);  in bpf_netns_link_release()
    151  bpf_netns_link_release(link);  in bpf_netns_link_detach()
    158  container_of(link, struct bpf_netns_link, link);  in bpf_netns_link_dealloc()
    168  container_of(link, struct bpf_netns_link, link);  in bpf_netns_link_update_prog()
    207  container_of(link, struct bpf_netns_link, link);  in bpf_netns_link_fill_info()
    227  bpf_netns_link_fill_info(link, &info);  in bpf_netns_link_show_fdinfo()
    232  link->attach_type);  in bpf_netns_link_show_fdinfo()
    [all …]

trampoline.c:
    251  *ip_arg |= link->link.prog->call_get_func_ip;  in bpf_trampoline_get_progs()
    558  kind = bpf_attach_type_to_tramp(link->link.prog);  in __bpf_trampoline_link_prog()
    575  tr->extension_prog = link->link.prog;  in __bpf_trampoline_link_prog()
    577  link->link.prog->bpf_func);  in __bpf_trampoline_link_prog()
    585  if (link_exiting->link.prog != link->link.prog)  in __bpf_trampoline_link_prog()
    652  container_of(link, struct bpf_shim_tramp_link, link.link);  in bpf_shim_tramp_link_release()
    665  container_of(link, struct bpf_shim_tramp_link, link.link);  in bpf_shim_tramp_link_dealloc()
    719  struct bpf_prog *p = link->link.prog;  in cgroup_shim_find()
    759  bpf_link_inc(&shim_link->link.link);  in bpf_trampoline_link_cgroup_shim()
    788  bpf_link_put(&shim_link->link.link);  in bpf_trampoline_link_cgroup_shim()
    [all …]

cgroup.c:
    378  return pl->link->link.prog;  in prog_list_prog()
    642  if (link && pl->link == link)  in find_attach_entry()
    843  prog ? : link->link.prog, cgrp))  in __cgroup_bpf_attach()
    864  pl->link = link;  in __cgroup_bpf_attach()
    952  if (pl->link == link)  in replace_effective_prog()
    963  WRITE_ONCE(item->prog, link->link.prog);  in replace_effective_prog()
    993  if (link->link.prog->type != new_prog->type)  in __cgroup_bpf_replace()
    997  if (pl->link == link) {  in __cgroup_bpf_replace()
    1062  if (pl->prog == prog && pl->link == link)  in find_detach_entry()
    1178  pl->link = link;  in __cgroup_bpf_detach()
    [all …]

syscall.c:
    3140  link->ops->dealloc(link);  in bpf_link_dealloc()
    3176  if (link->sleepable || (link->prog && link->prog->sleepable))  in bpf_link_free()
    3344  primer->link = link;  in bpf_link_prime()
    3389  container_of(link, struct bpf_tracing_link, link.link);  in bpf_tracing_link_release()
    3405  container_of(link, struct bpf_tracing_link, link.link);  in bpf_tracing_link_dealloc()
    3414  container_of(link, struct bpf_tracing_link, link.link);  in bpf_tracing_link_show_fdinfo()
    3434  container_of(link, struct bpf_tracing_link, link.link);  in bpf_tracing_link_fill_link_info()
    3523  bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,  in bpf_tracing_prog_attach()
    3526  link->link.cookie = bpf_cookie;  in bpf_tracing_prog_attach()
    3608  err = bpf_link_prime(&link->link.link, &link_primer);  in bpf_tracing_prog_attach()
    [all …]

bpf_struct_ops.c:
    804  link = kzalloc(sizeof(*link), GFP_USER);  in bpf_struct_ops_map_update_elem()
    805  if (!link) {  in bpf_struct_ops_map_update_elem()
    810  bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,  in bpf_struct_ops_map_update_elem()
    812  *plink++ = &link->link;  in bpf_struct_ops_map_update_elem()
    1192  st_link = container_of(link, struct bpf_struct_ops_link, link);  in bpf_struct_ops_map_link_dealloc()
    1208  st_link = container_of(link, struct bpf_struct_ops_link, link);  in bpf_struct_ops_map_link_show_fdinfo()
    1349  link = kzalloc(sizeof(*link), GFP_USER);  in bpf_struct_ops_link_create()
    1350  if (!link) {  in bpf_struct_ops_link_create()
    1357  err = bpf_link_prime(&link->link, &link_primer);  in bpf_struct_ops_link_create()
    1371  link = NULL;  in bpf_struct_ops_link_create()
    [all …]
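The bpf_struct_ops.c and syscall.c hits above trace the common bpf_link creation lifecycle: allocate the wrapper, bpf_link_init() the embedded link with its ops, bpf_link_prime() it to reserve an fd and ID, then either bpf_link_settle() to publish the fd or clean up on error. A rough sketch under those assumptions; struct my_link, my_link_ops and my_link_create() are illustrative, and BPF_LINK_TYPE_UNSPEC stands in for a real link type:

```c
#include <linux/bpf.h>
#include <linux/slab.h>

/* Illustrative wrapper and ops; not the kernel's definitions. */
struct my_link {
	struct bpf_link link;
};

static void my_link_release(struct bpf_link *link)
{
	/* detach the program from the subsystem here */
}

static void my_link_dealloc(struct bpf_link *link)
{
	kfree(container_of(link, struct my_link, link));
}

static const struct bpf_link_ops my_link_ops = {
	.release = my_link_release,
	.dealloc = my_link_dealloc,
};

static int my_link_create(struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct my_link *link;
	int err;

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link)
		return -ENOMEM;
	bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC, &my_link_ops, prog);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);	/* not primed yet, so a plain kfree is enough */
		return err;
	}

	/* ... attach prog to the subsystem; a failure after priming would be
	 * unwound with bpf_link_cleanup(&link_primer) instead of kfree() ... */

	return bpf_link_settle(&link_primer);	/* publishes the new fd */
}
```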
link_iter.c:
    16  struct bpf_link *link;  in bpf_link_seq_start()  (local)
    18  link = bpf_link_get_curr_or_next(&info->link_id);  in bpf_link_seq_start()
    19  if (!link)  in bpf_link_seq_start()
    24  return link;  in bpf_link_seq_start()
    39  __bpf_md_ptr(struct bpf_link *, link);
    42  DEFINE_BPF_ITER_FUNC(bpf_link, struct bpf_iter_meta *meta, struct bpf_link *link)  in DEFINE_BPF_ITER_FUNC()  (argument)
    52  ctx.link = v;  in DEFINE_BPF_ITER_FUNC()
    94  { offsetof(struct bpf_iter__bpf_link, link),

range_tree.c:
    89  struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;  in __range_size_insert()  (local)
    93  while (*link) {  in __range_size_insert()
    94  rb = *link;  in __range_size_insert()
    96  link = &rb->rb_left;  in __range_size_insert()
    98  link = &rb->rb_right;  in __range_size_insert()
    103  rb_link_node(&rn->rb_range_size, rb, link);  in __range_size_insert()
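The range_tree.c hits show the stock rbtree insertion idiom: walk down from the root through a struct rb_node ** conventionally named link, remember the parent, then splice the new node in with rb_link_node() and rebalance with rb_insert_color(). A minimal sketch of that idiom (the kernel file uses an rb_root_cached and a size-ordered key; struct my_range and its start key here are illustrative):

```c
#include <linux/rbtree.h>
#include <linux/types.h>

struct my_range {
	struct rb_node node;
	u32 start;	/* sort key (illustrative) */
};

static void my_range_insert(struct rb_root *root, struct my_range *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	/* Find the insertion point, keeping track of the parent node. */
	while (*link) {
		struct my_range *cur = rb_entry(*link, struct my_range, node);

		parent = *link;
		if (new->start < cur->start)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	/* Attach the new node at *link, then rebalance. */
	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);
}
```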
inode.c:
    363  struct bpf_link *link = arg;  in bpf_mklink()  (local)
    366  bpf_link_is_iter(link) ?  in bpf_mklink()
    386  char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);  in bpf_symlink()  (local)
    389  if (!link)  in bpf_symlink()
    394  kfree(link);  in bpf_symlink()
    399  inode->i_link = link;  in bpf_symlink()
    411  .link = simple_link,
    417  const char *name, struct bpf_link *link)  in bpf_iter_link_pin_kernel()  (argument)
    995  bpf_link_inc(objs[i].link);  in populate_bpffs()
    997  objs[i].link_name, objs[i].link);  in populate_bpffs()
    [all …]
/kernel/trace/

bpf_trace.c:
    2243  struct bpf_prog *prog = link->link.prog;  in __bpf_trace_run()
    2313  struct bpf_prog *prog = link->link.prog;  in bpf_probe_register()
    2693  link = run_ctx->link;  in bpf_kprobe_multi_cookie()
    2724  .link = link,  in kprobe_multi_link_prog_run()
    2741  err = bpf_prog_run(link->link.prog, regs);  in kprobe_multi_link_prog_run()
    2981  link = kzalloc(sizeof(*link), GFP_KERNEL);  in bpf_kprobe_multi_link_attach()
    3004  link->link.flags = flags;  in bpf_kprobe_multi_link_attach()
    3245  struct bpf_prog *prog = link->link.prog;  in uprobe_prog_run()
    3261  err = bpf_prog_run(link->link.prog, regs);  in uprobe_prog_run()
    3424  uprobes[i].link = link;  in bpf_uprobe_multi_link_attach()
    [all …]
trace_probe.c:
    2149  struct event_file_link *link;  in trace_probe_add_file()  (local)
    2151  link = kmalloc(sizeof(*link), GFP_KERNEL);  in trace_probe_add_file()
    2152  if (!link)  in trace_probe_add_file()
    2155  link->file = file;  in trace_probe_add_file()
    2156  INIT_LIST_HEAD(&link->list);  in trace_probe_add_file()
    2165  struct event_file_link *link;  in trace_probe_get_file_link()  (local)
    2168  if (link->file == file)  in trace_probe_get_file_link()
    2169  return link;  in trace_probe_get_file_link()
    2181  if (!link)  in trace_probe_remove_file()
    2184  list_del_rcu(&link->list);  in trace_probe_remove_file()
    [all …]

trace_events_inject.c:
    145  list_for_each_entry(field, head, link) {  in trace_get_entry_size()
    166  list_for_each_entry(field, head, link) {  in trace_alloc_entry()

trace_fprobe.c:
    354  struct event_file_link *link;  in fentry_trace_func()  (local)
    356  trace_probe_for_each_link_rcu(link, &tf->tp)  in fentry_trace_func()
    357  __fentry_trace_func(tf, entry_ip, fregs, link->file);  in fentry_trace_func()
    441  struct event_file_link *link;  in fexit_trace_func()  (local)
    443  trace_probe_for_each_link_rcu(link, &tf->tp)  in fexit_trace_func()
    444  __fexit_trace_func(tf, entry_ip, ret_ip, fregs, entry_data, link->file);  in fexit_trace_func()

trace_uprobe.c:
    1051  struct event_file_link *link;  in uprobe_trace_func()  (local)
    1060  trace_probe_for_each_link_rcu(link, &tu->tp)  in uprobe_trace_func()
    1061  __uprobe_trace_func(tu, 0, regs, ucb, link->file);  in uprobe_trace_func()
    1071  struct event_file_link *link;  in uretprobe_trace_func()  (local)
    1077  trace_probe_for_each_link_rcu(link, &tu->tp)  in uretprobe_trace_func()
    1078  __uprobe_trace_func(tu, func, regs, ucb, link->file);  in uretprobe_trace_func()

trace_events_user.c:
    1102  list_for_each_entry_safe(field, next, head, link) {  in user_event_destroy_fields()
    1103  list_del(&field->link);  in user_event_destroy_fields()
    1159  list_add(&field->link, &user->fields);  in user_event_add_field()
    1395  list_for_each_entry_reverse(field, head, link) {  in user_event_set_print_fmt()
    1407  list_for_each_entry_reverse(field, head, link) {  in user_event_set_print_fmt()
    1824  list_for_each_entry_reverse(field, head, link) {  in user_event_show()
    1910  list_for_each_entry_reverse(field, head, link) {  in user_fields_match()
/kernel/

user-return-notifier.c:
    18  hlist_add_head(&urn->link, this_cpu_ptr(&return_notifier_list));  in user_return_notifier_register()
    28  hlist_del(&urn->link);  in user_return_notifier_unregister()
    42  hlist_for_each_entry_safe(urn, tmp2, head, link)  in fire_user_return_notifiers()
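user-return-notifier.c keeps its notifiers on a per-CPU hlist through an hlist_node member named link: registration pushes onto the current CPU's list, and firing walks it with the _safe iterator so a callback may unregister itself. A compact sketch of the same shape, assuming kernel context; my_notifier, my_notifier_list and on_fire() are illustrative, and the preemption/irq discipline of the real code is omitted:

```c
#include <linux/list.h>
#include <linux/percpu.h>

struct my_notifier {
	void (*on_fire)(struct my_notifier *n);
	struct hlist_node link;
};

static DEFINE_PER_CPU(struct hlist_head, my_notifier_list);

static void my_notifier_register(struct my_notifier *n)
{
	hlist_add_head(&n->link, this_cpu_ptr(&my_notifier_list));
}

static void my_notifier_unregister(struct my_notifier *n)
{
	hlist_del(&n->link);
}

static void fire_my_notifiers(void)
{
	struct hlist_node *tmp;
	struct my_notifier *n;

	/* _safe variant: a callback may hlist_del() its own entry. */
	hlist_for_each_entry_safe(n, tmp, this_cpu_ptr(&my_notifier_list), link)
		n->on_fire(n);
}
```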
/kernel/cgroup/

debug.c:
    89  struct cgrp_cset_link *link;  in current_css_set_cg_links_read()  (local)
    100  list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {  in current_css_set_cg_links_read()
    101  struct cgroup *c = link->cgrp;  in current_css_set_cg_links_read()
    117  struct cgrp_cset_link *link;  in cgroup_css_links_read()  (local)
    122  list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {  in cgroup_css_links_read()
    123  struct css_set *cset = link->cset;  in cgroup_css_links_read()

cgroup.c:
    618  struct cgrp_cset_link *link;  in __cgroup_task_count()  (local)
    970  cgroup_put(link->cgrp);  in put_css_set_locked()
    971  kfree(link);  in put_css_set_locked()
    1121  kfree(link);  in free_cgrp_cset_links()
    1141  link = kzalloc(sizeof(*link), GFP_KERNEL);  in allocate_cgrp_cset_links()
    1142  if (!link) {  in allocate_cgrp_cset_links()
    1168  link->cset = cset;  in link_css_set()
    1169  link->cgrp = cgrp;  in link_css_set()
    1370  kfree(link);  in cgroup_destroy_root()
    4868  cset = link->cset;  in css_task_iter_next_css_set()
    [all …]
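The debug.c and cgroup.c hits revolve around cgrp_cset_link, the small node that implements the many-to-many relation between cgroups and css_sets by sitting on two lists at once (cset_link on the cgroup's list, cgrp_link on the css_set's list). A generic sketch of that linking shape, with illustrative stand-in types rather than the kernel's definitions:

```c
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Illustrative stand-ins for the two object types being related. */
struct my_group { struct list_head set_links; };
struct my_set   { struct list_head group_links; };

struct group_set_link {
	struct my_group *grp;
	struct my_set *set;
	struct list_head set_link;	/* threaded on grp->set_links */
	struct list_head group_link;	/* threaded on set->group_links */
};

/* One allocation puts the pair on both owners' lists, so either side can
 * enumerate the other, mirroring cgrp->cset_links / cset->cgrp_links. */
static int link_group_set(struct my_group *grp, struct my_set *set)
{
	struct group_set_link *link = kzalloc(sizeof(*link), GFP_KERNEL);

	if (!link)
		return -ENOMEM;
	link->grp = grp;
	link->set = set;
	list_add_tail(&link->set_link, &grp->set_links);
	list_add_tail(&link->group_link, &set->group_links);
	return 0;
}
```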
/kernel/locking/

test-ww_mutex.c:
    485  struct list_head link;  (member)
    508  list_add(&ll->link, &locks);  in stress_reorder_work()
    516  list_for_each_entry(ll, &locks, link) {  in stress_reorder_work()
    522  list_for_each_entry_continue_reverse(ln, &locks, link)  in stress_reorder_work()
    532  list_move(&ll->link, &locks); /* restarts iteration */  in stress_reorder_work()
    536  list_for_each_entry(ll, &locks, link)  in stress_reorder_work()
    543  list_for_each_entry_safe(ll, ln, &locks, link)  in stress_reorder_work()
locktorture.c:
    632  struct list_head link;  in torture_ww_mutex_lock()  (member)
    638  list_add(&locks[0].link, &list);  in torture_ww_mutex_lock()
    641  list_add(&locks[1].link, &list);  in torture_ww_mutex_lock()
    644  list_add(&locks[2].link, &list);  in torture_ww_mutex_lock()
    648  list_for_each_entry(ll, &list, link) {  in torture_ww_mutex_lock()
    656  list_for_each_entry_continue_reverse(ln, &list, link)  in torture_ww_mutex_lock()
    663  list_move(&ll->link, &list);  in torture_ww_mutex_lock()
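Taken together, the test-ww_mutex.c and locktorture.c hits outline the wound/wait back-off idiom: keep the mutexes on a local list through a link member, lock them in list order, and on contention release everything already held, sleep on the contended lock with ww_mutex_lock_slow(), then list_move() it to the front so the walk effectively restarts. A trimmed reassembly of that idiom from the hits above; demo_ww_class and the three mutexes are illustrative and assumed to have been ww_mutex_init()ed elsewhere:

```c
#include <linux/list.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);
/* Assumed to be ww_mutex_init()ed against demo_ww_class elsewhere. */
static struct ww_mutex demo_mutex_0, demo_mutex_1, demo_mutex_2;

static void demo_ww_lock_all(void)
{
	struct demo_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx ctx;
	LIST_HEAD(list);

	locks[0].lock = &demo_mutex_0;
	list_add(&locks[0].link, &list);
	locks[1].lock = &demo_mutex_1;
	list_add(&locks[1].link, &list);
	locks[2].lock = &demo_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(&ctx, &demo_ww_class);

	list_for_each_entry(ll, &list, link) {
		if (!ww_mutex_lock(ll->lock, &ctx))
			continue;

		/* Contended: unwind everything already held ... */
		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		/* ... wait for the contended lock, then restart the walk. */
		ww_mutex_lock_slow(ll->lock, &ctx);
		list_move(&ll->link, &list);
	}

	/* ... critical section over all three locks ... */

	list_for_each_entry(ll, &list, link)
		ww_mutex_unlock(ll->lock);
	ww_acquire_fini(&ctx);
}
```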
/kernel/bpf/preload/

bpf_preload_kern.c:
    27  obj[0].link = maps_link;  in preload()
    29  obj[1].link = progs_link;  in preload()

bpf_preload.h:
    7  struct bpf_link *link;  (member)
/kernel/sched/

topology.c:
    1402  list_for_each_entry(entry, &asym_cap_list, link) {  in asym_cpu_capacity_classify()
    1439  list_for_each_entry(entry, &asym_cap_list, link) {  in asym_cpu_capacity_update_data()
    1443  insert_entry = list_prev_entry(entry, link);  in asym_cpu_capacity_update_data()
    1453  list_add_tail_rcu(&entry->link, &asym_cap_list);  in asym_cpu_capacity_update_data()
    1455  list_add_rcu(&entry->link, &insert_entry->link);  in asym_cpu_capacity_update_data()
    1470  list_for_each_entry(entry, &asym_cap_list, link)  in asym_cpu_capacity_scan()
    1476  list_for_each_entry_safe(entry, next, &asym_cap_list, link) {  in asym_cpu_capacity_scan()
    1478  list_del_rcu(&entry->link);  in asym_cpu_capacity_scan()
    1488  entry = list_first_entry(&asym_cap_list, typeof(*entry), link);  in asym_cpu_capacity_scan()
    1489  list_del_rcu(&entry->link);  in asym_cpu_capacity_scan()
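topology.c maintains asym_cap_list as an RCU-managed list: writers add and remove entries with the _rcu list helpers while readers traverse without taking the writer-side lock. A minimal sketch of that arrangement, assuming kernel context; struct cap_entry, cap_list and the writer-side mutex are illustrative (the real code orders entries by capacity and reference-counts them):

```c
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cap_entry {
	unsigned long capacity;
	struct list_head link;
	struct rcu_head rcu;
};

static LIST_HEAD(cap_list);
static DEFINE_MUTEX(cap_lock);	/* serializes writers */

static int cap_add(unsigned long capacity)
{
	struct cap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		return -ENOMEM;
	entry->capacity = capacity;

	mutex_lock(&cap_lock);
	list_add_tail_rcu(&entry->link, &cap_list);
	mutex_unlock(&cap_lock);
	return 0;
}

static void cap_del(struct cap_entry *entry)
{
	mutex_lock(&cap_lock);
	list_del_rcu(&entry->link);
	mutex_unlock(&cap_lock);
	kfree_rcu(entry, rcu);	/* freed only after a grace period */
}

static bool cap_present(unsigned long capacity)
{
	struct cap_entry *entry;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &cap_list, link) {
		if (entry->capacity == capacity) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
```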