Searched refs:links (Results 1 – 8 of 8) sorted by relevance
/kernel/bpf/preload/

  bpf_preload_kern.c
    51  maps_link = bpf_link_get_from_fd(skel->links.dump_bpf_map_fd);    in load_skel()
    56  progs_link = bpf_link_get_from_fd(skel->links.dump_bpf_prog_fd);  in load_skel()
    64  close_fd(skel->links.dump_bpf_map_fd);                            in load_skel()
    65  skel->links.dump_bpf_map_fd = 0;                                  in load_skel()
    66  close_fd(skel->links.dump_bpf_prog_fd);                           in load_skel()
    67  skel->links.dump_bpf_prog_fd = 0;                                 in load_skel()
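
These hits are the preload module converting the attach-link FDs stored in the skeleton's links struct into long-lived struct bpf_link references, then closing and zeroing the FDs so skeleton teardown will not touch them again. A minimal sketch of that flow, assuming the generated iterators_bpf__*() helpers from the light skeleton; the surrounding error handling is illustrative, not the literal bpf_preload_kern.c:

/* Sketch of the pattern the hits above suggest, not the literal
 * kernel/bpf/preload/bpf_preload_kern.c source. */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/fdtable.h>
#include "iterators/iterators.lskel.h"  /* generated light skeleton (endian-specific in mainline) */

static struct iterators_bpf *skel;
static struct bpf_link *maps_link, *progs_link;

static int load_skel(void)
{
        int err;

        skel = iterators_bpf__open_and_load();
        if (!skel)
                return -ENOMEM;
        err = iterators_bpf__attach(skel);
        if (err)
                goto out;

        /* Take long-lived bpf_link references from the FDs the skeleton
         * recorded in skel->links during attach. */
        maps_link = bpf_link_get_from_fd(skel->links.dump_bpf_map_fd);
        if (IS_ERR(maps_link)) {
                err = PTR_ERR(maps_link);
                maps_link = NULL;
                goto out;
        }
        progs_link = bpf_link_get_from_fd(skel->links.dump_bpf_prog_fd);
        if (IS_ERR(progs_link)) {
                err = PTR_ERR(progs_link);
                progs_link = NULL;
                goto out;
        }

        /* The raw FDs are now redundant: close them and zero the fields so
         * the skeleton's detach/destroy paths skip them (skel_closenz()
         * treats 0 as "nothing to close"). */
        close_fd(skel->links.dump_bpf_map_fd);
        skel->links.dump_bpf_map_fd = 0;
        close_fd(skel->links.dump_bpf_prog_fd);
        skel->links.dump_bpf_prog_fd = 0;
        return 0;
out:
        iterators_bpf__destroy(skel);
        return err;
}
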
/kernel/bpf/preload/iterators/

  iterators.lskel-big-endian.h
    20  } links;                                              member
    30  skel->links.dump_bpf_map_fd = fd;                     in iterators_bpf__dump_bpf_map__attach()
    41  skel->links.dump_bpf_prog_fd = fd;                    in iterators_bpf__dump_bpf_prog__attach()
    58  skel_closenz(skel->links.dump_bpf_map_fd);            in iterators_bpf__detach()
    59  skel_closenz(skel->links.dump_bpf_prog_fd);           in iterators_bpf__detach()
    80  skel->ctx.sz = (void *)&skel->links - (void *)skel;   in iterators_bpf__open()

  iterators.lskel-little-endian.h
    20  } links;                                              member
    30  skel->links.dump_bpf_map_fd = fd;                     in iterators_bpf__dump_bpf_map__attach()
    41  skel->links.dump_bpf_prog_fd = fd;                    in iterators_bpf__dump_bpf_prog__attach()
    58  skel_closenz(skel->links.dump_bpf_map_fd);            in iterators_bpf__detach()
    59  skel_closenz(skel->links.dump_bpf_prog_fd);           in iterators_bpf__detach()
    80  skel->ctx.sz = (void *)&skel->links - (void *)skel;   in iterators_bpf__open()
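
Both endian variants expose the same shape: links is a struct of plain int link FDs, each __attach() helper records the FD returned when the iterator program is attached, __detach() closes any non-zero FD with skel_closenz(), and __open() sizes the loader context so it stops right where links begins. A rough sketch of that layout, assuming the skel_internal.h helpers (skel_link_create(), skel_closenz()) and the loader-context structs from mainline; the maps/progs members shown here are illustrative:

/* Sketch of the shape the generated light skeletons share; the real headers
 * are produced by "bpftool gen skeleton -L" and differ in detail. */
#include <linux/bpf.h>
#include "skel_internal.h"   /* skel_link_create(), skel_closenz(), ... */

struct iterators_bpf {
        struct bpf_loader_ctx ctx;  /* ctx.sz = offset of links: the loader
                                     * fills everything before links */
        struct {
                struct bpf_map_desc rodata;          /* illustrative */
        } maps;
        struct {
                struct bpf_prog_desc dump_bpf_map;   /* illustrative */
                struct bpf_prog_desc dump_bpf_prog;
        } progs;
        struct {
                int dump_bpf_map_fd;   /* bpf_iter link FDs recorded at attach time */
                int dump_bpf_prog_fd;
        } links;
};

static inline int iterators_bpf__dump_bpf_map__attach(struct iterators_bpf *skel)
{
        int prog_fd = skel->progs.dump_bpf_map.prog_fd;
        int fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);

        if (fd > 0)
                skel->links.dump_bpf_map_fd = fd;   /* remembered so detach can close it */
        return fd;
}

static inline void iterators_bpf__detach(struct iterators_bpf *skel)
{
        skel_closenz(skel->links.dump_bpf_map_fd);    /* no-op when the FD is 0 */
        skel_closenz(skel->links.dump_bpf_prog_fd);
}
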
/kernel/gcov/

  fs.c
    60   struct dentry **links;                                            member
    485  node->links = kcalloc(num, sizeof(struct dentry *), GFP_KERNEL);  in add_links()
    486  if (!node->links)                                                 in add_links()
    497  node->links[i] = debugfs_create_symlink(deskew(basename),         in add_links()
    506  debugfs_remove(node->links[i]);                                   in add_links()
    507  kfree(node->links);                                               in add_links()
    508  node->links = NULL;                                               in add_links()
    578  if (!node->links)                                                 in remove_links()
    581  debugfs_remove(node->links[i]);                                   in remove_links()
    582  kfree(node->links);                                               in remove_links()
    [all …]
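
Here links is a kcalloc()'d array of debugfs symlink dentries hanging off each gcov node: add_links() creates one symlink per entry and unwinds everything it created on failure, remove_links() removes and frees the lot. A self-contained sketch of that allocate/create/unwind pattern with a simplified node and caller-supplied names and targets (the real fs.c derives them from the object file paths via deskew()):

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/slab.h>

struct gcov_node_sketch {
        struct dentry *dentry;   /* directory the symlinks live in */
        struct dentry **links;   /* one dentry per created symlink */
        int num_links;
};

/* Create num symlinks named names[i] pointing at targets[i]; on failure
 * remove whatever was created so far and leave node->links NULL. */
static int add_links_sketch(struct gcov_node_sketch *node,
                            const char **names, const char **targets, int num)
{
        int i, err;

        node->links = kcalloc(num, sizeof(struct dentry *), GFP_KERNEL);
        if (!node->links)
                return -ENOMEM;
        for (i = 0; i < num; i++) {
                node->links[i] = debugfs_create_symlink(names[i], node->dentry,
                                                        targets[i]);
                if (IS_ERR(node->links[i])) {
                        err = PTR_ERR(node->links[i]);
                        goto out_unwind;
                }
        }
        node->num_links = num;
        return 0;

out_unwind:
        while (i-- > 0)
                debugfs_remove(node->links[i]);
        kfree(node->links);
        node->links = NULL;
        return err;
}

static void remove_links_sketch(struct gcov_node_sketch *node)
{
        int i;

        if (!node->links)
                return;
        for (i = 0; i < node->num_links; i++)
                debugfs_remove(node->links[i]);
        kfree(node->links);
        node->links = NULL;
}
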
/kernel/bpf/

  net_namespace.c
    71   list_for_each_entry(pos, &net->bpf.links[type], node) {       in link_index()
    84   list_for_each(pos, &net->bpf.links[type])                     in link_count()
    95   list_for_each_entry(pos, &net->bpf.links[type], node) {       in fill_prog_array()
    315  if (!list_empty(&net->bpf.links[type])) {                     in netns_bpf_prog_attach()
    370  if (!list_empty(&net->bpf.links[type]))                       in __netns_bpf_prog_detach()
    461  list_add_tail(&net_link->node, &net->bpf.links[type]);        in netns_bpf_link_attach()
    532  INIT_LIST_HEAD(&net->bpf.links[type]);                        in netns_bpf_pernet_init()
    545  list_for_each_entry(net_link, &net->bpf.links[type], node) {  in netns_bpf_pernet_pre_exit()
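
net->bpf.links is an array of list heads, one per netns attach type: every attached bpf_netns_link queues its node on the list for its type, the heads are initialized per network namespace, and counting or indexing is a plain list walk. A minimal sketch of that per-type link list pattern; the struct and function names below are invented for illustration, only the list idioms follow the hits:

#include <linux/list.h>

enum sketch_netns_attach_type {
        SKETCH_FLOW_DISSECTOR,
        SKETCH_SK_LOOKUP,
        MAX_SKETCH_ATTACH_TYPE,
};

struct sketch_netns_bpf {
        struct list_head links[MAX_SKETCH_ATTACH_TYPE];  /* one list per attach type */
};

struct sketch_netns_link {
        struct list_head node;                 /* threads onto links[type] */
        enum sketch_netns_attach_type type;
};

static void sketch_pernet_init(struct sketch_netns_bpf *bpf)
{
        int type;

        for (type = 0; type < MAX_SKETCH_ATTACH_TYPE; type++)
                INIT_LIST_HEAD(&bpf->links[type]);
}

static void sketch_link_attach(struct sketch_netns_bpf *bpf,
                               struct sketch_netns_link *link)
{
        /* New links go to the tail so multi-prog types run in attach order. */
        list_add_tail(&link->node, &bpf->links[link->type]);
}

static int sketch_link_count(struct sketch_netns_bpf *bpf,
                             enum sketch_netns_attach_type type)
{
        struct sketch_netns_link *pos;
        int n = 0;

        list_for_each_entry(pos, &bpf->links[type], node)
                n++;
        return n;
}
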
  bpf_struct_ops.c
    33    struct bpf_link **links;                                    member
    529   if (!st_map->links[i])                                      in bpf_struct_ops_map_put_progs()
    531   bpf_link_put(st_map->links[i]);                             in bpf_struct_ops_map_put_progs()
    532   st_map->links[i] = NULL;                                    in bpf_struct_ops_map_put_progs()
    598   tlinks[BPF_TRAMP_FENTRY].links[0] = link;                   in bpf_struct_ops_prepare_trampoline()
    732   plink = st_map->links;                                      in bpf_struct_ops_map_update_elem()
    961   if (st_map->links)                                          in __bpf_struct_ops_map_free()
    965   bpf_map_area_free(st_map->links);                           in __bpf_struct_ops_map_free()
    1094  st_map->links =                                             in bpf_struct_ops_map_alloc()
    1101  if (!st_map->uvalue || !st_map->links || !st_map->ksyms) {  in bpf_struct_ops_map_alloc()
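
For struct_ops maps, links is an array of struct bpf_link pointers, one slot per ops member: the map allocates it at map creation, updates fill it, put_progs drops each held reference with bpf_link_put() and clears the slot, and map free releases the array with bpf_map_area_free(). A sketch of the put/free half of that lifecycle (the st_map field names follow the hits, the explicit count field is an assumption):

#include <linux/bpf.h>

struct sketch_struct_ops_map {
        struct bpf_link **links;   /* one slot per func ptr in the struct_ops */
        u32 links_cnt;             /* assumed count of slots */
};

/* Drop every link reference held by the map and clear the slots, mirroring
 * the bpf_struct_ops_map_put_progs() hits above. */
static void sketch_put_links(struct sketch_struct_ops_map *st_map)
{
        u32 i;

        for (i = 0; i < st_map->links_cnt; i++) {
                if (!st_map->links[i])
                        continue;
                bpf_link_put(st_map->links[i]);
                st_map->links[i] = NULL;
        }
}

static void sketch_free_links(struct sketch_struct_ops_map *st_map)
{
        if (st_map->links)
                sketch_put_links(st_map);
        bpf_map_area_free(st_map->links);   /* array came from bpf_map_area_alloc() */
        st_map->links = NULL;
}
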
  trampoline.c
    237  struct bpf_tramp_link **links;   in bpf_trampoline_get_progs()  local
    248  links = tlinks[kind].links;      in bpf_trampoline_get_progs()
    252  *links++ = link;                 in bpf_trampoline_get_progs()
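
bpf_trampoline_get_progs() flattens the trampoline's per-kind link list into the tlinks[kind].links array, using the array itself as a cursor and appending with *links++. A sketch of that collection step; the bpf_tramp_link/bpf_tramp_links member names and the BPF_MAX_TRAMP_LINKS bound follow mainline conventions but are assumptions here:

#include <linux/bpf.h>

/* Collect the links attached for one trampoline kind into the flat array the
 * trampoline/JIT code consumes; a sketch of the pattern in the hits above. */
static int sketch_collect_links(struct hlist_head *head,
                                struct bpf_tramp_links *tl)
{
        struct bpf_tramp_link **links = tl->links;   /* cursor into the output array */
        struct bpf_tramp_link *link;
        int cnt = 0;

        hlist_for_each_entry(link, head, tramp_hlist) {
                if (cnt >= BPF_MAX_TRAMP_LINKS)
                        return -E2BIG;
                *links++ = link;                     /* append and advance the cursor */
                cnt++;
        }
        tl->nr_links = cnt;
        return cnt;
}
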
/kernel/cgroup/

  cgroup-v1.c
    194  struct list_head links;                                      member
    210  list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)   in cgroup1_pidlist_destroy_all()
    232  list_del(&l->links);                                         in cgroup_pidlist_destroy_work_fn()
    295  list_for_each_entry(l, &cgrp->pidlists, links)               in cgroup_pidlist_find()
    328  list_add(&l->links, &cgrp->pidlists);                        in cgroup_pidlist_find_create()
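
In cgroup-v1, each cached pidlist carries a links list_head threading it onto its cgroup's pidlists list: lookup walks the list, find_create appends a new entry, and the destroy paths unlink with list_del() (using the _safe iterator when removing while walking). A self-contained sketch of that find-or-create pattern with a simplified key and plain kzalloc()/kfree():

#include <linux/list.h>
#include <linux/slab.h>

struct sketch_pidlist {
        int key;                    /* stand-in for the (type, namespace) key */
        struct list_head links;     /* threads onto the owner's pidlists list */
};

struct sketch_cgroup {
        struct list_head pidlists;  /* INIT_LIST_HEAD()ed at cgroup creation */
};

/* Return the existing pidlist for key, or allocate and enqueue a new one. */
static struct sketch_pidlist *sketch_pidlist_find_create(struct sketch_cgroup *cgrp,
                                                         int key)
{
        struct sketch_pidlist *l;

        list_for_each_entry(l, &cgrp->pidlists, links)
                if (l->key == key)
                        return l;

        l = kzalloc(sizeof(*l), GFP_KERNEL);
        if (!l)
                return NULL;
        l->key = key;
        list_add(&l->links, &cgrp->pidlists);
        return l;
}

/* Tear down every pidlist; the _safe variant allows list_del() while iterating. */
static void sketch_pidlist_destroy_all(struct sketch_cgroup *cgrp)
{
        struct sketch_pidlist *l, *tmp;

        list_for_each_entry_safe(l, tmp, &cgrp->pidlists, links) {
                list_del(&l->links);
                kfree(l);
        }
}
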
Completed in 20 milliseconds