/kernel/

fail_function.c
     60:  attr = kzalloc(sizeof(*attr), GFP_KERNEL);  in fei_attr_new()
     61:  if (attr) {  in fei_attr_new()
     64:  kfree(attr);  in fei_attr_new()
     72:  return attr;  in fei_attr_new()
     77:  if (attr) {  in fei_attr_free()
     79:  kfree(attr);  in fei_attr_free()
     89:  return attr;  in fei_attr_lookup()
    100:  if (attr == _attr)  in fei_attr_is_valid()
    124:  if (attr->kp.addr) {  in fei_retval_set()
    265:  if (!attr) {  in fei_write()
    … (more matches not shown)
|
ksysfs.c
    248:  &fscaps_attr.attr,
    249:  &uevent_seqnum_attr.attr,
    250:  &cpu_byteorder_attr.attr,
    251:  &address_bits_attr.attr,
    253:  &uevent_helper_attr.attr,
    256:  &profiling_attr.attr,
    259:  &kexec_loaded_attr.attr,
    262:  &kexec_crash_size_attr.attr,
    266:  &vmcoreinfo_attr.attr,
    272:  &rcu_expedited_attr.attr,
    … (more matches not shown)
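The ksysfs.c hits are all rows of one NULL-terminated `struct attribute *` array, the shape sysfs expects for group registration. A minimal sketch of that wiring, with hypothetical `demo_*` names rather than the actual ksysfs.c symbols:

```c
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}

/* __ATTR_RO() names the file "demo" and binds demo_show() to it. */
static struct kobj_attribute demo_attr = __ATTR_RO(demo);

static struct attribute *demo_attrs[] = {
	&demo_attr.attr,	/* each row points at the embedded .attr */
	NULL,			/* the array must be NULL-terminated */
};

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
};
/* Exposed with sysfs_create_group(kobj, &demo_group). */
```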
|
reboot.c
    1077:  struct kobj_attribute *attr, char *buf)  in hw_protection_show() (argument)
    1083:  struct kobj_attribute *attr, const char *buf,  in hw_protection_store() (argument)
    1220:  static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr,  in mode_store() (argument)
    1250:  static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,  in force_store() (argument)
    1297:  static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr,  in type_store() (argument)
    1330:  static ssize_t cpu_store(struct kobject *kobj, struct kobj_attribute *attr,  in cpu_store() (argument)
    1356:  &hw_protection_attr.attr,
    1357:  &reboot_mode_attr.attr,
    1359:  &reboot_force_attr.attr,
    1360:  &reboot_type_attr.attr,
    … (more matches not shown)
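The reboot.c rows show both halves of the `struct kobj_attribute` pattern: show/store callbacks that take the attribute as an argument, and `&foo_attr.attr` rows collecting them into an array. A hedged sketch of one read-write knob; the flag and names are invented, not reboot.c's:

```c
#include <linux/kobject.h>
#include <linux/kstrtox.h>
#include <linux/sysfs.h>

static bool demo_force;

static ssize_t force_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	return sysfs_emit(buf, "%d\n", demo_force);
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	int ret = kstrtobool(buf, &demo_force);

	/* A store callback returns the bytes consumed on success. */
	return ret ? ret : count;
}

static struct kobj_attribute demo_force_attr =
	__ATTR(force, 0644, force_show, force_store);
```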
|
/kernel/power/

main.c
     473:  &success.attr,
     474:  &fail.attr,
     475:  &failed_freeze.attr,
     480:  &failed_resume.attr,
     486:  &last_hw_sleep.attr,
     488:  &max_hw_sleep.attr,
     494:  if (attr != &last_hw_sleep.attr &&  in suspend_attr_is_visible()
     495:  attr != &total_hw_sleep.attr &&  in suspend_attr_is_visible()
     496:  attr != &max_hw_sleep.attr)  in suspend_attr_is_visible()
    1007:  &state_attr.attr,
    … (more matches not shown)
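suspend_attr_is_visible() (lines 494-496) is an `.is_visible` group callback: it receives each `struct attribute` before the file is created and returns 0 to suppress it. The listing does not show what condition gates the hw_sleep files, so the gate below is a labeled placeholder; only the calling convention is the point:

```c
#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct kobj_attribute demo_hw_sleep_attr;	/* hypothetical */
extern bool demo_hw_sleep_supported(void);		/* hypothetical */
extern struct attribute *demo_attrs[];			/* NULL-terminated */

static umode_t demo_attr_is_visible(struct kobject *kobj,
				    struct attribute *attr, int idx)
{
	/* Hide the (hypothetical) hw_sleep file unless it is supported. */
	if (attr == &demo_hw_sleep_attr.attr && !demo_hw_sleep_supported())
		return 0;		/* 0 means: do not create the file */
	return attr->mode;		/* otherwise keep the declared mode */
}

static const struct attribute_group demo_group = {
	.attrs      = demo_attrs,
	.is_visible = demo_attr_is_visible,
};
```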
|
hibernate.c
    1179:  static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,  in disk_show() (argument)
    1218:  static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,  in disk_store() (argument)
    1331:  struct kobj_attribute *attr, char *buf)  in resume_offset_show() (argument)
    1337:  struct kobj_attribute *attr, const char *buf,  in resume_offset_store() (argument)
    1375:  struct kobj_attribute *attr, char *buf)  in reserved_size_show() (argument)
    1381:  struct kobj_attribute *attr,  in reserved_size_store() (argument)
    1397:  &disk_attr.attr,
    1398:  &resume_offset_attr.attr,
    1399:  &resume_attr.attr,
    1400:  &image_size_attr.attr,
    … (more matches not shown)
|
/kernel/sched/

syscalls.c
    301:  __setparam_dl(p, attr);  in DEFINE_CLASS()
    303:  __setparam_fair(p, attr);  in DEFINE_CLASS()
    681:  newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);  in __sched_setscheduler()
    758:  struct sched_attr attr = {  in _sched_setscheduler() (local)
    861:  struct sched_attr attr = {  in sched_set_normal() (local)
    895:  memset(attr, 0, sizeof(*attr));  in sched_copy_attr()
    907:  ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);  in sched_copy_attr()
    922:  attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);  in sched_copy_attr()
    934:  __getparam_dl(p, attr);  in get_params()
    980:  struct sched_attr attr;  in SYSCALL_DEFINE3() (local)
    … (more matches not shown)
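sched_copy_attr() (lines 895-922) is the canonical copy of a user-extensible struct: `size` is whatever userspace claims, and copy_struct_from_user() zero-fills the kernel tail when the user struct is older and smaller, or fails with -E2BIG when it is newer and carries trailing non-zero bytes the kernel does not understand. A trimmed sketch under those assumptions (includes approximate; `struct sched_attr` lives in the uapi sched headers):

```c
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/sched/prio.h>
#include <linux/uaccess.h>

static int demo_copy_sched_attr(struct sched_attr *attr,
				const struct sched_attr __user *uattr,
				u32 usize)
{
	int ret;

	memset(attr, 0, sizeof(*attr));
	/* Handles both usize < sizeof(*attr) and usize > sizeof(*attr). */
	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, usize);
	if (ret)
		return ret;

	/* Post-copy normalization, mirroring line 922 above. */
	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
	return 0;
}
```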
|
/kernel/bpf/

syscall.c
    1366:  attr->btf_key_type_id || attr->btf_value_type_id)  in map_create()
    1502:  if (attr->btf_key_type_id || attr->btf_value_type_id ||  in map_create()
    3975:  event->attr.type, event->attr.config,  in bpf_perf_event_link_show_fdinfo()
    4457:  if (attr->attach_flags || attr->relative_fd)  in bpf_prog_detach()
    4577:  if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||  in bpf_prog_test_run()
    4578:  (!attr->test.ctx_size_in && attr->test.ctx_in))  in bpf_prog_test_run()
    4582:  (!attr->test.ctx_size_out && attr->test.ctx_out))  in bpf_prog_test_run()
    6001:  memset(&attr, 0, sizeof(attr));  in __sys_bpf()
    6189:  if (attr->test.data_in || attr->test.data_out ||  in kern_sys_bpf()
    6190:  attr->test.ctx_out || attr->test.duration ||  in kern_sys_bpf()
    … (more matches not shown)
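map_create() (lines 1366 and 1502) rejects `attr` fields a given map type cannot honor, e.g. BTF type ids on maps without BTF support; every unused field must stay zero. The userspace half of the handshake is this minimal, loader-free sketch:

```c
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int demo_create_array_map(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));	/* unused fields must be zero */
	attr.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.key_size    = 4;		/* arrays require a 4-byte index */
	attr.value_size  = 8;
	attr.max_entries = 64;

	/* Returns a map fd on success, or -1 with errno (e.g. EINVAL)
	 * when validation inside map_create() fails. */
	return syscall(SYS_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
```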
|
tcx.c
     20:  dev = __dev_get_by_index(net, attr->target_ifindex);  in tcx_prog_attach()
     25:  if (attr->attach_flags & BPF_F_REPLACE) {  in tcx_prog_attach()
     40:  attr->attach_flags, attr->relative_fd,  in tcx_prog_attach()
     41:  attr->expected_revision);  in tcx_prog_attach()
     61:  bool ingress = attr->attach_type == BPF_TCX_INGRESS;  in tcx_prog_detach()
     68:  dev = __dev_get_by_index(net, attr->target_ifindex);  in tcx_prog_detach()
     79:  attr->relative_fd, attr->expected_revision);  in tcx_prog_detach()
    300:  const union bpf_attr *attr,  in tcx_link_init() (argument)
    305:  attr->link_create.attach_type);  in tcx_link_init()
    335:  attr->link_create.tcx.relative_fd,  in tcx_link_attach()
    … (more matches not shown)
|
bloom_filter.c
     84:  static int bloom_map_alloc_check(union bpf_attr *attr)  in bloom_map_alloc_check() (argument)
     86:  if (attr->value_size > KMALLOC_MAX_SIZE)  in bloom_map_alloc_check()
     98:  int numa_node = bpf_map_attr_numa_node(attr);  in bloom_map_alloc()
    101:  if (attr->key_size != 0 || attr->value_size == 0 ||  in bloom_map_alloc()
    102:  attr->max_entries == 0 ||  in bloom_map_alloc()
    103:  attr->map_flags & ~BLOOM_CREATE_FLAG_MASK ||  in bloom_map_alloc()
    104:  !bpf_map_flags_access_ok(attr->map_flags) ||  in bloom_map_alloc()
    108:  (attr->map_extra & ~0xF))  in bloom_map_alloc()
    111:  nr_hash_funcs = attr->map_extra;  in bloom_map_alloc()
    151:  bpf_map_init_from_attr(&bloom->map, attr);  in bloom_map_alloc()
    … (more matches not shown)
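Lines 84-111 are the allocation-time validation every map type implements against `union bpf_attr`. A sketch of the shape; the flag mask and limits here are illustrative stand-ins, not the bloom filter's exact ones:

```c
#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Stand-in for the map type's permitted creation flags. */
#define DEMO_CREATE_FLAG_MASK	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

static int demo_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != 0 || attr->value_size == 0 ||
	    attr->max_entries == 0)
		return -EINVAL;
	if (attr->map_flags & ~DEMO_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;
	if (attr->value_size > KMALLOC_MAX_SIZE)
		return -E2BIG;	/* value too large for one allocation */
	return 0;
}
```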
|
net_namespace.c
    263:  if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)  in __netns_bpf_prog_query()
    267:  attr->query.prog_cnt);  in __netns_bpf_prog_query()
    270:  int netns_bpf_prog_query(const union bpf_attr *attr,  in netns_bpf_prog_query() (argument)
    277:  if (attr->query.query_flags)  in netns_bpf_prog_query()
    284:  net = get_net_ns_by_fd(attr->query.target_fd);  in netns_bpf_prog_query()
    304:  if (attr->target_fd || attr->attach_flags || attr->replace_bpf_fd)  in netns_bpf_prog_attach()
    307:  type = to_netns_bpf_attach_type(attr->attach_type);  in netns_bpf_prog_attach()
    388:  if (attr->target_fd)  in netns_bpf_prog_detach()
    391:  type = to_netns_bpf_attach_type(attr->attach_type);  in netns_bpf_prog_detach()
    485:  if (attr->link_create.flags)  in netns_bpf_link_create()
    … (more matches not shown)
|
queue_stack_maps.c
    47:  static int queue_stack_map_alloc_check(union bpf_attr *attr)  in queue_stack_map_alloc_check() (argument)
    50:  if (attr->max_entries == 0 || attr->key_size != 0 ||  in queue_stack_map_alloc_check()
    51:  attr->value_size == 0 ||  in queue_stack_map_alloc_check()
    52:  attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||  in queue_stack_map_alloc_check()
    53:  !bpf_map_flags_access_ok(attr->map_flags))  in queue_stack_map_alloc_check()
    56:  if (attr->value_size > KMALLOC_MAX_SIZE)  in queue_stack_map_alloc_check()
    65:  static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)  in queue_stack_map_alloc() (argument)
    67:  int numa_node = bpf_map_attr_numa_node(attr);  in queue_stack_map_alloc()
    71:  size = (u64) attr->max_entries + 1;  in queue_stack_map_alloc()
    72:  queue_size = sizeof(*qs) + size * attr->value_size;  in queue_stack_map_alloc()
    … (more matches not shown)
|
hashtab.c
     448:  if (attr->max_entries == 0 || attr->key_size == 0 ||  in htab_map_alloc_check()
     449:  attr->value_size == 0)  in htab_map_alloc_check()
     452:  if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE -  in htab_map_alloc_check()
    1678:  const union bpf_attr *attr,  in __htab_map_lookup_and_delete_batch() (argument)
    1705:  map_flags = attr->batch.flags;  in __htab_map_lookup_and_delete_batch()
    1709:  max_count = attr->batch.count;  in __htab_map_lookup_and_delete_batch()
    1917:  const union bpf_attr *attr,  in htab_percpu_map_lookup_and_delete_batch() (argument)
    1934:  const union bpf_attr *attr,  in htab_map_lookup_and_delete_batch() (argument)
    1943:  const union bpf_attr *attr,  in htab_lru_percpu_map_lookup_batch() (argument)
    2492:  return htab_map_alloc_check(attr);  in fd_htab_map_alloc_check()
    … (more matches not shown)
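The check at line 452 widens `key_size` before adding: both fields are u32, and a 32-bit sum could wrap around and slip past the limit. A self-contained illustration of the idiom; the bound is a stand-in, since the listing truncates hashtab's real one:

```c
#include <linux/bpf.h>
#include <linux/errno.h>

#define DEMO_SIZE_LIMIT	(1UL << 30)	/* hypothetical bound */

static int demo_size_check(const union bpf_attr *attr)
{
	/* The (u64) cast forces 64-bit addition, so u32 wraparound
	 * cannot sneak a huge key+value pair past the limit. */
	if ((u64)attr->key_size + attr->value_size >= DEMO_SIZE_LIMIT)
		return -E2BIG;
	return 0;
}
```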
|
arraymap.c
      58:  if (attr->max_entries == 0 || attr->key_size != 4 ||  in array_map_alloc_check()
      59:  attr->value_size == 0 ||  in array_map_alloc_check()
      74:  if (attr->value_size > INT_MAX)  in array_map_alloc_check()
      94:  max_entries = attr->max_entries;  in array_map_alloc()
     111:  if (max_entries < attr->max_entries)  in array_map_alloc()
     122:  if (attr->map_flags & BPF_F_MMAPABLE) {  in array_map_alloc()
     131:  if (attr->map_flags & BPF_F_MMAPABLE) {  in array_map_alloc()
     830:  if (attr->value_size != sizeof(u32))  in fd_array_map_alloc_check()
     835:  return array_map_alloc_check(attr);  in fd_array_map_alloc_check()
    1161:  map = array_map_alloc(attr);  in prog_array_map_alloc()
    … (more matches not shown)
|
arena.c
     95:  static struct bpf_map *arena_map_alloc(union bpf_attr *attr)  in arena_map_alloc() (argument)
     98:  int numa_node = bpf_map_attr_numa_node(attr);  in arena_map_alloc()
    106:  if (attr->key_size || attr->value_size || attr->max_entries == 0 ||  in arena_map_alloc()
    108:  !(attr->map_flags & BPF_F_MMAPABLE) ||  in arena_map_alloc()
    110:  (attr->map_flags & ~(BPF_F_SEGV_ON_FAULT | BPF_F_MMAPABLE | BPF_F_NO_USER_CONV)))  in arena_map_alloc()
    113:  if (attr->map_extra & ~PAGE_MASK)  in arena_map_alloc()
    117:  vm_range = (u64)attr->max_entries * PAGE_SIZE;  in arena_map_alloc()
    121:  if ((attr->map_extra >> 32) != ((attr->map_extra + vm_range - 1) >> 32))  in arena_map_alloc()
    134:  arena->user_vm_start = attr->map_extra;  in arena_map_alloc()
    139:  bpf_map_init_from_attr(&arena->map, attr);  in arena_map_alloc()
    … (more matches not shown)
|
reuseport_array.c
     41:  static int reuseport_array_alloc_check(union bpf_attr *attr)  in reuseport_array_alloc_check() (argument)
     43:  if (attr->value_size != sizeof(u32) &&  in reuseport_array_alloc_check()
     44:  attr->value_size != sizeof(u64))  in reuseport_array_alloc_check()
     47:  return array_map_alloc_check(attr);  in reuseport_array_alloc_check()
    149:  static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)  in reuseport_array_alloc() (argument)
    151:  int numa_node = bpf_map_attr_numa_node(attr);  in reuseport_array_alloc()
    155:  array = bpf_map_area_alloc(struct_size(array, ptrs, attr->max_entries), numa_node);  in reuseport_array_alloc()
    160:  bpf_map_init_from_attr(&array->map, attr);  in reuseport_array_alloc()
|
sysfs_btf.c
    19:  const struct bin_attribute *attr,  in btf_sysfs_vmlinux_mmap() (argument)
    22:  unsigned long pages = PAGE_ALIGN(attr->size) >> PAGE_SHIFT;  in btf_sysfs_vmlinux_mmap()
    27:  if (attr->private != __start_BTF || !PAGE_ALIGNED(addr))  in btf_sysfs_vmlinux_mmap()
    47:  .attr = { .name = "vmlinux", .mode = 0444, },
|
offload.c
    234:  if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&  in bpf_prog_dev_bound_init()
    235:  attr->prog_type != BPF_PROG_TYPE_XDP)  in bpf_prog_dev_bound_init()
    244:  if (attr->prog_flags & BPF_F_XDP_HAS_FRAGS &&  in bpf_prog_dev_bound_init()
    245:  !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY))  in bpf_prog_dev_bound_init()
    248:  if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&  in bpf_prog_dev_bound_init()
    249:  attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)  in bpf_prog_dev_bound_init()
    514:  struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)  in bpf_map_offload_map_alloc() (argument)
    523:  if (attr->map_type != BPF_MAP_TYPE_ARRAY &&  in bpf_map_offload_map_alloc()
    524:  attr->map_type != BPF_MAP_TYPE_HASH)  in bpf_map_offload_map_alloc()
    531:  bpf_map_init_from_attr(&offmap->map, attr);  in bpf_map_offload_map_alloc()
    … (more matches not shown)
|
mprog.c
    395:  int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,  in bpf_mprog_query() (argument)
    408:  if (attr->query.query_flags || attr->query.attach_flags)  in bpf_mprog_query()
    420:  uprog_id = u64_to_user_ptr(attr->query.prog_ids);  in bpf_mprog_query()
    421:  uprog_flags = u64_to_user_ptr(attr->query.prog_attach_flags);  in bpf_mprog_query()
    422:  ulink_id = u64_to_user_ptr(attr->query.link_ids);  in bpf_mprog_query()
    423:  ulink_flags = u64_to_user_ptr(attr->query.link_attach_flags);  in bpf_mprog_query()
    424:  if (attr->query.count == 0 || !uprog_id || !count)  in bpf_mprog_query()
    426:  if (attr->query.count < count) {  in bpf_mprog_query()
    427:  count = attr->query.count;  in bpf_mprog_query()
|
local_storage.c
    285:  static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)  in cgroup_storage_map_alloc() (argument)
    288:  int numa_node = bpf_map_attr_numa_node(attr);  in cgroup_storage_map_alloc()
    294:  if (attr->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)  in cgroup_storage_map_alloc()
    298:  if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) &&  in cgroup_storage_map_alloc()
    299:  attr->key_size != sizeof(__u64))  in cgroup_storage_map_alloc()
    302:  if (attr->value_size == 0)  in cgroup_storage_map_alloc()
    305:  if (attr->value_size > max_value_size)  in cgroup_storage_map_alloc()
    308:  if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||  in cgroup_storage_map_alloc()
    309:  !bpf_map_flags_access_ok(attr->map_flags))  in cgroup_storage_map_alloc()
    312:  if (attr->max_entries)  in cgroup_storage_map_alloc()
    … (more matches not shown)
|
lpm_trie.c
    571:  static struct bpf_map *trie_alloc(union bpf_attr *attr)  in trie_alloc() (argument)
    578:  if (attr->max_entries == 0 ||  in trie_alloc()
    579:  !(attr->map_flags & BPF_F_NO_PREALLOC) ||  in trie_alloc()
    580:  attr->map_flags & ~LPM_CREATE_FLAG_MASK ||  in trie_alloc()
    581:  !bpf_map_flags_access_ok(attr->map_flags) ||  in trie_alloc()
    582:  attr->key_size < LPM_KEY_SIZE_MIN ||  in trie_alloc()
    583:  attr->key_size > LPM_KEY_SIZE_MAX ||  in trie_alloc()
    584:  attr->value_size < LPM_VAL_SIZE_MIN ||  in trie_alloc()
    585:  attr->value_size > LPM_VAL_SIZE_MAX)  in trie_alloc()
    593:  bpf_map_init_from_attr(&trie->map, attr);  in trie_alloc()
    … (more matches not shown)
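trie_alloc() insists on BPF_F_NO_PREALLOC and bounds both key and value sizes; LPM keys are a 32-bit prefix length followed by the data bytes. A userspace sketch of inserting a /24 IPv4 prefix, assuming `map_fd` is an LPM trie created elsewhere (the uapi headers also ship a generic key struct; a hand-rolled one is used here for clarity):

```c
#include <string.h>
#include <arpa/inet.h>
#include <linux/types.h>
#include <bpf/bpf.h>	/* libbpf's thin syscall wrappers */

struct demo_lpm_key {
	__u32 prefixlen;	/* number of significant leading bits */
	__u8  data[4];		/* IPv4 address bytes */
};

static int demo_lpm_insert(int map_fd, const char *addr, __u64 value)
{
	struct demo_lpm_key key = { .prefixlen = 24 };

	if (inet_pton(AF_INET, addr, key.data) != 1)
		return -1;
	/* Longest-prefix lookups will now match anything in addr/24. */
	return bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
}
```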
|
/kernel/module/

sysfs.c
     98:  sattr->attr.name =  in add_sect_attrs()
    100:  if (!sattr->attr.name) {  in add_sect_attrs()
    107:  sattr->attr.mode = 0400;  in add_sect_attrs()
    189:  nattr->attr.name = mod->sect_attrs->attrs[loaded].attr.name;  in add_notes_attrs()
    190:  nattr->attr.mode = 0444;  in add_notes_attrs()
    279:  if (!attr->attr.name)  in module_remove_modinfo_attrs()
    281:  sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);  in module_remove_modinfo_attrs()
    282:  if (attr->free)  in module_remove_modinfo_attrs()
    283:  attr->free(mod);  in module_remove_modinfo_attrs()
    303:  if (!attr->test || attr->test(mod)) {  in module_add_modinfo_attrs()
    … (more matches not shown)
|
/kernel/events/

hw_breakpoint.c
    621:  __release_bp_slot(bp, bp->attr.bp_type);  in release_bp_slot()
    683:  __release_bp_slot(bp, bp->attr.bp_type);  in dbg_release_bp_slot()
    700:  if (attr->exclude_kernel)  in hw_breakpoint_parse()
    767:  err = hw_breakpoint_parse(bp, attr, &hw);  in modify_user_hw_breakpoint_check()
    774:  old_attr = bp->attr;  in modify_user_hw_breakpoint_check()
    775:  hw_breakpoint_copy_attr(&old_attr, attr);  in modify_user_hw_breakpoint_check()
    776:  if (memcmp(&old_attr, attr, sizeof(*attr)))  in modify_user_hw_breakpoint_check()
    780:  if (bp->attr.bp_type != attr->bp_type) {  in modify_user_hw_breakpoint_check()
    781:  err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);  in modify_user_hw_breakpoint_check()
    786:  hw_breakpoint_copy_attr(&bp->attr, attr);  in modify_user_hw_breakpoint_check()
    … (more matches not shown)
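Lines 774-786 show how modify_user_hw_breakpoint_check() vets a new attr: it copies only the breakpoint fields over a snapshot of the old attr and rejects the request if anything else differs, rebalancing slot accounting only when bp_type changed. From userspace this path is reached via the PERF_EVENT_IOC_MODIFY_ATTRIBUTES ioctl; a sketch, assuming `bp_fd` is an existing PERF_TYPE_BREAKPOINT event:

```c
#include <string.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

/* Move an existing watchpoint to a new address. Fields outside the
 * breakpoint set should match the original attr, or the kernel will
 * reject the modification. */
static int demo_move_breakpoint(int bp_fd, unsigned long new_addr)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type    = PERF_TYPE_BREAKPOINT;
	attr.size    = sizeof(attr);
	attr.bp_type = HW_BREAKPOINT_W;
	attr.bp_addr = new_addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;

	return ioctl(bp_fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr);
}
```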
|
core.c
     3390:  if (event->attr.type != attr->type)  in perf_event_modify_attr()
     5179:  struct perf_event_attr *attr = &event->attr;  in is_sb_event() (local)
     5187:  if (attr->mmap || attr->mmap_data || attr->mmap2 ||  in is_sb_event()
     5188:  attr->comm || attr->comm_exec ||  in is_sb_event()
     5189:  attr->task || attr->ksymbol ||  in is_sb_event()
    12877:  event->attr = *attr;  in perf_event_alloc()
    13060:  memset(attr, 0, sizeof(*attr));  in perf_copy_attr()
    13081:  if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)  in perf_copy_attr()
    13161:  if (!attr->inherit && attr->inherit_thread)  in perf_copy_attr()
    13579:  if (attr.exclusive || attr.pinned)  in SYSCALL_DEFINE5()
    … (more matches not shown)
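perf_copy_attr() (lines 13060-13161) zeroes its copy, rejects set reserved bits, and cross-checks flag combinations such as inherit_thread without inherit. The userspace counterpart, trimmed from the canonical perf_event_open(2) cycle-counter example:

```c
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int demo_open_cycles(void)
{
	struct perf_event_attr attr;

	/* Reserved fields must be zero; attr.size tells the kernel which
	 * ABI revision this binary was built against. */
	memset(&attr, 0, sizeof(attr));
	attr.type           = PERF_TYPE_HARDWARE;
	attr.size           = sizeof(attr);
	attr.config         = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled       = 1;
	attr.exclude_kernel = 1;

	/* pid = 0, cpu = -1: measure this thread on any CPU. */
	return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
}
```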
|
/kernel/trace/

trace_event_perf.c
     64:  if (!p_event->attr.exclude_callchain_user)  in perf_trace_event_perm()
     71:  if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)  in perf_trace_event_perm()
     76:  if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))  in perf_trace_event_perm()
    218:  u64 event_id = p_event->attr.config;  in perf_trace_init()
    253:  if (p_event->attr.kprobe_func) {  in perf_kprobe_init()
    254:  func = strndup_user(u64_to_user_ptr(p_event->attr.kprobe_func),  in perf_kprobe_init()
    268:  func, (void *)(unsigned long)(p_event->attr.kprobe_addr),  in perf_kprobe_init()
    269:  p_event->attr.probe_offset, is_retprobe);  in perf_kprobe_init()
    305:  if (!p_event->attr.uprobe_path)  in perf_uprobe_init()
    308:  path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),  in perf_uprobe_init()
    … (more matches not shown)
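perf_kprobe_init() (lines 253-269) pulls the probe spec out of `perf_event_attr` unions: `kprobe_func` rides in the config1 slot and `probe_offset` in config2. A userspace sketch under the assumption that the caller has already read the dynamic PMU type from /sys/bus/event_source/devices/kprobe/type:

```c
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int demo_open_kprobe(int kprobe_pmu_type, const char *func)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = kprobe_pmu_type;	/* dynamic PMU type from sysfs */
	attr.size = sizeof(attr);
	/* Pointer to the function name string, passed as a u64. */
	attr.kprobe_func  = (uint64_t)(unsigned long)func;
	attr.probe_offset = 0;

	/* pid = -1, cpu = 0: trace the kernel function system-wide on
	 * this CPU; one fd per CPU is the usual arrangement. */
	return syscall(SYS_perf_event_open, &attr, -1, 0, -1, 0);
}
```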
|
/kernel/irq/

irqdesc.c
    279:  static ssize_t hwirq_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)  in hwirq_show() (argument)
    290:  static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)  in type_show() (argument)
    300:  static ssize_t wakeup_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)  in wakeup_show() (argument)
    309:  static ssize_t name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)  in name_show() (argument)
    341:  &per_cpu_count_attr.attr,
    342:  &chip_name_attr.attr,
    343:  &hwirq_attr.attr,
    344:  &type_attr.attr,
    345:  &wakeup_attr.attr,
    346:  &name_attr.attr,
    … (more matches not shown)
|