/kernel/bpf/

net_namespace.c
    30  switch (type) {  in netns_bpf_attach_type_unneed()
    43  switch (type) {  in netns_bpf_attach_type_need()
    176  if (new_prog->type != link->prog->type)  in bpf_netns_link_update_prog()
    281  if (type < 0)  in netns_bpf_prog_query()
    308  if (type < 0)  in netns_bpf_prog_attach()
    320  switch (type) {  in netns_bpf_prog_attach()
    392  if (type < 0)  in netns_bpf_prog_detach()
    410  switch (type) {  in netns_bpf_max_progs()
    529  int type;  in netns_bpf_pernet_init()  (local)
    531  for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++)  in netns_bpf_pernet_init()
    [all …]
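Most of these net_namespace.c hits are about translating a generic BPF attach type into a per-netns slot and rejecting anything unknown: the `if (type < 0)` checks follow the conversion, and netns_bpf_pernet_init() loops over MAX_NETNS_BPF_ATTACH_TYPE. A minimal user-space sketch of that validate-then-index shape; the slot names and the numeric wire values below are invented for illustration, not the kernel's:

```c
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's per-netns attach slots. */
enum netns_attach_slot { SLOT_FLOW_DISSECTOR, SLOT_SK_LOOKUP, MAX_ATTACH_SLOT };

/* Map a user-supplied attach type onto an internal slot; negative means unsupported. */
static int to_netns_slot(int user_type)
{
    switch (user_type) {
    case 17: return SLOT_FLOW_DISSECTOR;   /* made-up wire values */
    case 36: return SLOT_SK_LOOKUP;
    default: return -1;
    }
}

int main(void)
{
    int prog_count[MAX_ATTACH_SLOT] = { 0 };
    int type = to_netns_slot(17);

    if (type < 0) {             /* same guard shape as netns_bpf_prog_attach() */
        fprintf(stderr, "unsupported attach type\n");
        return 1;
    }
    prog_count[type]++;         /* safe: type is now a validated array index */
    printf("slot %d holds %d program(s)\n", type, prog_count[type]);
    return 0;
}
```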

crypto.c
    31  char type[14];  (member)
    67  if (!strcmp(node->type->name, type->name))  in bpf_crypto_register_type()
    76  node->type = type;  in bpf_crypto_register_type()
    94  if (strcmp(node->type->name, type->name))  in bpf_crypto_unregister_type()
    119  type = node->type;  in bpf_crypto_get_type()
    124  return type;  in bpf_crypto_get_type()
    158  type = bpf_crypto_get_type(params->type);  in bpf_crypto_ctx_create()
    159  if (IS_ERR(type)) {  in bpf_crypto_ctx_create()
    160  *err = PTR_ERR(type);  in bpf_crypto_ctx_create()
    185  ctx->type = type;  in bpf_crypto_ctx_create()
    [all …]

inode.c
    35  switch (type) {  in bpf_any_get()
    55  switch (type) {  in bpf_any_put()
    77  *type = BPF_TYPE_MAP;  in bpf_fd_probe_obj()
    83  *type = BPF_TYPE_PROG;  in bpf_fd_probe_obj()
    89  *type = BPF_TYPE_LINK;  in bpf_fd_probe_obj()
    133  *type = BPF_TYPE_PROG;  in bpf_inode_type()
    135  *type = BPF_TYPE_MAP;  in bpf_inode_type()
    137  *type = BPF_TYPE_LINK;  in bpf_inode_type()
    460  switch (type) {  in bpf_obj_do_pin()
    480  enum bpf_type type;  in bpf_obj_pin_user()  (local)
    [all …]

verifier.c
    403  type = reg->type;  in reg_not_null()
    407  type = base_type(type);  in reg_not_null()
    1535  s->type = type;  in acquire_lock_state()
    1644  if (!(s->type & type))  in find_lock_state()
    2220  reg->dynptr.type = type;  in __mark_dynptr_reg()
    8889  if (rold->type != rcur->type)  in maybe_widen_reg()
    9401  u32 type = reg->type;  in check_func_arg_reg_off()  (local)
    14537  dst_reg->type = ptr_reg->type;  in adjust_ptr_min_max_vals()
    18597  if (rold->type != rcur->type)  in regsafe()
    20271  type = t->type;  in __check_pseudo_btf_id()
    [all …]

bpf_lru_list.c
    53  enum bpf_lru_list_type type)  in bpf_lru_list_count_inc()  (argument)
    55  if (type < NR_BPF_LRU_LIST_COUNT)  in bpf_lru_list_count_inc()
    56  l->counts[type]++;  in bpf_lru_list_count_inc()
    60  enum bpf_lru_list_type type)  in bpf_lru_list_count_dec()  (argument)
    62  if (type < NR_BPF_LRU_LIST_COUNT)  in bpf_lru_list_count_dec()
    63  l->counts[type]--;  in bpf_lru_list_count_dec()
    82  node->type = tgt_free_type;  in __bpf_lru_node_move_to_free()
    96  node->type = tgt_type;  in __bpf_lru_node_move_in()
    113  if (node->type != tgt_type) {  in __bpf_lru_node_move()
    116  node->type = tgt_type;  in __bpf_lru_node_move()
    [all …]
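The bpf_lru_list.c counters are only maintained for list types below NR_BPF_LRU_LIST_COUNT, so bpf_lru_list_count_inc()/_dec() guard the array index before touching it. A self-contained sketch of that guard-before-index pattern, with illustrative enum and struct names rather than the kernel's:

```c
#include <stdio.h>

enum lru_list_type { LIST_ACTIVE, LIST_INACTIVE, NR_COUNTED, LIST_FREE };

struct lru {
    unsigned long counts[NR_COUNTED];   /* only the first NR_COUNTED types are tracked */
};

static void count_inc(struct lru *l, enum lru_list_type type)
{
    if (type < NR_COUNTED)      /* uncounted types (LIST_FREE) fall through silently */
        l->counts[type]++;
}

static void count_dec(struct lru *l, enum lru_list_type type)
{
    if (type < NR_COUNTED)
        l->counts[type]--;
}

int main(void)
{
    struct lru l = { { 0 } };

    count_inc(&l, LIST_ACTIVE);
    count_inc(&l, LIST_FREE);           /* no-op: beyond the counted range */
    count_dec(&l, LIST_ACTIVE);
    printf("active=%lu inactive=%lu\n", l.counts[LIST_ACTIVE], l.counts[LIST_INACTIVE]);
    return 0;
}
```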

btf.c
    912  id = m->type;  in btf_member_is_reg_int()
    1982  id = type->type;  in __btf_resolve_size()
    1983  type = btf_type_by_id(btf, type->type);  in __btf_resolve_size()
    1993  type = btf_type_by_id(btf, array->type);  in __btf_resolve_size()
    2951  if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {  in btf_array_check_meta()
    3392  info->type = type;  in btf_find_kptr()
    3545  return type;  in btf_get_field_type()
    4003  rec->fields[i].type = info_arr[i].type;  in btf_parse_fields()
    4705  if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {  in btf_var_check_meta()
    4774  if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {  in btf_datasec_check_meta()
    [all …]

/kernel/sched/

isolation.c
    31  return !!(housekeeping.flags & BIT(type));  in housekeeping_enabled()
    35  int housekeeping_any_cpu(enum hk_type type)  in housekeeping_any_cpu()  (argument)
    40  if (housekeeping.flags & BIT(type)) {  in housekeeping_any_cpu()
    54  type != HK_TYPE_TIMER);  in housekeeping_any_cpu()
    64  if (housekeeping.flags & BIT(type))  in housekeeping_cpumask()
    65  return housekeeping.cpumasks[type];  in housekeeping_cpumask()
    73  if (housekeeping.flags & BIT(type))  in housekeeping_affine()
    81  if (housekeeping.flags & BIT(type))  in housekeeping_test_cpu()
    89  enum hk_type type;  in housekeeping_init()  (local)
    153  enum hk_type type;  in housekeeping_setup()  (local)
    [all …]
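Every isolation.c hit follows the same shape: a per-type bit in housekeeping.flags gates access to a per-type cpumask. A small stand-alone analogue with a plain bitmask and an array of strings standing in for cpumasks; the housekeeping structure here is simplified and the names are illustrative:

```c
#include <stdio.h>

#define BIT(n) (1UL << (n))

enum hk_type { HK_TIMER, HK_RCU, HK_DOMAIN, HK_TYPE_MAX };

struct housekeeping {
    unsigned long flags;                 /* one bit per enabled hk_type */
    const char *cpumasks[HK_TYPE_MAX];   /* stand-in for per-type struct cpumask pointers */
};

static const char *hk_cpumask(const struct housekeeping *hk, enum hk_type type,
                              const char *fallback)
{
    if (hk->flags & BIT(type))           /* same gate as housekeeping_cpumask() */
        return hk->cpumasks[type];
    return fallback;                     /* disabled types get a default mask */
}

int main(void)
{
    struct housekeeping hk = {
        .flags = BIT(HK_DOMAIN),
        .cpumasks = { [HK_DOMAIN] = "0-3" },
    };

    printf("domain: %s\n", hk_cpumask(&hk, HK_DOMAIN, "all"));
    printf("timer:  %s\n", hk_cpumask(&hk, HK_TIMER, "all"));
    return 0;
}
```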

core_sched.c
    131  int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,  in sched_core_share_pid()  (argument)
    146  if (type > PIDTYPE_PGID || cmd >= PR_SCHED_CORE_MAX || pid < 0 ||  in sched_core_share_pid()
    174  if (type != PIDTYPE_PID || uaddr & 7) {  in sched_core_share_pid()
    199  if (type != PIDTYPE_PID) {  in sched_core_share_pid()
    212  if (type == PIDTYPE_PID) {  in sched_core_share_pid()
    218  grp = task_pid_type(task, type);  in sched_core_share_pid()
    220  do_each_pid_thread(grp, type, p) {  in sched_core_share_pid()
    225  } while_each_pid_thread(grp, type, p);  in sched_core_share_pid()
    227  do_each_pid_thread(grp, type, p) {  in sched_core_share_pid()
    229  } while_each_pid_thread(grp, type, p);  in sched_core_share_pid()

/kernel/events/

hw_breakpoint.c
    172  static inline int hw_breakpoint_slots_cached(int type) { return hw_breakpoint_slots(type); }  in hw_breakpoint_slots_cached()  (argument)
    182  return __nr_bp_slots[type];  in hw_breakpoint_slots_cached()
    266  enum bp_type_idx type)  in bp_slots_histogram_max_merge()  (argument)
    312  return bp_slots_histogram_max_merge(tsk_pinned, &tsk_pinned_all[type], type);  in max_task_bp_pinned()
    384  max_pinned += bp_slots_histogram_max(&cpu_pinned[type], type);  in max_bp_pinned_slots()
    574  enum bp_type_idx type;  in __reserve_bp_slot()  (local)
    587  type = find_slot_idx(bp_type);  in __reserve_bp_slot()
    609  enum bp_type_idx type;  in __release_bp_slot()  (local)
    612  type = find_slot_idx(bp_type);  in __release_bp_slot()
    903  for (int type = 0; type < TYPE_MAX; ++type) {  in hw_breakpoint_is_used()  (local)
    [all …]

/kernel/trace/

trace_export.c
    40  #define __field(type, item) type item;  (argument)
    43  #define __field_fn(type, item) type item;  (argument)
    46  #define __field_desc(type, container, item) type item;  (argument)
    49  #define __field_packed(type, container, item) type item;  (argument)
    52  #define __array(type, item, size) type item[size];  (argument)
    55  #define __stack_array(type, item, size, field) __array(type, item, size)  (argument)
    58  #define __array_desc(type, container, item, size) type item[size];  (argument)
    61  #define __dynamic_array(type, item) type item[];  (argument)
    144  #define __field(type, item)  (argument)
    147  #define __field_fn(type, item)  (argument)
    [all …]
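trace_export.c defines __field() and friends one way (near line 40) and later redefines them (near line 144) so that a single event description can be expanded once into struct members and again into other boilerplate. A compact X-macro style sketch of that multiple-expansion idea; it is an analogue of the trick, not the ftrace macro set itself:

```c
#include <stddef.h>
#include <stdio.h>

/* One field list, expanded several times with different definitions of __field(). */
#define MY_EVENT_FIELDS         \
    __field(int, pid)           \
    __field(long, delta)

/* Pass 1: __field() emits a struct member, like the definitions near line 40 above. */
#define __field(type, item) type item;
struct my_event { MY_EVENT_FIELDS };
#undef __field

/* Pass 2: __field() is redefined (cf. the redefinitions near line 144) so the same
 * list now expands into layout printouts instead of struct members. */
#define __field(type, item) \
    printf("  %-6s %-8s at offset %zu\n", #type, #item, offsetof(struct my_event, item));

int main(void)
{
    printf("struct my_event layout:\n");
    MY_EVENT_FIELDS
    return 0;
}
```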

trace_events_synth.c
    132  char *name, *type;  in synth_event_define_fields()  (local)
    139  type = event->fields[i]->type;  in synth_event_define_fields()
    200  if (!end || end < start || type + strlen(type) > end + 1)  in synth_field_string_size()
    762  char *type;  in parse_synth_field()  (local)
    766  if (!type)  in parse_synth_field()
    778  field->type = type;  in parse_synth_field()
    857  switch (type) {  in synth_event_reg()
    871  switch (type) {  in synth_event_reg()
    1210  if (!type)  in __synth_event_gen_cmd_start()
    2244  type = field->type;  in __synth_event_show()
    [all …]

trace_probe.c
    115  (!strcmp(type, "symbol") || !strcmp(type, "symstr")))  in find_fetch_type()
    118  if (!type)  in find_fetch_type()
    125  type = strchr(type, '/');  in find_fetch_type()
    126  if (!type)  in find_fetch_type()
    129  type++;  in find_fetch_type()
    480  if (!type)  in query_btf_context()
    567  type = btf_type_skip_modifiers(ctx->btf, type->type, &tid);  in parse_btf_field()
    711  if (!type) {  in parse_btf_arg()
    1537  ret = finalize_fetch_insn(code, parg, type, type ? type - arg : 0, ctx);  in traceprobe_parse_probe_arg_body()
    1779  if (type && !(*type == ':' || *type == '\0')) {  in traceprobe_expand_meta_args()
    [all …]

trace_events_user.c
    1045  if (strcmp(type, "s64") == 0)  in user_field_size()
    1065  if (strcmp(type, "s8") == 0)  in user_field_size()
    1067  if (strcmp(type, "u8") == 0)  in user_field_size()
    1149  field->type = type;  in user_event_add_field()
    1177  char *part, *type, *name;  in user_event_parse_field()  (local)
    1216  type = field;  in user_event_parse_field()
    1230  type = part;  in user_event_parse_field()
    1308  if (strcmp(type, "s8") == 0)  in user_field_format()
    1310  if (strcmp(type, "u8") == 0)  in user_field_format()
    1714  enum trace_reg type,  in user_event_reg()  (argument)
    [all …]
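user_field_size() resolves a textual field type such as "s64" or "u8" into a byte size with a chain of strcmp() calls. A stripped-down sketch of that lookup, written table-driven here rather than as the kernel's if/strcmp chain:

```c
#include <stdio.h>
#include <string.h>

/* Map a textual field type to its size in bytes; 0 means "unknown type".
 * The table contents are illustrative, covering only the fixed-width integers. */
static size_t field_size(const char *type)
{
    static const struct { const char *name; size_t size; } tbl[] = {
        { "s8", 1 }, { "u8", 1 }, { "s16", 2 }, { "u16", 2 },
        { "s32", 4 }, { "u32", 4 }, { "s64", 8 }, { "u64", 8 },
    };

    for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
        if (strcmp(type, tbl[i].name) == 0)
            return tbl[i].size;
    return 0;
}

int main(void)
{
    printf("s64 -> %zu bytes, u8 -> %zu bytes, blob -> %zu\n",
           field_size("s64"), field_size("u8"), field_size("blob"));
    return 0;
}
```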

trace_dynevent.c
    72  int dyn_event_release(const char *raw_command, struct dyn_event_operations *type)  in dyn_event_release()  (argument)
    104  if (type && type != pos->ops)  in dyn_event_release()
    123  int dyn_event_create(const char *raw_command, struct dyn_event_operations *type)  in dyn_event_create()  (argument)
    128  ret = type->create(raw_command);  in dyn_event_create()
    201  int dyn_events_release_all(struct dyn_event_operations *type)  in dyn_events_release_all()  (argument)
    208  if (type && ev->ops != type)  in dyn_events_release_all()
    216  if (type && ev->ops != type)  in dyn_events_release_all()
    410  enum dynevent_type type,  in dynevent_cmd_init()  (argument)
    416  cmd->type = type;  in dynevent_cmd_init()
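In trace_dynevent.c the "type" is really an operations table: dyn_event_create() just forwards the raw command string to type->create(), and the release paths filter events by comparing ev->ops against the requested type. A compact sketch of that ops-pointer dispatch; the backends below are invented, and the event list and locking are omitted:

```c
#include <stdio.h>

struct dyn_event_operations {
    const char *name;
    int (*create)(const char *raw_command);
};

static int kprobe_create(const char *raw_command)
{
    printf("kprobe backend parses: %s\n", raw_command);
    return 0;
}

static int synth_create(const char *raw_command)
{
    printf("synthetic backend parses: %s\n", raw_command);
    return 0;
}

static struct dyn_event_operations kprobe_ops = { "kprobe", kprobe_create };
static struct dyn_event_operations synth_ops  = { "synth",  synth_create };

/* Forward the command to the selected backend, mirroring the type->create() call above. */
static int create_dyn_event(const char *raw_command, struct dyn_event_operations *type)
{
    return type->create(raw_command);
}

int main(void)
{
    create_dyn_event("p:myprobe do_sys_open", &kprobe_ops);
    create_dyn_event("s:latency u64 lat", &synth_ops);
    return 0;
}
```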

/kernel/cgroup/

misc.c
    71  return type >= 0 && type < MISC_CG_RES_TYPES;  in valid_type()
    88  if (!valid_type(type))  in misc_cg_set_capacity()
    109  misc_res_name[type]);  in misc_cg_cancel_charge()
    131  atomic64_inc(&cg->res[type].events);  in misc_cg_event()
    159  if (!(valid_type(type) && cg && READ_ONCE(misc_res_capacity[type])))  in misc_cg_try_charge()
    166  res = &i->res[type];  in misc_cg_try_charge()
    179  misc_cg_event(type, i);  in misc_cg_try_charge()
    183  misc_cg_cancel_charge(type, i, amount);  in misc_cg_try_charge()
    271  type = i;  in misc_cg_max_write()
    276  if (type == MISC_CG_RES_TYPES)  in misc_cg_max_write()
    [all …]
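misc_cg_try_charge() validates the resource type, walks up the cgroup hierarchy charging each level, and cancels the charges already made when a limit is hit (misc_cg_cancel_charge() on the levels walked so far). A simplified single-threaded sketch of that charge-or-rollback walk; the cgroup linkage, atomics and event accounting are left out and the names are illustrative:

```c
#include <stdio.h>

enum misc_res_type { RES_SEV, RES_SEV_ES, MISC_CG_RES_TYPES };

struct cg {
    struct cg *parent;
    long usage[MISC_CG_RES_TYPES];
    long max[MISC_CG_RES_TYPES];
};

static int valid_type(int type)
{
    return type >= 0 && type < MISC_CG_RES_TYPES;   /* same check as misc.c line 71 */
}

/* Charge amount at every level up to the root; undo everything on failure. */
static int try_charge(enum misc_res_type type, struct cg *cg, long amount)
{
    struct cg *i, *j;

    if (!valid_type(type) || !cg)
        return -1;

    for (i = cg; i; i = i->parent) {
        i->usage[type] += amount;
        if (i->usage[type] > i->max[type])
            goto err;
    }
    return 0;
err:
    for (j = cg; j != i->parent; j = j->parent)     /* roll back this level and below */
        j->usage[type] -= amount;
    return -1;
}

int main(void)
{
    struct cg root  = { .max = { [RES_SEV] = 4,  [RES_SEV_ES] = 4 } };
    struct cg child = { .parent = &root, .max = { [RES_SEV] = 10, [RES_SEV_ES] = 10 } };

    printf("first charge:  %d\n", try_charge(RES_SEV, &child, 3));  /* succeeds */
    printf("second charge: %d\n", try_charge(RES_SEV, &child, 3));  /* exceeds root max, rolled back */
    printf("child usage after rollback: %ld\n", child.usage[RES_SEV]);
    return 0;
}
```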

/kernel/time/

alarmtimer.c
    223  int i, ret, type;  in alarmtimer_suspend()  (local)
    228  type = freezer_alarmtype;  in alarmtimer_suspend()
    251  type = i;  in alarmtimer_suspend()
    316  alarm->type = type;  in __alarm_init()
    468  switch(type) {  in alarmtimer_freezerset()
    471  type = ALARM_REALTIME_FREEZER;  in alarmtimer_freezerset()
    475  type = ALARM_BOOTTIME_FREEZER;  in alarmtimer_freezerset()
    488  freezer_alarmtype = type;  in alarmtimer_freezerset()
    661  enum alarmtimer_type type;  in alarm_timer_create()  (local)
    699  enum alarmtimer_type type)  in alarmtimer_do_nsleep()  (argument)
    [all …]

/kernel/

pid.c
    166  enum pid_type type;  in alloc_pid()  (local)
    266  for (type = 0; type < PIDTYPE_MAX; ++type)  in alloc_pid()
    346  hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);  in attach_pid()
    367  WARN_ON(pids[type]);  in __change_pid()
    368  pids[type] = pid;  in __change_pid()
    380  attach_pid(task, type);  in change_pid()
    410  hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);  in transfer_pid()
    574  enum pid_type type;  in pidfd_get_task()  (local)
    578  type = PIDTYPE_PID;  in pidfd_get_task()
    582  type = PIDTYPE_TGID;  in pidfd_get_task()
    [all …]

ucount.c
    215  enum ucount_type type)  in inc_ucount()  (argument)
    223  max = READ_ONCE(tns->ucount_max[type]);  in inc_ucount()
    231  atomic_long_dec(&iter->ucount[type]);  in inc_ucount()
    259  max = get_userns_rlimit_max(iter->ns, type);  in inc_rlimit_ucounts()
    278  struct ucounts *last, enum rlimit_type type)  in do_dec_rlimit_put_ucounts()  (argument)
    292  do_dec_rlimit_put_ucounts(ucounts, NULL, type);  in dec_rlimit_put_ucounts()
    310  max = get_userns_rlimit_max(iter->ns, type);  in inc_rlimit_get_ucounts()
    322  dec = atomic_long_sub_return(1, &iter->rlimit[type]);  in inc_rlimit_get_ucounts()
    324  do_dec_rlimit_put_ucounts(ucounts, iter, type);  in inc_rlimit_get_ucounts()
    335  long val = get_rlimit_value(iter, type);  in is_rlimit_overlimit()
    [all …]
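The ucount.c hits show the usual optimistic pattern: bump the per-type counter, compare against the namespace maximum, and decrement again if the limit was exceeded (atomic_long_dec() at line 231, the sub-and-put path around lines 322-324). A user-space sketch of that idea with C11 atomics; the namespace walk and RCU details are omitted and the type names are illustrative:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum ucount_type { UCOUNT_USER_NAMESPACES, UCOUNT_PIDS, UCOUNT_COUNTS };

struct ns_counts {
    atomic_long count[UCOUNT_COUNTS];
    long max[UCOUNT_COUNTS];
};

/* Optimistically bump the counter, then undo the bump if the new value exceeds the max. */
static bool inc_ucount(struct ns_counts *ns, enum ucount_type type)
{
    long new = atomic_fetch_add(&ns->count[type], 1) + 1;

    if (new > ns->max[type]) {
        atomic_fetch_sub(&ns->count[type], 1);   /* cf. the decrement-on-overflow above */
        return false;
    }
    return true;
}

static void dec_ucount(struct ns_counts *ns, enum ucount_type type)
{
    atomic_fetch_sub(&ns->count[type], 1);
}

int main(void)
{
    struct ns_counts ns = { .max = { [UCOUNT_PIDS] = 2, [UCOUNT_USER_NAMESPACES] = 1 } };

    printf("%d %d %d\n",
           inc_ucount(&ns, UCOUNT_PIDS),
           inc_ucount(&ns, UCOUNT_PIDS),
           inc_ucount(&ns, UCOUNT_PIDS));   /* third one exceeds max=2 and is undone */
    dec_ucount(&ns, UCOUNT_PIDS);
    return 0;
}
```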

jump_label.c
    432  key->type |= JUMP_TYPE_LINKED;  in static_key_set_linked()
    447  unsigned long type;  in static_key_set_entries()  (local)
    450  type = key->type & JUMP_TYPE_MASK;  in static_key_set_entries()
    452  key->type |= type;  in static_key_set_entries()
    573  return (key->type & JUMP_TYPE_LINKED) && !(key->type & ~JUMP_TYPE_MASK);  in static_key_sealed()
    578  unsigned long type = key->type & JUMP_TYPE_TRUE;  in static_key_seal()  (local)
    579  key->type = JUMP_TYPE_LINKED | type;  in static_key_seal()
    619  return type ^ branch;  in jump_label_init_type()
    643  unsigned long type;  in static_key_set_mod()  (local)
    646  type = key->type & JUMP_TYPE_MASK;  in static_key_set_mod()
    [all …]
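jump_label.c packs bookkeeping flags (JUMP_TYPE_TRUE, JUMP_TYPE_LINKED, ...) into the low bits of key->type, which otherwise holds a pointer to the key's jump entries, so JUMP_TYPE_MASK splits the two whenever either part changes. A reduced sketch of that low-bit tagging on an aligned pointer; the structures are cut down and the flag values mirror the idea rather than the exact kernel layout:

```c
#include <stdint.h>
#include <stdio.h>

#define TYPE_TRUE    1UL          /* key is currently true */
#define TYPE_LINKED  2UL          /* entries field points at a linked node instead */
#define TYPE_MASK    3UL          /* low two bits hold the flags */

struct entry { int dummy; };

struct key {
    unsigned long type;           /* pointer to entries, with flags packed in bits 0-1 */
};

/* Replace the pointer part while preserving the flag bits (cf. static_key_set_entries()). */
static void key_set_entries(struct key *k, struct entry *e)
{
    unsigned long flags = k->type & TYPE_MASK;

    k->type = (unsigned long)e | flags;
}

static struct entry *key_entries(const struct key *k)
{
    return (struct entry *)(k->type & ~TYPE_MASK);
}

int main(void)
{
    static struct entry e;        /* assumed to be at least 4-byte aligned, leaving bits 0-1 free */
    struct key k = { .type = TYPE_TRUE };

    key_set_entries(&k, &e);
    k.type |= TYPE_LINKED;        /* cf. static_key_set_linked() */
    printf("entries=%p true=%lu linked=%lu\n",
           (void *)key_entries(&k), k.type & TYPE_TRUE, (k.type & TYPE_LINKED) >> 1);
    return 0;
}
```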

auditsc.c
    91  int type;  (member)
    703  f->type,  in audit_filter_rules()
    1336  int type;  in audit_log_time()  (local)
    1339  for (type = 0; type < AUDIT_NTP_NVALS; type++) {  in audit_log_time()
    1340  if (ntp->vals[type].newval != ntp->vals[type].oldval) {  in audit_log_time()
    2184  aname->type = type;  in audit_alloc_name()
    2463  (n->type != type && n->type != AUDIT_TYPE_UNKNOWN))  in __audit_inode_child()
    2472  n->type = type;  in __audit_inode_child()
    2907  int type;  in __audit_ntp_log()  (local)
    2909  for (type = 0; type < AUDIT_NTP_NVALS; type++)  in __audit_ntp_log()
    [all …]

/kernel/kcsan/

core.c
    233  if (type & KCSAN_ACCESS_ATOMIC)  in is_atomic()
    241  if (type & KCSAN_ACCESS_ASSERT)  in is_atomic()
    321  static void delay_access(int type)  in delay_access()  (argument)
    402  int type, unsigned long ip)  in find_reorder_access()  (argument)
    414  reorder_access->type == type && reorder_access->ip == ip;  in find_reorder_access()
    419  int type, unsigned long ip)  in set_reorder_access()  (argument)
    435  reorder_access->type = type | KCSAN_ACCESS_SCOPED;  in set_reorder_access()
    457  int type,  in kcsan_found_watchpoint()  (argument)
    615  delay_access(type);  in kcsan_setup_watchpoint()
    766  type = reorder_access->type;  in check_access()
    [all …]

/kernel/module/

tree_lookup.c
    80  for_each_mod_mem_type(type) {  in mod_tree_insert()
    81  mod->mem[type].mtn.mod = mod;  in mod_tree_insert()
    82  if (mod->mem[type].size)  in mod_tree_insert()
    83  __mod_tree_insert(&mod->mem[type].mtn, &mod_tree);  in mod_tree_insert()
    89  for_class_mod_mem_type(type, init) {  in mod_tree_remove_init()
    90  if (mod->mem[type].size)  in mod_tree_remove_init()
    91  __mod_tree_remove(&mod->mem[type].mtn, &mod_tree);  in mod_tree_remove_init()
    97  for_each_mod_mem_type(type) {  in mod_tree_remove()
    98  if (mod->mem[type].size)  in mod_tree_remove()
    99  __mod_tree_remove(&mod->mem[type].mtn, &mod_tree);  in mod_tree_remove()
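mod_tree_insert() and the removal paths loop over every module memory type with for_each_mod_mem_type() and skip regions whose size is zero. A self-contained sketch of such an enum-range iterator macro plus the size check; the region contents are placeholders, not the kernel's mod_tree bookkeeping:

```c
#include <stdio.h>

enum mem_type { MEM_TEXT, MEM_DATA, MEM_RODATA, MEM_INIT_TEXT, NR_MEM_TYPES };

/* Simple range iterator over every memory type, in the spirit of for_each_mod_mem_type(). */
#define for_each_mem_type(type) \
    for (enum mem_type type = 0; type < NR_MEM_TYPES; type++)

struct region { unsigned long base, size; };

struct mod { struct region mem[NR_MEM_TYPES]; };

static void insert_regions(struct mod *m)
{
    for_each_mem_type(type) {
        if (!m->mem[type].size)      /* empty regions are skipped, as in mod_tree_insert() */
            continue;
        printf("inserting type %d: base=0x%lx size=%lu\n",
               (int)type, m->mem[type].base, m->mem[type].size);
    }
}

int main(void)
{
    struct mod m = { .mem = { [MEM_TEXT] = { 0x1000, 4096 }, [MEM_DATA] = { 0x3000, 512 } } };

    insert_regions(&m);
    return 0;
}
```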

strict_rwx.c
    15  static int module_set_memory(const struct module *mod, enum mod_mem_type type,  in module_set_memory()  (argument)
    18  const struct module_memory *mod_mem = &mod->mem[type];  in module_set_memory()
    35  for_class_mod_mem_type(type, text) {  in module_enable_text_rox()
    36  const struct module_memory *mem = &mod->mem[type];  in module_enable_text_rox()
    42  ret = module_set_memory(mod, type, set_memory_rox);  in module_enable_text_rox()
    44  ret = module_set_memory(mod, type, set_memory_x);  in module_enable_text_rox()
    81  for_class_mod_mem_type(type, data) {  in module_enable_data_nx()
    82  int ret = module_set_memory(mod, type, set_memory_nx);  in module_enable_data_nx()

debug_kmemleak.c
    16  for_each_mod_mem_type(type) {  in kmemleak_load_module()
    17  if (type != MOD_DATA && type != MOD_INIT_DATA &&  in kmemleak_load_module()
    18  !mod->mem[type].is_rox)  in kmemleak_load_module()
    19  kmemleak_no_scan(mod->mem[type].base);  in kmemleak_load_module()

/kernel/power/

qos.c
    63  switch (c->type) {  in pm_qos_get_value()
    220  .type = PM_QOS_MIN,
    452  c->type = PM_QOS_MAX;  in freq_constraints_init()
    461  c->type = PM_QOS_MIN;  in freq_constraints_init()
    476  switch (type) {  in freq_qos_read_value()
    508  switch(req->type) {  in freq_qos_apply()
    552  req->type = type;  in freq_qos_add_request()
    556  req->type = 0;  in freq_qos_add_request()
    613  req->type = 0;  in freq_qos_remove_request()
    634  switch (type) {  in freq_qos_add_notifier()
    [all …]
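The qos.c constraints are tagged PM_QOS_MIN or PM_QOS_MAX, and pm_qos_get_value()/freq_qos_apply() switch on that type to decide whether the effective value is the smallest or the largest outstanding request. A reduced sketch of that type-directed aggregation over a plain array of requests; the plist and notifier machinery are omitted:

```c
#include <stdio.h>

enum qos_type { QOS_MIN, QOS_MAX };

struct constraint {
    enum qos_type type;       /* decides how competing requests are combined */
    int default_value;
};

/* Effective value: the minimum of all requests for QOS_MIN, the maximum for QOS_MAX. */
static int qos_get_value(const struct constraint *c, const int *req, int n)
{
    if (n == 0)
        return c->default_value;

    int val = req[0];
    for (int i = 1; i < n; i++) {
        switch (c->type) {
        case QOS_MIN:
            if (req[i] < val)
                val = req[i];
            break;
        case QOS_MAX:
            if (req[i] > val)
                val = req[i];
            break;
        }
    }
    return val;
}

int main(void)
{
    int requests[] = { 1200, 800, 2000 };        /* e.g. competing frequency requests */
    struct constraint max_freq = { .type = QOS_MIN, .default_value = 0 };
    struct constraint min_freq = { .type = QOS_MAX, .default_value = 0 };

    printf("effective max_freq cap:   %d\n", qos_get_value(&max_freq, requests, 3));
    printf("effective min_freq floor: %d\n", qos_get_value(&min_freq, requests, 3));
    return 0;
}
```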