/kernel/trace/
trace_output.c
    51  trace_seq_bprintf(s, field->fmt, field->buf);  in trace_print_bprintk_msg_only()
    926  if (field)  in print_array()
    957  if (field->offset + field->size > iter->ent_size) {  in print_fields()
    1167  field->ip,  in trace_fn_raw()
    1475  field->timestamp.tv_nsec, field->count);  in trace_hwlat_print()
    1539  net_runtime = field->runtime - field->noise;  in trace_osnoise_print()
    1546  field->noise,  in trace_osnoise_print()
    1572  field->noise,  in trace_osnoise_raw()
    1698  trace_seq_bprintf(s, field->fmt, field->buf);  in trace_bprint_print()
    1714  trace_seq_bprintf(s, field->fmt, field->buf);  in trace_bprint_raw()
    [all …]
|
trace_events_inject.c
    62  if (!field)  in parse_field()
    65  *pf = field;  in parse_field()
    79  if (is_string_field(field))  in parse_field()
    94  if (field->is_signed)  in parse_field()
    108  if (!is_string_field(field))  in parse_field()
    146  if (field->size + field->offset > size)  in trace_get_entry_size()
    147  size = field->size + field->offset;  in trace_get_entry_size()
    177  str_loc -= field->offset + field->size;  in trace_alloc_entry()
    220  strscpy(entry + field->offset, addr, field->size);  in parse_entry()
    238  str_loc -= field->offset + field->size;  in parse_entry()
    [all …]
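The trace_get_entry_size() hits above show the usual way the size of a flat trace record is derived: it is the maximum of field->offset + field->size over all registered fields. Below is a minimal userspace sketch of that calculation; struct field_desc and the example field table are invented for illustration and are not the kernel's types.

```c
#include <stdio.h>
#include <stddef.h>

/* Hypothetical field descriptor carrying only offset/size bookkeeping. */
struct field_desc {
	const char *name;
	size_t offset;
	size_t size;
};

/* Record size is the largest offset + size across all fields. */
static size_t entry_size(const struct field_desc *fields, int nfields)
{
	size_t size = 0;

	for (int i = 0; i < nfields; i++) {
		if (fields[i].offset + fields[i].size > size)
			size = fields[i].offset + fields[i].size;
	}
	return size;
}

int main(void)
{
	struct field_desc fields[] = {
		{ "common_type", 0, 2 },
		{ "common_pid",  4, 4 },
		{ "lat",         8, 8 },
	};

	printf("entry size: %zu bytes\n", entry_size(fields, 3)); /* 16 */
	return 0;
}
```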
|
trace_events_synth.c
    657  kfree(field->type);  in free_synth_field()
    658  kfree(field->name);  in free_synth_field()
    659  kfree(field);  in free_synth_field()
    709  field = kzalloc(sizeof(*field), GFP_KERNEL);  in parse_synth_field()
    710  if (!field)  in parse_synth_field()
    719  if (!field->name)  in parse_synth_field()
    737  if (!field->type)  in parse_synth_field()
    788  field->size = size;  in parse_synth_field()
    795  field->is_signed = synth_field_signed(field->type);  in parse_synth_field()
    797  return field;  in parse_synth_field()
    [all …]
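parse_synth_field() and free_synth_field() above allocate a field, duplicate its type and name strings, and derive size and signedness from the type. The following is a rough userspace sketch of that parsing step under the assumption of a tiny hand-rolled type table; none of the names or the table contents are the kernel's.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

/* Illustrative stand-in for a synthetic-event field descriptor. */
struct synth_field {
	char *type;
	char *name;
	int size;
	bool is_signed;
};

/* Small illustrative subset of integer types. */
static const struct { const char *type; int size; bool is_signed; } types[] = {
	{ "u32", 4, false }, { "s32", 4, true },
	{ "u64", 8, false }, { "s64", 8, true },
};

/* Turn a "type name" spec such as "u64 lat" into a field descriptor. */
static struct synth_field *parse_field(const char *spec)
{
	const char *space = strchr(spec, ' ');
	struct synth_field *field;

	if (!space)
		return NULL;

	field = calloc(1, sizeof(*field));
	if (!field)
		return NULL;

	field->type = strndup(spec, space - spec);
	field->name = strdup(space + 1);

	for (size_t i = 0; i < sizeof(types) / sizeof(types[0]); i++) {
		if (!strcmp(field->type, types[i].type)) {
			field->size = types[i].size;
			field->is_signed = types[i].is_signed;
			break;
		}
	}
	return field;
}

static void free_field(struct synth_field *field)
{
	free(field->type);
	free(field->name);
	free(field);
}

int main(void)
{
	struct synth_field *f = parse_field("u64 lat");

	if (f) {
		printf("%s %s: size %d, signed %d\n",
		       f->type, f->name, f->size, f->is_signed);
		free_field(f);
	}
	return 0;
}
```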
|
trace_events_hist.c
    1344  if (field->field)  in hist_field_name()
    1345  field_name = field->field->name;  in hist_field_name()
    2075  hist_field->field = field;  in create_hist_field()
    2390  if (!field || !field->size) {  in parse_field()
    5079  field = hist_field->field;  in create_tracing_map_fields()
    5247  field = key_field->field;  in add_to_key()
    5877  if (field->field)  in hist_field_debug_show()
    5879  field->field->name);  in hist_field_debug_show()
    5936  if (field->field)  in field_var_debug_show()
    5938  field->field->name);  in field_var_debug_show()
    [all …]
|
trace_events_user.c
    1104  kfree(field);  in user_event_destroy_fields()
    1116  field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);  in user_event_add_field()
    1118  if (!field)  in user_event_add_field()
    1138  kfree(field);  in user_event_add_field()
    1182  field = skip_spaces(field);  in user_event_parse_field()
    1216  type = field;  in user_event_parse_field()
    1217  field = strpbrk(field + len, " ");  in user_event_parse_field()
    1222  *field++ = '\0';  in user_event_parse_field()
    1267  char *field;  in user_event_parse_fields() local
    1400  field->name, user_field_format(field->type));  in user_event_set_print_fmt()
    [all …]
|
trace_events.c
    105  if (field)  in trace_find_event_field()
    267  for (; field->type; field++) {  in find_event_field()
    2022  field->type, field->name, field->offset,  in f_show()
    2023  field->size, !!field->is_signed);  in f_show()
    2027  field->type, field->name,  in f_show()
    2028  field->len, field->offset,  in f_show()
    2029  field->size, !!field->is_signed);  in f_show()
    2033  field->type, field->name,  in f_show()
    2034  field->offset, field->size, !!field->is_signed);  in f_show()
    2869  for (; field->type; field++) {  in event_define_fields()
    [all …]
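The f_show() hits are what produce the per-field lines of an event's "format" file under tracefs (field:&lt;type&gt; &lt;name&gt;; offset:…; size:…; signed:…;). The printer below reproduces that style in userspace for illustration; struct event_field and the sample field table are stand-ins, not struct ftrace_event_field.

```c
#include <stdio.h>

/* Illustrative stand-in for an event field descriptor. */
struct event_field {
	const char *type;
	const char *name;
	int offset;
	int size;
	int is_signed;
};

/* Print one line in the style of a tracefs "format" file entry. */
static void show_field(const struct event_field *f)
{
	printf("\tfield:%s %s;\toffset:%d;\tsize:%d;\tsigned:%d;\n",
	       f->type, f->name, f->offset, f->size, f->is_signed);
}

int main(void)
{
	struct event_field fields[] = {
		{ "unsigned short", "common_type", 0, 2, 0 },
		{ "int",            "common_pid",  4, 4, 1 },
	};

	for (int i = 0; i < 2; i++)
		show_field(&fields[i]);
	return 0;
}
```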
|
trace_eprobe.c
    237  struct eprobe_trace_entry_head field;  in eprobe_event_define_fields() local
    289  (u8 *)&field[1], field) < 0)  in print_eprobe_event()
    304  addr = rec + field->offset;  in get_event_field()
    306  if (is_string_field(field)) {  in get_event_field()
    307  switch (field->filter_type) {  in get_event_field()
    327  switch (field->size) {  in get_event_field()
    329  if (field->is_signed)  in get_event_field()
    335  if (field->is_signed)  in get_event_field()
    341  if (field->is_signed)  in get_event_field()
    347  if (field->size == sizeof(long)) {  in get_event_field()
    [all …]
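get_event_field() reads a value out of a raw record by adding field->offset and then switching on field->size and field->is_signed to widen it correctly. Here is a self-contained sketch of that widening logic; struct field_desc and fetch_field() are invented names used only for this example.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative field descriptor: where the value sits and how wide it is. */
struct field_desc {
	unsigned int offset;
	unsigned int size;
	int is_signed;
};

/* Fetch a field from a raw record, sign-extending to 64 bit when needed. */
static unsigned long long fetch_field(const void *rec,
				      const struct field_desc *f)
{
	const unsigned char *addr = (const unsigned char *)rec + f->offset;

	switch (f->size) {
	case 1: {
		uint8_t v; memcpy(&v, addr, 1);
		return f->is_signed ? (unsigned long long)(long long)(int8_t)v : v;
	}
	case 2: {
		uint16_t v; memcpy(&v, addr, 2);
		return f->is_signed ? (unsigned long long)(long long)(int16_t)v : v;
	}
	case 4: {
		uint32_t v; memcpy(&v, addr, 4);
		return f->is_signed ? (unsigned long long)(long long)(int32_t)v : v;
	}
	case 8: {
		uint64_t v; memcpy(&v, addr, 8);
		return v;
	}
	default:
		return 0;
	}
}

int main(void)
{
	unsigned char rec[16] = { 0 };
	int32_t pid = -42;
	struct field_desc f = { .offset = 4, .size = 4, .is_signed = 1 };

	memcpy(rec + 4, &pid, sizeof(pid));
	printf("%lld\n", (long long)fetch_field(rec, &f));	/* -42 */
	return 0;
}
```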
|
trace_probe.c
    523  char *field;  in split_next_field() local
    527  if (field) {  in split_next_field()
    528  if (field[0] == '-' && field[1] == '>') {  in split_next_field()
    529  field[0] = '\0';  in split_next_field()
    530  field += 2;  in split_next_field()
    533  field[0] = '\0';  in split_next_field()
    534  field += 1;  in split_next_field()
    588  if (!field) {  in parse_btf_field()
    638  char *field = NULL;  in parse_btf_arg() local
    666  if (field) {  in parse_btf_arg()
    [all …]
|
trace_events_filter.c
    83  struct ftrace_event_field *field;  member
    1672  if (!field) {  in parse_pred()
    1715  pred->field = field;  in parse_pred()
    1716  pred->offset = field->offset;  in parse_pred()
    1931  switch (field->size) {  in parse_pred()
    2055  if (field->is_signed)  in parse_pred()
    2070  field->is_signed);  in parse_pred()
    2625  struct ftrace_event_field *field = pred->field;  in ftrace_function_check_pred() local
    2827  struct ftrace_event_field *field = pred->field;  in test_pred_visited_fn() local
    2842  struct ftrace_event_field *field = pred->field;  in update_pred_fn() local
    [all …]
|
trace_branch.c
    137  struct trace_branch *field;  in trace_branch_print() local
    139  trace_assign_type(field, iter->ent);  in trace_branch_print()
    142  field->correct ? " ok " : " MISS ",  in trace_branch_print()
    143  field->func,  in trace_branch_print()
    144  field->file,  in trace_branch_print()
    145  field->line);  in trace_branch_print()
|
trace_mmiotrace.c
    168  struct trace_mmiotrace_rw *field;  in mmio_print_rw() local
    175  trace_assign_type(field, entry);  in mmio_print_rw()
    176  rw = &field->rw;  in mmio_print_rw()
    213  struct trace_mmiotrace_map *field;  in mmio_print_map() local
    220  trace_assign_type(field, entry);  in mmio_print_map()
    221  m = &field->map;  in mmio_print_map()
|
trace_kprobe.c
    1186  const char *field;  in __kprobe_event_gen_cmd_start() local
    1188  field = va_arg(args, const char *);  in __kprobe_event_gen_cmd_start()
    1189  if (!field)  in __kprobe_event_gen_cmd_start()
    1197  arg.str = field;  in __kprobe_event_gen_cmd_start()
    1237  const char *field;  in __kprobe_event_add_fields() local
    1239  field = va_arg(args, const char *);  in __kprobe_event_add_fields()
    1240  if (!field)  in __kprobe_event_add_fields()
    1248  arg.str = field;  in __kprobe_event_add_fields()
    1591  (u8 *)&field[1], field) < 0)  in print_kprobe_event()
    1626  (u8 *)&field[1], field) < 0)  in print_kretprobe_event()
    [all …]
|
trace_fprobe.c
    620  struct fentry_trace_entry_head *field;  in print_fentry_event() local
    624  field = (struct fentry_trace_entry_head *)iter->ent;  in print_fentry_event()
    632  if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))  in print_fentry_event()
    638  (u8 *)&field[1], field) < 0)  in print_fentry_event()
    650  struct fexit_trace_entry_head *field;  in print_fexit_event() local
    654  field = (struct fexit_trace_entry_head *)iter->ent;  in print_fexit_event()
    673  (u8 *)&field[1], field) < 0)  in print_fexit_event()
    685  struct fentry_trace_entry_head field;  in fentry_event_define_fields() local
    694  return traceprobe_define_arg_fields(event_call, sizeof(field), tp);  in fentry_event_define_fields()
    700  struct fexit_trace_entry_head field;  in fexit_event_define_fields() local
    [all …]
|
tracing_map.c
    860  struct tracing_map_field *field;  in cmp_entries_sum() local
    873  field = &elt_a->fields[sort_key->field_idx];  in cmp_entries_sum()
    874  cmp_fn = field->cmp_fn;  in cmp_entries_sum()
    891  struct tracing_map_field *field;  in cmp_entries_key() local
    904  field = &elt_a->fields[sort_key->field_idx];  in cmp_entries_key()
    906  cmp_fn = field->cmp_fn;  in cmp_entries_key()
    908  val_a = elt_a->key + field->offset;  in cmp_entries_key()
    909  val_b = elt_b->key + field->offset;  in cmp_entries_key()
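cmp_entries_sum() and cmp_entries_key() look up the per-field comparator (field->cmp_fn) and, for key fields, compare the bytes at field->offset inside each packed key. The qsort()-based approximation below illustrates that pattern in userspace; the struct names, key layout, and comparator are made up for the example.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Illustrative sort key: which field to compare, and how. */
struct sort_field {
	size_t offset;
	int (*cmp_fn)(const void *a, const void *b);
};

static int cmp_u64(const void *a, const void *b)
{
	uint64_t va, vb;

	memcpy(&va, a, sizeof(va));
	memcpy(&vb, b, sizeof(vb));
	return (va > vb) - (va < vb);
}

struct entry {
	unsigned char key[16];	/* packed key fields */
};

static struct sort_field sort_key = { .offset = 8, .cmp_fn = cmp_u64 };

/* qsort callback: compare the selected field inside each key blob. */
static int cmp_entries(const void *pa, const void *pb)
{
	const struct entry *a = pa, *b = pb;

	return sort_key.cmp_fn(a->key + sort_key.offset,
			       b->key + sort_key.offset);
}

int main(void)
{
	struct entry e[3];
	uint64_t vals[3] = { 30, 10, 20 };

	memset(e, 0, sizeof(e));
	for (int i = 0; i < 3; i++)
		memcpy(e[i].key + 8, &vals[i], sizeof(vals[i]));

	qsort(e, 3, sizeof(e[0]), cmp_entries);

	for (int i = 0; i < 3; i++) {
		uint64_t v;

		memcpy(&v, e[i].key + 8, sizeof(v));
		printf("%llu\n", (unsigned long long)v);	/* 10 20 30 */
	}
	return 0;
}
```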
|
trace_functions_graph.c
    1180  print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,  in print_graph_entry() argument
    1202  memcpy(entry, field, iter->ent_size);  in print_graph_entry()
    1389  struct ftrace_graph_ent_entry *field;  in print_graph_function_flags() local
    1406  field = &data->ent.ent;  in print_graph_function_flags()
    1408  ret = print_graph_entry(field, s, iter, flags);  in print_graph_function_flags()
    1419  trace_assign_type(field, entry);  in print_graph_function_flags()
    1420  return print_graph_entry(field, s, iter, flags);  in print_graph_function_flags()
    1433  struct ftrace_graph_ret_entry *field;  in print_graph_function_flags() local
    1434  trace_assign_type(field, entry);  in print_graph_function_flags()
    1435  return print_graph_return(field, s, entry, iter, flags);  in print_graph_function_flags()
|
trace.h
    90  #define __stack_array(type, item, size, field) type item[] __counted_by(field);  argument
    1650  static inline bool is_string_field(struct ftrace_event_field *field)  in is_string_field() argument
    1652  return field->filter_type == FILTER_DYN_STRING ||  in is_string_field()
    1653  field->filter_type == FILTER_RDYN_STRING ||  in is_string_field()
    1654  field->filter_type == FILTER_STATIC_STRING ||  in is_string_field()
    1655  field->filter_type == FILTER_PTR_STRING ||  in is_string_field()
    1656  field->filter_type == FILTER_COMM;  in is_string_field()
    1659  static inline bool is_function_field(struct ftrace_event_field *field)  in is_function_field() argument
    1661  return field->filter_type == FILTER_TRACE_FN;  in is_function_field()
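is_string_field() is a simple dispatch on field->filter_type, letting filter code decide whether a field is compared as a string or as a number. A compilable sketch of the same idea follows, using an illustrative subset of the filter types and a stand-in struct rather than the kernel's struct ftrace_event_field.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative subset of filter types: how a field's bytes are interpreted. */
enum filter_type {
	FILTER_OTHER,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
};

struct event_field {
	const char *name;
	enum filter_type filter_type;
};

static bool is_string_field(const struct event_field *field)
{
	return field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_PTR_STRING;
}

int main(void)
{
	struct event_field comm = { "comm", FILTER_STATIC_STRING };
	struct event_field pid  = { "pid",  FILTER_OTHER };

	printf("%s: %d, %s: %d\n", comm.name, is_string_field(&comm),
	       pid.name, is_string_field(&pid));	/* comm: 1, pid: 0 */
	return 0;
}
```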
|
trace_export.c
    55  #define __stack_array(type, item, size, field) __array(type, item, size)  argument
    159  #define __stack_array(type, item, len, field)  argument
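trace_export.c redefines __stack_array() (and the other __field-style macros) at different stages so that one field list can be expanded into several different outputs. The standalone example below shows that multi-expansion technique in miniature; SAMPLE_FIELDS, FIELD(), and struct sample_entry are invented names, not the kernel's stage macros.

```c
#include <stddef.h>
#include <stdio.h>

/* One field list, expanded twice with different definitions of FIELD():
 * first to lay out a struct, then to describe that layout. */
#define SAMPLE_FIELDS(FIELD)		\
	FIELD(unsigned long, ip)	\
	FIELD(int, pid)

/* First expansion: struct members. */
#define FIELD(type, name) type name;
struct sample_entry { SAMPLE_FIELDS(FIELD) };
#undef FIELD

/* Second expansion: one description line per member. */
#define FIELD(type, name)						\
	printf("field:%s %s; offset:%zu; size:%zu;\n", #type, #name,	\
	       offsetof(struct sample_entry, name),			\
	       sizeof(((struct sample_entry *)0)->name));

int main(void)
{
	SAMPLE_FIELDS(FIELD)
	return 0;
}
```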
|
trace_probe.h
    51  offsetof(typeof(field), item), \
    52  sizeof(field.item), is_signed, \
    366  u8 *data, void *field);
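The offsetof(typeof(field), item) / sizeof(field.item) pair describes a member of a local entry variable without restating its struct type. A small sketch of that pattern is shown below (it relies on GNU C typeof, as the kernel does); struct probe_entry and DESCRIBE_MEMBER are illustrative only.

```c
#include <stddef.h>
#include <stdio.h>

/* Report a member's offset, size, and signedness, naming only the
 * local variable `field`, never its struct type. */
#define DESCRIBE_MEMBER(field, item, is_signed)				\
	printf("%s: offset %zu, size %zu, signed %d\n", #item,		\
	       offsetof(typeof(field), item), sizeof(field.item),	\
	       (int)(is_signed))

struct probe_entry {
	unsigned long ip;
	int retval;
};

int main(void)
{
	struct probe_entry field;

	(void)field;	/* only referenced in unevaluated typeof/sizeof */
	DESCRIBE_MEMBER(field, ip, 0);
	DESCRIBE_MEMBER(field, retval, 1);
	return 0;
}
```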
|
/kernel/
rseq.c
    85  #define rseq_unsafe_put_user(t, value, field, error_label) \  argument
    87  unsafe_put_user(value, &t->rseq->field, error_label); \
    88  rseq_kernel_fields(t)->field = value; \
    97  #define rseq_unsafe_put_user(t, value, field, error_label) \  argument
    98  unsafe_put_user(value, &t->rseq->field, error_label)
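The two definitions above show that in the debug configuration rseq_unsafe_put_user() stores the value to the user-visible struct rseq and mirrors it into a kernel-side shadow copy, while the non-debug variant only performs the user store. A userspace sketch of that mirrored-write pattern, with invented types and names, looks like this:

```c
#include <stdio.h>
#include <string.h>

/* Illustrative shared structure and per-task context; not the kernel's. */
struct shared_state {
	unsigned int cpu_id;
	unsigned int node_id;
};

struct task_ctx {
	struct shared_state *shared;	/* visible to the "user" side */
	struct shared_state shadow;	/* private reference copy */
};

/* Every store goes to the shared copy and is mirrored into the shadow,
 * so the two can later be compared for consistency. */
#define ctx_put_field(t, value, field)		\
	do {					\
		(t)->shared->field = (value);	\
		(t)->shadow.field = (value);	\
	} while (0)

int main(void)
{
	struct shared_state s = { 0 };
	struct task_ctx t = { .shared = &s };

	ctx_put_field(&t, 3u, cpu_id);
	ctx_put_field(&t, 1u, node_id);

	printf("in sync: %d\n", !memcmp(&s, &t.shadow, sizeof(s)));
	return 0;
}
```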
|
/kernel/module/
main.c
    580  #define MODINFO_ATTR(field) \  argument
    583  mod->field = kstrdup(s, GFP_KERNEL); \
    588  return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \
    592  return mod->field != NULL; \
    596  kfree(mod->field); \
    597  mod->field = NULL; \
    600  .attr = { .name = __stringify(field), .mode = 0444 }, \
    601  .show = show_modinfo_##field, \
    602  .setup = setup_modinfo_##field, \
    603  .test = modinfo_##field##_exists, \
    [all …]
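MODINFO_ATTR(field) stamps out the setup/show/test/free handlers for each modinfo string field by token-pasting the field name into the generated function names. The standalone sketch below demonstrates the same macro technique; struct modinfo and the generated helper names are simplified stand-ins, not the kernel's struct module or its attribute table.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative container with a couple of string fields. */
struct modinfo {
	char *version;
	char *srcversion;
};

/* One macro generates a full set of accessors per field via ## pasting. */
#define MODINFO_ATTR(field)						\
static void setup_##field(struct modinfo *m, const char *s)		\
{									\
	m->field = strdup(s);						\
}									\
static void show_##field(const struct modinfo *m)			\
{									\
	printf("%s: %s\n", #field, m->field ? m->field : "(none)");	\
}									\
static int field##_exists(const struct modinfo *m)			\
{									\
	return m->field != NULL;					\
}									\
static void free_##field(struct modinfo *m)				\
{									\
	free(m->field);							\
	m->field = NULL;						\
}

MODINFO_ATTR(version)
MODINFO_ATTR(srcversion)

int main(void)
{
	struct modinfo m = { 0 };

	setup_version(&m, "1.0");
	setup_srcversion(&m, "abcdef123456");
	show_version(&m);
	show_srcversion(&m);
	printf("version present: %d\n", version_exists(&m));
	printf("srcversion present: %d\n", srcversion_exists(&m));
	free_version(&m);
	free_srcversion(&m);
	return 0;
}
```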
|
Kconfig
    244  field inserted into their modinfo section, which contains a
    248  the version). With this option, such a "srcversion" field
|
/kernel/bpf/
syscall.c
    170  const struct btf_field *field;  in __bpf_obj_unpin_uptrs() local
    174  for (i = 0, field = rec->fields; i < cnt; i++, field++) {  in __bpf_obj_unpin_uptrs()
    175  if (field->type != BPF_UPTR)  in __bpf_obj_unpin_uptrs()
    178  uptr_addr = obj + field->offset;  in __bpf_obj_unpin_uptrs()
    193  const struct btf_field *field;  in bpf_obj_pin_uptrs() local
    203  for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) {  in bpf_obj_pin_uptrs()
    204  if (field->type != BPF_UPTR)  in bpf_obj_pin_uptrs()
    212  t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id);  in bpf_obj_pin_uptrs()
    639  struct btf_field *field;  in btf_record_find() local
    644  if (!field || !(field->type & field_mask))  in btf_record_find()
    [all …]
|
verifier.c
    334  struct btf_field *field;  member
    6001  t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id);  in mark_uptr_ld_reg()
    9391  if (!field)  in reg_find_field_offset()
    9394  return field;  in reg_find_field_offset()
    10916  if (!field || !field->graph_root.value_btf_id)  in set_rbtree_add_callback_state()
    12864  if (!field) {  in __process_kf_arg_ptr_to_graph_root()
    12880  *head_field = field;  in __process_kf_arg_ptr_to_graph_root()
    12931  if (!field) {  in __process_kf_arg_ptr_to_graph_node()
    12936  field = *node_field;  in __process_kf_arg_ptr_to_graph_node()
    13669  struct btf_field *field = meta->arg_list_head.field;  in check_special_kfunc() local
    [all …]
|
btf.c
    3826  field->kptr.dtor = NULL;  in btf_parse_kptr()
    3876  field->kptr.dtor = (void *)addr;  in btf_parse_kptr()
    3880  field->kptr.btf_id = id;  in btf_parse_kptr()
    3881  field->kptr.btf = kptr_btf;  in btf_parse_kptr()
    3882  field->kptr.module = mod;  in btf_parse_kptr()
    3892  struct btf_field *field,  in btf_parse_graph_root() argument
    3926  field->graph_root.btf = (struct btf *)btf;  in btf_parse_graph_root()
    3928  field->graph_root.node_offset = offset;  in btf_parse_graph_root()
    7210  struct btf_field *field = &rec->fields[i];  in btf_struct_access() local
    7211  u32 offset = field->offset;  in btf_struct_access()
    [all …]
|
helpers.c
    2101  void bpf_list_head_free(const struct btf_field *field, void *list_head,  in bpf_list_head_free() argument
    2126  obj -= field->graph_root.node_offset;  in bpf_list_head_free()
    2131  __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);  in bpf_list_head_free()
    2148  void bpf_rb_root_free(const struct btf_field *field, void *rb_root,  in bpf_rb_root_free() argument
    2165  obj -= field->graph_root.node_offset;  in bpf_rb_root_free()
    2168  __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);  in bpf_rb_root_free()
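bpf_list_head_free() and bpf_rb_root_free() recover the owning object from an embedded node by subtracting field->graph_root.node_offset before dropping it. The small sketch below shows that pointer arithmetic (essentially container_of) in userspace; struct item and node_to_object() are invented for the example.

```c
#include <stdio.h>
#include <stddef.h>

struct list_node {
	struct list_node *next;
};

/* Illustrative object with an embedded node, like a value containing
 * a bpf_list_node or bpf_rb_node. */
struct item {
	long key;
	struct list_node node;
};

/* Walk back from the embedded node to the start of the object. */
static void *node_to_object(struct list_node *n, size_t node_offset)
{
	return (char *)n - node_offset;
}

int main(void)
{
	struct item it = { .key = 42 };
	size_t off = offsetof(struct item, node);
	struct item *back = node_to_object(&it.node, off);

	printf("key = %ld\n", back->key);	/* 42 */
	return 0;
}
```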
|