/kernel/dma/

debug.c
    169   if (entry) {   in dump_entry_trace()
    359   if (entry)   in bucket_find_contain()
    533   &entry->paddr, entry->dev_addr,   in debug_dma_dump_mappings()
    566   &entry->paddr, entry->dev_addr,   in dump_show()
    596   err_printk(entry->dev, entry,   in add_dma_entry()
    607   if (!entry)   in dma_debug_create_entries()
    625   memset(entry, 0, sizeof(*entry));   in __dma_entry_alloc()
    863   count, entry->dev_addr, entry->size,   in dma_debug_device_change()
    864   dir2name[entry->direction], type2name[entry->type]);   in dma_debug_device_change()
    1115  entry->dev_addr, entry->size,   in check_sync()
    [all …]
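
The dma-debug matches above all revolve around one structure: each active DMA mapping gets a tracked entry (device address, size, direction, type) kept in a hash table so stale or leaked mappings can be dumped later. A minimal userspace sketch of that bookkeeping, assuming nothing beyond the pattern visible here; the names (dbg_entry, dbg_add, dbg_dump) are illustrative, not the kernel's API:

```c
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 64

struct dbg_entry {
	unsigned long dev_addr;
	size_t size;
	struct dbg_entry *next;
};

static struct dbg_entry *buckets[NBUCKETS];

/* Record a mapping; the kernel version warns and disables itself on
 * allocation failure, this sketch just drops the record. */
static void dbg_add(unsigned long dev_addr, size_t size)
{
	struct dbg_entry *entry = calloc(1, sizeof(*entry));

	if (!entry)
		return;
	entry->dev_addr = dev_addr;
	entry->size = size;
	entry->next = buckets[dev_addr % NBUCKETS];
	buckets[dev_addr % NBUCKETS] = entry;
}

/* Walk every bucket and print what is still mapped, as
 * debug_dma_dump_mappings() does for real entries. */
static void dbg_dump(void)
{
	for (int i = 0; i < NBUCKETS; i++)
		for (struct dbg_entry *e = buckets[i]; e; e = e->next)
			printf("mapping %#lx size %zu\n", e->dev_addr, e->size);
}

int main(void)
{
	dbg_add(0x1000, 4096);	/* pretend this mapping leaked */
	dbg_dump();
	return 0;
}
```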
|
/kernel/

auditfilter.c
    111   entry = kzalloc(sizeof(*entry), GFP_KERNEL);   in audit_init_entry()
    112   if (unlikely(!entry))   in audit_init_entry()
    117   kfree(entry);   in audit_init_entry()
    122   return entry;   in audit_init_entry()
    273   if (!entry)   in audit_to_entry_common()
    300   return entry;   in audit_to_entry_common()
    461   if (IS_ERR(entry))   in audit_data_to_entry()
    608   if (entry->rule.inode_f && entry->rule.inode_f->op == Audit_not_equal)   in audit_data_to_entry()
    612   return entry;   in audit_data_to_entry()
    833   new = &entry->rule;   in audit_dupe_rule()
    [all …]
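
audit_init_entry() shows the kernel's standard allocate-then-unwind idiom: a zeroed allocation, a second step that can fail, and kfree() on the failure path so the caller only ever sees a fully built entry or NULL. A hedged userspace analogue, with struct names invented for illustration:

```c
#include <stdlib.h>

struct rule { int *fields; };
struct entry { struct rule rule; };

static struct entry *init_entry(unsigned int nfields)
{
	struct entry *entry = calloc(1, sizeof(*entry));	/* ~ kzalloc() */

	if (!entry)
		return NULL;

	entry->rule.fields = calloc(nfields, sizeof(int));
	if (!entry->rule.fields) {
		free(entry);					/* ~ kfree() on unwind */
		return NULL;
	}
	return entry;	/* caller gets a fully initialized entry or NULL */
}
```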
|
async.c
    119   struct async_entry *entry =   in async_run_entry_fn() local
    129   entry->func(entry->data, entry->cookie);   in async_run_entry_fn()
    132   (long long)entry->cookie, entry->func,   in async_run_entry_fn()
    141   kfree(entry);   in async_run_entry_fn()
    161   entry->func = func;   in __async_schedule_node_domain()
    162   entry->data = data;   in __async_schedule_node_domain()
    163   entry->domain = domain;   in __async_schedule_node_domain()
    203   struct async_entry *entry;   in async_schedule_node_domain() local
    215   kfree(entry);   in async_schedule_node_domain()
    262   struct async_entry *entry;   in async_schedule_dev_nocall() local
    [all …]
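
An async_entry packages a callback, its argument, and an ordering cookie; the worker that picks it up calls entry->func(entry->data, entry->cookie) and then frees the entry, as the matches show. A simplified, synchronous userspace sketch of that ownership model (names illustrative; the real code hands entries to workqueues and falls back to running synchronously when allocation fails):

```c
#include <stdlib.h>

typedef void (*async_func_t)(void *data, long long cookie);

struct async_entry {
	async_func_t func;
	void *data;
	long long cookie;
};

/* ~ async_run_entry_fn(): invoke the callback, then the entry owns
 * nothing and frees itself. */
static void run_entry(struct async_entry *entry)
{
	entry->func(entry->data, entry->cookie);
	free(entry);
}

/* ~ __async_schedule_node_domain(): package the work into an entry a
 * worker would later run_entry(). */
static struct async_entry *schedule_entry(async_func_t func, void *data,
					  long long cookie)
{
	struct async_entry *entry = malloc(sizeof(*entry));

	if (!entry)
		return NULL;
	entry->func = func;
	entry->data = data;
	entry->cookie = cookie;
	return entry;
}
```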
|
jump_label.c
    378   jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)   in addr_conflict()
    493   struct jump_entry *entry,   in __jump_label_update() argument
    497   for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {   in __jump_label_update()
    499   arch_jump_label_transform(entry, jump_label_type(entry));   in __jump_label_update()
    504   struct jump_entry *entry,   in __jump_label_update() argument
    508   for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {   in __jump_label_update()
    513   if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {   in __jump_label_update()
    518   BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));   in __jump_label_update()
    899   struct jump_entry *entry;   in jump_label_update() local
    916   entry = static_key_entries(key);   in jump_label_update()
    [all …]
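
The __jump_label_update() loops exploit the fact that the jump-entry table is sorted by key: all patch sites for one static key are contiguous, so the walk stops at the first entry whose key differs. A compact userspace rendition of that walk, with patch_site() standing in for arch_jump_label_transform() and the struct reduced to what the loop needs:

```c
#include <stdio.h>

struct entry {
	int key;
	unsigned long code_site;
};

/* Stand-in for arch_jump_label_transform(); a real implementation
 * rewrites the instruction at the site. */
static void patch_site(unsigned long site)
{
	printf("patching site %#lx\n", site);
}

static void update_key(struct entry *entry, const struct entry *stop, int key)
{
	/* The table is sorted by key, so entries for one key are
	 * contiguous: stop when the key changes or the table ends. */
	for (; entry < stop && entry->key == key; entry++)
		patch_site(entry->code_site);
}
```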
|
kexec_core.c
    485   if (*image->entry != 0)   in kimage_add_entry()
    486   image->entry++;   in kimage_add_entry()
    498   image->entry = ind_page;   in kimage_add_entry()
    502   *image->entry = entry;   in kimage_add_entry()
    503   image->entry++;   in kimage_add_entry()
    504   *image->entry = 0;   in kimage_add_entry()
    538   if (*image->entry != 0)   in kimage_terminate()
    539   image->entry++;   in kimage_terminate()
    541   *image->entry = IND_DONE;   in kimage_terminate()
    545   for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
    [all …]
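
kimage_add_entry() and kimage_terminate() maintain a flat list of tagged words: each add writes the entry, advances, and leaves a zero as a provisional terminator; terminate replaces it with IND_DONE, which the walker at line 545 stops on. A single-page sketch of the same bookkeeping (the real code allocates and chains indirection pages when one fills; the flag value here is illustrative):

```c
#define IND_DONE_FLAG 0x4UL	/* illustrative "done" marker */

static unsigned long page[512];	/* one "indirection page" of entries */

struct image {
	unsigned long *entry;	/* current slot in the list */
};

/* usage: struct image img = { .entry = page };
 * add_entry(&img, ...); ...; terminate(&img); */

static void add_entry(struct image *image, unsigned long entry)
{
	/* Step past a slot already holding an entry, mirroring the
	 * check at line 485. No bounds handling shown. */
	if (*image->entry != 0)
		image->entry++;
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;	/* keep the list terminated between adds */
}

static void terminate(struct image *image)
{
	if (*image->entry != 0)
		image->entry++;
	*image->entry = IND_DONE_FLAG;	/* final marker the walker stops at */
}
```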
|
kexec.c
    22    static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,   in kimage_alloc_init() argument
    34    if ((entry < phys_to_boot_phys(crashk_res.start)) ||   in kimage_alloc_init()
    35    (entry > phys_to_boot_phys(crashk_res.end)))   in kimage_alloc_init()
    45    image->start = entry;   in kimage_alloc_init()
    91    static int do_kexec_load(unsigned long entry, unsigned long nr_segments,   in do_kexec_load() argument
    130   ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);   in do_kexec_load()
    242   SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,   in SYSCALL_DEFINE4() argument
    261   result = do_kexec_load(entry, nr_segments, ksegments, flags);   in SYSCALL_DEFINE4()
    268   COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,   in COMPAT_SYSCALL_DEFINE4() argument
    303   result = do_kexec_load(entry, nr_segments, ksegments, flags);   in COMPAT_SYSCALL_DEFINE4()
|
/kernel/bpf/

mprog.c
    123   *entry_new = entry;   in bpf_mprog_replace()
    131   int total = bpf_mprog_total(entry);   in bpf_mprog_insert()
    136   peer = bpf_mprog_peer(entry);   in bpf_mprog_insert()
    137   bpf_mprog_entry_copy(peer, entry);   in bpf_mprog_insert()
    158   peer = bpf_mprog_peer(entry);   in bpf_mprog_delete()
    159   bpf_mprog_entry_copy(peer, entry);   in bpf_mprog_delete()
    281   idx = bpf_mprog_total(entry);   in bpf_mprog_attach()
    343   if (!bpf_mprog_total(entry))   in bpf_mprog_detach()
    379   idx = bpf_mprog_total(entry);   in bpf_mprog_detach()
    410   if (entry) {   in bpf_mprog_query()
    [all …]
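
bpf_mprog never edits the live entry array in place: bpf_mprog_insert() and bpf_mprog_delete() copy it into its peer half of a double buffer, mutate the copy, and publish that, so readers always observe a consistent array. A hedged userspace sketch of the double-buffer idea; the kernel pairs the flip with RCU, which is omitted here, and the names are illustrative:

```c
#include <string.h>

#define MAX_PROGS 64		/* illustrative bound */

struct mprog_entry {
	void *progs[MAX_PROGS];
	int total;
};

struct mprog_pair {
	struct mprog_entry bufs[2];
	int active;		/* index of the buffer readers use */
};

/* Stage an update: copy the live buffer into its peer and hand the
 * copy to the caller for editing (~ bpf_mprog_peer() + entry_copy()). */
static struct mprog_entry *begin_update(struct mprog_pair *pair)
{
	struct mprog_entry *peer = &pair->bufs[!pair->active];

	memcpy(peer, &pair->bufs[pair->active], sizeof(*peer));
	return peer;
}

/* Publish the edited copy. The kernel makes this flip safe for
 * concurrent readers with RCU; this sketch just swaps the index. */
static void commit_update(struct mprog_pair *pair)
{
	pair->active = !pair->active;
}
```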
|
tcx.c
    35    if (!entry) {   in tcx_prog_attach()
    48    bpf_mprog_commit(entry);   in tcx_prog_attach()
    50    tcx_entry_free(entry);   in tcx_prog_attach()
    74    if (!entry) {   in tcx_prog_detach()
    88    tcx_entry_free(entry);   in tcx_prog_detach()
    104   if (!entry)   in tcx_uninstall()
    119   tcx_entry_free(entry);   in tcx_uninstall()
    152   if (!entry)   in tcx_link_prog_attach()
    164   tcx_entry_free(entry);   in tcx_link_prog_attach()
    182   if (!entry) {   in tcx_link_release()
    [all …]
|
dispatcher.c
    46    struct bpf_dispatcher_prog *entry;   in bpf_dispatcher_add_prog() local
    51    entry = bpf_dispatcher_find_prog(d, prog);   in bpf_dispatcher_add_prog()
    52    if (entry) {   in bpf_dispatcher_add_prog()
    53    refcount_inc(&entry->users);   in bpf_dispatcher_add_prog()
    57    entry = bpf_dispatcher_find_free(d);   in bpf_dispatcher_add_prog()
    58    if (!entry)   in bpf_dispatcher_add_prog()
    62    entry->prog = prog;   in bpf_dispatcher_add_prog()
    63    refcount_set(&entry->users, 1);   in bpf_dispatcher_add_prog()
    71    struct bpf_dispatcher_prog *entry;   in bpf_dispatcher_remove_prog() local
    77    if (!entry)   in bpf_dispatcher_remove_prog()
    [all …]
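
bpf_dispatcher_add_prog() is a find-or-insert over a small slot array with reference counting: an existing slot just gets its user count bumped, otherwise the first free slot is claimed. An illustrative userspace version (slot table size and names invented; the kernel uses a fixed bound of its own):

```c
#include <stddef.h>

#define NSLOTS 16

struct slot {
	void *prog;		/* NULL means the slot is free */
	unsigned int users;
};

static struct slot slots[NSLOTS];

static int add_prog(void *prog)
{
	struct slot *free_slot = NULL;

	for (size_t i = 0; i < NSLOTS; i++) {
		if (slots[i].prog == prog) {
			slots[i].users++;	/* ~ refcount_inc(&entry->users) */
			return 0;
		}
		if (!free_slot && !slots[i].prog)
			free_slot = &slots[i];	/* remember first free slot */
	}
	if (!free_slot)
		return -1;			/* table full */
	free_slot->prog = prog;
	free_slot->users = 1;			/* ~ refcount_set(&entry->users, 1) */
	return 0;
}
```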
|
/kernel/trace/

trace_events_inject.c
    21    void *entry;   in trace_inject_entry() local
    25    if (entry) {   in trace_inject_entry()
    158   void *entry = NULL;   in trace_alloc_entry() local
    162   if (!entry)   in trace_alloc_entry()
    190   return entry;   in trace_alloc_entry()
    199   void *entry = NULL;   in parse_entry() local
    205   *pentry = entry;   in parse_entry()
    206   if (!entry)   in parse_entry()
    230   kfree(entry);   in parse_entry()
    233   entry = *pentry;   in parse_entry()
    [all …]
|
trace_syscalls.c
    28    return &entry->enter_fields;   in syscall_get_enter_fields()
    117   struct syscall_metadata *entry;   in get_syscall_name() local
    120   if (!entry)   in get_syscall_name()
    123   return entry->name;   in get_syscall_name()
    141   if (!entry)   in print_syscall_enter()
    187   if (!entry) {   in print_syscall_exit()
    328   if (!entry)   in ftrace_syscall_enter()
    332   entry->nr = syscall_nr;   in ftrace_syscall_enter()
    371   entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));   in ftrace_syscall_exit()
    372   if (!entry)   in ftrace_syscall_exit()
    [all …]
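
ftrace_syscall_enter()/_exit() follow the common tracing shape visible at lines 328-372: reserve a record sized for the fixed header plus the payload, bail out if there is no room, fill the record in place, commit. A simplified sketch over a flat byte buffer, assuming nothing about the real per-CPU lock-free ring buffer; names are illustrative:

```c
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct syscall_rec {
	int nr;			/* syscall number, as at line 332 */
	unsigned long args[6];
};

static uint8_t buf[4096];
static size_t head;

/* Reserve len bytes, or return NULL when the buffer is full (the
 * tracer simply drops the event in that case). */
static void *reserve(size_t len)
{
	void *p;

	if (head + len > sizeof(buf))
		return NULL;
	p = buf + head;
	head += len;		/* commit is folded into reserve here */
	return p;
}

static void trace_enter(int nr, const unsigned long *args)
{
	struct syscall_rec *entry = reserve(sizeof(*entry));

	if (!entry)
		return;		/* no room: drop the event */
	entry->nr = nr;
	memcpy(entry->args, args, sizeof(entry->args));
}
```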
|
trace_functions_graph.c
    133   entry->graph_ent = *trace;   in __graph_entry()
    323   entry->ret = *trace;   in __trace_graph_return()
    325   entry->rettime = rettime;   in __trace_graph_return()
    462   trace_func_graph_ent_t entry;   in ftrace_graph_trace_args() local
    471   entry = trace_graph_entry;   in ftrace_graph_trace_args()
    479   tr->gops->entryfunc = entry;   in ftrace_graph_trace_args()
    855   if (entry) {   in print_graph_retval()
    916   call = &entry->graph_ent;   in print_graph_entry_leaf()
    1201  entry = (void *)save_buf;   in print_graph_entry()
    1204  call = &entry->graph_ent;   in print_graph_entry()
    [all …]
|
trace_mmiotrace.c
    167   struct trace_entry *entry = iter->ent;   in mmio_print_rw() local
    175   trace_assign_type(field, entry);   in mmio_print_rw()
    212   struct trace_entry *entry = iter->ent;   in mmio_print_map() local
    220   trace_assign_type(field, entry);   in mmio_print_map()
    246   struct trace_entry *entry = iter->ent;   in mmio_print_mark() local
    298   struct trace_mmiotrace_rw *entry;   in __trace_mmiotrace_rw() local
    303   sizeof(*entry), trace_ctx);   in __trace_mmiotrace_rw()
    309   entry->rw = *rw;   in __trace_mmiotrace_rw()
    325   struct trace_mmiotrace_map *entry;   in __trace_mmiotrace_map() local
    330   sizeof(*entry), trace_ctx);   in __trace_mmiotrace_map()
    [all …]
|
trace_events_synth.c
    344   struct synth_trace_event *entry;   in print_synth_event() local
    399   &entry->fields[n_u64],   in print_synth_event()
    440   ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);   in trace_string()
    503   struct synth_trace_event *entry;   in trace_event_raw_event_synth() local
    542   sizeof(*entry) + fields_size);   in trace_event_raw_event_synth()
    543   if (!entry)   in trace_event_raw_event_synth()
    1701  if (!trace_state->entry) {   in __synth_event_trace_start()
    1970  struct synth_trace_event *entry;   in __synth_event_add_val() local
    2016  entry = trace_state->entry;   in __synth_event_add_val()
    2338  struct dentry *entry = NULL;   in trace_events_synth_init() local
    [all …]
|
trace_output.c
    33    struct trace_entry *entry = iter->ent;   in trace_print_bputs_msg_only() local
    36    trace_assign_type(field, entry);   in trace_print_bputs_msg_only()
    49    trace_assign_type(field, entry);   in trace_print_bprintk_msg_only()
    62    trace_assign_type(field, entry);   in trace_print_printk_msg_only()
    306   struct trace_entry *entry;   in trace_raw_output_prep() local
    309   entry = iter->ent;   in trace_raw_output_prep()
    513   if (entry->preempt_count & 0xf)   in trace_print_lat_fmt()
    534   comm, entry->pid, cpu);   in lat_print_generic()
    651   trace_print_lat_fmt(s, entry);   in trace_print_context()
    672   entry = iter->ent;   in trace_print_lat_context()
    [all …]
|
trace_sched_wakeup.c
    399   struct ctx_switch_entry *entry;   in tracing_sched_switch_trace() local
    402   sizeof(*entry), trace_ctx);   in tracing_sched_switch_trace()
    406   entry->prev_pid = prev->pid;   in tracing_sched_switch_trace()
    407   entry->prev_prio = prev->prio;   in tracing_sched_switch_trace()
    409   entry->next_pid = next->pid;   in tracing_sched_switch_trace()
    410   entry->next_prio = next->prio;   in tracing_sched_switch_trace()
    424   struct ctx_switch_entry *entry;   in tracing_sched_wakeup_trace() local
    432   entry->prev_pid = curr->pid;   in tracing_sched_wakeup_trace()
    433   entry->prev_prio = curr->prio;   in tracing_sched_wakeup_trace()
    435   entry->next_pid = wakee->pid;   in tracing_sched_wakeup_trace()
    [all …]
|
trace_fprobe.c
    339   if (!entry)   in NOKPROBE_SYMBOL()
    343   entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);   in NOKPROBE_SYMBOL()
    344   entry->ip = entry_ip;   in NOKPROBE_SYMBOL()
    345   store_trace_args(&entry[1], &tf->tp, fregs, NULL, sizeof(*entry), dsize);   in NOKPROBE_SYMBOL()
    425   if (!entry)   in NOKPROBE_SYMBOL()
    429   entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);   in NOKPROBE_SYMBOL()
    430   entry->func = entry_ip;   in NOKPROBE_SYMBOL()
    470   if (!entry)   in fentry_perf_func()
    475   entry->ip = entry_ip;   in fentry_perf_func()
    477   store_trace_args(&entry[1], &tf->tp, fregs, NULL, sizeof(*entry), dsize);   in fentry_perf_func()
    [all …]
|
ftrace.c
    1193  entry = kmalloc(sizeof(*entry), GFP_KERNEL);   in add_hash_entry()
    1194  if (!entry)   in add_hash_entry()
    2580  if (!entry)   in ftrace_find_rec_direct()
    4790  if (entry)   in enter_record()
    5251  if (!entry)   in ftrace_func_mapper_find_ip()
    5273  if (entry)   in ftrace_func_mapper_add_ip()
    5306  if (!entry)   in ftrace_func_mapper_remove_ip()
    6632  struct ftrace_func_entry *entry = fgd->entry;   in __g_next() local
    6641  fgd->entry = entry;   in __g_next()
    6651  fgd->entry = entry;   in __g_next()
    [all …]
|
ring_buffer_benchmark.c
    88    int *entry;   in read_event() local
    95    entry = ring_buffer_event_data(event);   in read_event()
    96    if (*entry != cpu) {   in read_event()
    112   int *entry;   in read_page() local
    147   entry = ring_buffer_event_data(event);   in read_page()
    148   if (*entry != cpu) {   in read_page()
    160   entry = ring_buffer_event_data(event);   in read_page()
    161   if (*entry != cpu) {   in read_page()
    252   int *entry;   in ring_buffer_producer() local
    261   entry = ring_buffer_event_data(event);   in ring_buffer_producer()
    [all …]
|
tracing_map.c
    520   struct tracing_map_entry *entry;   in __tracing_map_insert() local
    531   test_key = entry->key;   in __tracing_map_insert()
    534   val = READ_ONCE(entry->val);   in __tracing_map_insert()
    572   entry->key = 0;   in __tracing_map_insert()
    582   WRITE_ONCE(entry->val, elt);   in __tracing_map_insert()
    585   return entry->val;   in __tracing_map_insert()
    920   if (!entry)   in destroy_sort_entry()
    923   if (entry->elt_copied)   in destroy_sort_entry()
    926   kfree(entry);   in destroy_sort_entry()
    1088  if (!entry->key || !entry->val)   in tracing_map_sort_entries()
    [all …]
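
__tracing_map_insert() claims a hash slot without locks: compare-and-swap the key from 0, then publish the value with WRITE_ONCE() so readers doing READ_ONCE() see a consistent pair. A C11-atomics sketch of the same claim-then-publish idea with linear probing; the race where a claimant's val is not yet visible is noted but deliberately not handled here:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stddef.h>

struct map_entry {
	_Atomic uint64_t key;	/* 0 means the slot is free */
	_Atomic(void *) val;
};

static void *map_insert(struct map_entry *tbl, size_t nslots,
			uint64_t key, void *elt)
{
	size_t i = key % nslots;

	for (size_t n = 0; n < nslots; n++, i = (i + 1) % nslots) {
		uint64_t expect = 0;

		if (atomic_load(&tbl[i].key) == key)
			return atomic_load(&tbl[i].val);	/* already present */

		if (atomic_compare_exchange_strong(&tbl[i].key, &expect, key)) {
			atomic_store(&tbl[i].val, elt);	/* publish after claiming */
			return elt;
		}
		/* CAS lost: another thread owns this slot; probe onward.
		 * The kernel code also copes with a racing claimant whose
		 * val is not yet visible, which this sketch omits. */
	}
	return NULL;	/* table full */
}
```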
|
/kernel/printk/

index.c
    46    struct pi_entry *entry = pi_get_entry(mod, *pos);   in pi_next() local
    50    return entry;   in pi_next()
    75    const struct pi_entry *entry = v;   in pi_show() local
    85    if (!entry->fmt)   in pi_show()
    88    if (entry->level)   in pi_show()
    89    printk_parse_prefix(entry->level, &level, &flags);   in pi_show()
    91    prefix_len = printk_parse_prefix(entry->fmt, &level, &flags);   in pi_show()
    107   seq_printf(s, " %s:%d %s \"", entry->file, entry->line, entry->func);   in pi_show()
    108   if (entry->subsys_fmt_prefix)   in pi_show()
    109   seq_escape_printf_format(s, entry->subsys_fmt_prefix);   in pi_show()
    [all …]
|
/kernel/power/

console.c
    49    struct pm_vt_switch *entry, *tmp;   in pm_vt_switch_required() local
    60    entry = kmalloc(sizeof(*entry), GFP_KERNEL);   in pm_vt_switch_required()
    61    if (!entry)   in pm_vt_switch_required()
    64    entry->required = required;   in pm_vt_switch_required()
    65    entry->dev = dev;   in pm_vt_switch_required()
    67    list_add(&entry->head, &pm_vt_switch_list);   in pm_vt_switch_required()
    110   struct pm_vt_switch *entry;   in pm_vt_switch() local
    120   list_for_each_entry(entry, &pm_vt_switch_list, head) {   in pm_vt_switch()
    121   if (entry->required)   in pm_vt_switch()
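
pm_vt_switch_required() allocates one entry per device and links it onto a global list; pm_vt_switch() then scans that list, and a single entry with ->required set is enough to force the VT switch. A minimal userspace analogue using a singly linked list in place of the kernel's list_head (names illustrative):

```c
#include <stdbool.h>
#include <stdlib.h>

struct vt_switch {
	struct vt_switch *next;
	const void *dev;	/* opaque device cookie */
	bool required;
};

static struct vt_switch *vt_list;

/* ~ pm_vt_switch_required(): one entry per device, pushed on a list. */
static void vt_switch_required(const void *dev, bool required)
{
	struct vt_switch *entry = malloc(sizeof(*entry));

	if (!entry)
		return;
	entry->required = required;
	entry->dev = dev;
	entry->next = vt_list;	/* ~ list_add(&entry->head, &pm_vt_switch_list) */
	vt_list = entry;
}

/* ~ pm_vt_switch(): any device that registered "required" decides. */
static bool vt_switch_needed(void)
{
	for (struct vt_switch *entry = vt_list; entry; entry = entry->next)
		if (entry->required)
			return true;
	return false;
}
```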
|
/kernel/events/

callchain.c
    45    __weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,   in perf_callchain_user() argument
    192   cur_ip = &entry->ip[start_entry_idx];   in fixup_uretprobe_trampoline_entries()
    193   last_ip = &entry->ip[entry->nr - 1];   in fixup_uretprobe_trampoline_entries()
    223   struct perf_callchain_entry *entry;   in get_perf_callchain() local
    227   entry = get_callchain_entry(&rctx);   in get_perf_callchain()
    228   if (!entry)   in get_perf_callchain()
    231   ctx.entry = entry;   in get_perf_callchain()
    233   ctx.nr = entry->nr = init_nr;   in get_perf_callchain()
    258   start_entry_idx = entry->nr;   in get_perf_callchain()
    260   fixup_uretprobe_trampoline_entries(entry, start_entry_idx);   in get_perf_callchain()
    [all …]
|
/kernel/sched/

wait.c
    64    list_add(&wq_entry->entry, head);   in add_wait_queue_priority_exclusive()
    101   if (&curr->entry == &wq_head->head)   in __wake_up_common()
    254   if (list_empty(&wq_entry->entry))   in prepare_to_wait()
    270   if (list_empty(&wq_entry->entry)) {   in prepare_to_wait_exclusive()
    285   INIT_LIST_HEAD(&wq_entry->entry);   in init_wait_entry()
    308   list_del_init(&wq_entry->entry);   in prepare_to_wait_event()
    311   if (list_empty(&wq_entry->entry)) {   in prepare_to_wait_event()
    334   if (likely(list_empty(&wait->entry)))   in do_wait_intr()
    351   if (likely(list_empty(&wait->entry)))   in do_wait_intr_irq()
    395   list_del_init(&wq_entry->entry);   in finish_wait()
    [all …]
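
The wait-queue code rests on one invariant visible in these matches: a detached wq_entry's list node points at itself, so list_empty(&wq_entry->entry) means "not queued", prepare_to_wait() can be called repeatedly, and finish_wait() uses list_del_init() to restore the self-pointing state. A tiny userspace rendition of that idiom, with the doubly linked list written out by hand:

```c
#include <stdbool.h>

struct node { struct node *next, *prev; };

/* ~ INIT_LIST_HEAD(): a detached node points at itself. */
static void node_init(struct node *n) { n->next = n->prev = n; }

/* ~ list_empty() on the entry's own node: "am I queued?" */
static bool node_unlinked(const struct node *n) { return n->next == n; }

static void node_add(struct node *head, struct node *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

/* ~ prepare_to_wait(): idempotent thanks to the self-pointing state. */
static void prepare(struct node *head, struct node *entry)
{
	if (node_unlinked(entry))
		node_add(head, entry);
}

/* ~ finish_wait(): list_del_init() = unlink, then self-point again. */
static void finish(struct node *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
	node_init(entry);
}
```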
|
/kernel/locking/

lockdep.c
    1435  if (!entry)   in add_lock_to_list()
    1440  entry->dep = dep;   in add_lock_to_list()
    1448  list_add_tail_rcu(&entry->entry, head);   in add_lock_to_list()
    1703  &lock->entry, struct lock_list, entry);   in __bfs_next()
    1815  list_for_each_entry_rcu(entry, head, entry) {   in __bfs()
    2430  entry = get_lock_parent(entry);   in print_shortest_lock_dependencies()
    2485  entry = get_lock_parent(entry);   in print_shortest_lock_dependencies_backwards()
    4106  entry = get_lock_parent(entry);   in print_irq_inversion_bug()
    4108  } while (entry && entry != root && (depth >= 0));   in print_irq_inversion_bug()
    6257  if (entry->class != class && entry->links_to != class)   in zap_class()
    [all …]
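
lockdep's BFS (__bfs(), line 1815) records a parent pointer in each lock_list it visits; the print_shortest_lock_dependencies() and print_irq_inversion_bug() loops then replay the path by following get_lock_parent() back toward the root. A generic sketch of that parent-chain replay, with the struct reduced to the fields the walk needs (names illustrative):

```c
#include <stdio.h>

struct lock_list {
	struct lock_list *parent;	/* filled in during the BFS */
	const char *name;
};

/* Walk from the found node back toward the BFS root, printing one
 * dependency per level; the kernel bounds this with the BFS depth. */
static void print_path(struct lock_list *leaf, struct lock_list *root)
{
	struct lock_list *entry = leaf;
	int depth = 0;

	while (entry) {
		printf("%*s-> %s\n", 2 * depth++, "", entry->name);
		if (entry == root)
			break;
		entry = entry->parent;	/* ~ get_lock_parent(entry) */
	}
}
```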
|