| /kernel/ |
| A D | pid.c | 59    .nr = 0,
|     |       | 167   int i, nr;  in alloc_pid() local
|     |       | 223   nr = -EEXIST;  in alloc_pid()
|     |       | 243   if (nr < 0) {  in alloc_pid()
|     |       | 244   retval = (nr == -ENOSPC) ? -EAGAIN : nr;  in alloc_pid()
|     |       | 248   pid->numbers[i].nr = nr;  in alloc_pid()
|     |       | 492   pid_t nr = 0;  in pid_nr_ns() local
|     |       | 497   nr = upid->nr;  in pid_nr_ns()
|     |       | 499   return nr;  in pid_nr_ns()
|     |       | 512   pid_t nr = 0;  in __task_pid_nr_ns() local
|     |       | [all …]
|
| A D | softirq.c | 719   __raise_softirq_irqoff(nr);  in raise_softirq_irqoff()
|     |           | 734   void raise_softirq(unsigned int nr)  in raise_softirq() argument
|     |           | 739   raise_softirq_irqoff(nr);  in raise_softirq()
|     |           | 743   void __raise_softirq_irqoff(unsigned int nr)  in __raise_softirq_irqoff() argument
|     |           | 746   trace_softirq_raise(nr);  in __raise_softirq_irqoff()
|     |           | 747   or_softirq_pending(1UL << nr);  in __raise_softirq_irqoff()
|     |           | 750   void open_softirq(int nr, void (*action)(void))  in open_softirq() argument
|     |           | 752   softirq_vec[nr].action = action;  in open_softirq()
|     |           | 1027  void raise_ktimers_thread(unsigned int nr)  in raise_ktimers_thread() argument
|     |           | 1029  trace_softirq_raise(nr);  in raise_ktimers_thread()
|     |           | [all …]
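The softirq.c hits show both halves of the softirq interface: open_softirq() records a handler in softirq_vec[nr], and raise_softirq()/__raise_softirq_irqoff() set the per-CPU pending bit (1UL << nr) so the handler runs on the next interrupt exit. A minimal sketch of that pairing, assuming built-in code and mirroring the no-argument action signature shown in the listing; MY_SOFTIRQ and all function names are hypothetical, since softirq slots are a fixed enum in <linux/interrupt.h> and cannot be allocated at run time.

```c
#include <linux/init.h>
#include <linux/interrupt.h>

/* Hypothetical slot for illustration only: real code uses one of the fixed
 * enum values (HI_SOFTIRQ, TIMER_SOFTIRQ, TASKLET_SOFTIRQ, ...). */
#define MY_SOFTIRQ      TASKLET_SOFTIRQ

/* Runs in softirq context: interrupts enabled, but sleeping is not allowed. */
static void my_softirq_action(void)
{
        /* ... drain a per-CPU queue filled by the hard-irq handler ... */
}

static void __init my_subsys_init(void)
{
        /* Stores the handler in softirq_vec[nr].action, as open_softirq() does. */
        open_softirq(MY_SOFTIRQ, my_softirq_action);
}

static irqreturn_t my_hardirq_handler(int irq, void *dev_id)
{
        /* Sets the pending bit (1UL << nr); the action runs on irq exit,
         * or in ksoftirqd under heavy load. */
        raise_softirq(MY_SOFTIRQ);
        return IRQ_HANDLED;
}
```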
|
| A D | seccomp.c | 254   sd->nr = syscall_get_nr(task, regs);  in populate_seccomp_data()
|     |           | 370   int syscall_nr = sd->nr;  in seccomp_cache_check_allow()
|     |           | 765   if (sd->nr == __NR_uretprobe)  in seccomp_is_const_allow()
|     |           | 778   reg_value = sd->nr;  in seccomp_is_const_allow()
|     |           | 838   int nr;  in seccomp_cache_prepare_bitmap() local
|     |           | 848   for (nr = 0; nr < bitmap_size; nr++) {  in seccomp_cache_prepare_bitmap()
|     |           | 850   if (!test_bit(nr, bitmap))  in seccomp_cache_prepare_bitmap()
|     |           | 853   sd.nr = nr;  in seccomp_cache_prepare_bitmap()
|     |           | 864   __clear_bit(nr, bitmap);  in seccomp_cache_prepare_bitmap()
|     |           | 2504  int nr;  in device_initcall() local
|     |           | [all …]
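populate_seccomp_data() is where the kernel fills seccomp_data->nr with the syscall number that user-supplied filters inspect (and that seccomp_cache_prepare_bitmap() sweeps over). A minimal userspace sketch of such a classic-BPF filter follows; the choice of __NR_uname and EPERM is purely illustrative, and a real filter must also validate seccomp_data->arch before trusting nr.

```c
#include <errno.h>
#include <stddef.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <sys/prctl.h>
#include <sys/syscall.h>

static int install_filter(void)
{
        struct sock_filter insns[] = {
                /* A = seccomp_data->nr, filled by populate_seccomp_data() */
                BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                         offsetof(struct seccomp_data, nr)),
                /* if (nr == __NR_uname) return ERRNO(EPERM); */
                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_uname, 0, 1),
                BPF_STMT(BPF_RET | BPF_K,
                         SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA)),
                /* everything else is allowed */
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
                .len = sizeof(insns) / sizeof(insns[0]),
                .filter = insns,
        };

        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                return -1;
        return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}
```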
|
| A D | ptrace.c | 726   if (arg.nr < 0)  in ptrace_peek_siginfo()
|     |          | 738   for (i = 0; i < arg.nr; ) {  in ptrace_peek_siginfo()
|     |          | 924   info->entry.nr = syscall_get_nr(child, regs);  in ptrace_get_syscall_info_entry()
|     |          | 1028  int nr = info->entry.nr;  in ptrace_set_syscall_info_entry() local
|     |          | 1036  if (nr != info->entry.nr)  in ptrace_set_syscall_info_entry()
|     |          | 1050  syscall_set_nr(child, regs, nr);  in ptrace_set_syscall_info_entry()
|     |          | 1057  if (nr != -1)  in ptrace_set_syscall_info_entry()
|
| A D | notifier.c | 118  int ret, nr = 0;  in notifier_call_chain_robust() local
|     |            | 120  ret = notifier_call_chain(nl, val_up, v, -1, &nr);  in notifier_call_chain_robust()
|     |            | 122  notifier_call_chain(nl, val_down, v, nr-1, NULL);  in notifier_call_chain_robust()
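notifier_call_chain_robust() records in nr how many callbacks were invoked with val_up, so that on failure the first nr-1 of them can be unwound with val_down. A sketch of a hypothetical user of the public blocking_notifier_call_chain_robust() wrapper, assuming that wrapper is available in this tree; the event values and callback are made up for illustration.

```c
#include <linux/errno.h>
#include <linux/notifier.h>

#define MY_EVENT_UP     1       /* hypothetical "prepare" event */
#define MY_EVENT_DOWN   2       /* hypothetical "undo" event */

static BLOCKING_NOTIFIER_HEAD(my_chain);

static int my_listener(struct notifier_block *nb, unsigned long event, void *data)
{
        if (event == MY_EVENT_UP && !data)
                return notifier_from_errno(-EINVAL);    /* veto: triggers rollback */
        return NOTIFY_OK;
}

static struct notifier_block my_nb = {
        .notifier_call = my_listener,
};

static void my_register(void)
{
        blocking_notifier_chain_register(&my_chain, &my_nb);
}

static int my_publish(void *data)
{
        /* On a veto, listeners that already saw MY_EVENT_UP are called again
         * with MY_EVENT_DOWN (the nr-1 rollback in notifier_call_chain_robust()). */
        return blocking_notifier_call_chain_robust(&my_chain, MY_EVENT_UP,
                                                   MY_EVENT_DOWN, data);
}
```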
|
| A D | pid_namespace.c | 193  int nr;  in zap_pid_ns_processes() local
|     |                 | 226  nr = 2;  in zap_pid_ns_processes()
|     |                 | 227  idr_for_each_entry_continue(&pid_ns->idr, pid, nr) {  in zap_pid_ns_processes()
|
| A D | fork.c | 2571  pid_t nr;  in kernel_clone() local
|     |        | 2618  nr = pid_vnr(pid);  in kernel_clone()
|     |        | 2621  put_user(nr, args->parent_tid);  in kernel_clone()
|     |        | 2648  return nr;  in kernel_clone()
|
| /kernel/bpf/ |
| A D | stackmap.c | 22   u32 nr;  member
|     |            | 238  if (trace->nr <= skip)  in __bpf_get_stackid()
|     |            | 242  trace_nr = trace->nr - skip;  in __bpf_get_stackid()
|     |            | 262  new_bucket->nr = trace_nr;  in __bpf_get_stackid()
|     |            | 292  new_bucket->nr = trace_nr;  in __bpf_get_stackid()
|     |            | 376  __u64 nr = trace->nr;  in BPF_CALL_3() local
|     |            | 378  trace->nr = nr_kernel;  in BPF_CALL_3()
|     |            | 382  trace->nr = nr;  in BPF_CALL_3()
|     |            | 607  __u64 nr = trace->nr;  in BPF_CALL_4() local
|     |            | 609  trace->nr = nr_kernel;  in BPF_CALL_4()
|     |            | [all …]
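The __bpf_get_stackid() hits store the captured frame count in the bucket's nr member; from the BPF program side this path is driven by the bpf_get_stackid() helper against a BPF_MAP_TYPE_STACK_TRACE map. A sketch assuming a libbpf-style build; the attach point and map sizes are arbitrary illustrations.

```c
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

#define MAX_STACK_DEPTH 127     /* matches PERF_MAX_STACK_DEPTH */

struct {
        __uint(type, BPF_MAP_TYPE_STACK_TRACE);
        __uint(max_entries, 1024);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, MAX_STACK_DEPTH * sizeof(__u64));
} stack_traces SEC(".maps");

SEC("kprobe/submit_bio")        /* illustrative attach point */
int record_stack(struct pt_regs *ctx)
{
        /* Hashes the current kernel stack into the map and returns its id;
         * the bucket's nr member ends up holding the number of frames. */
        long id = bpf_get_stackid(ctx, &stack_traces, 0);

        if (id < 0)
                return 0;
        /* ... use id as a key into a counting map ... */
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
```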
|
| /kernel/locking/ |
| A D | lockdep_proc.c | 414  unsigned long nl, nr;  in lock_stat_cmp() local
|     |                | 416  nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;  in lock_stat_cmp()
|     |                | 417  nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;  in lock_stat_cmp()
|     |                | 419  return nr - nl;  in lock_stat_cmp()
|     |                | 453  seq_printf(m, "%14lu", lt->nr);  in seq_lock_time()
|     |                | 457  seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);  in seq_lock_time()
|     |                | 507  if (stats->write_holdtime.nr) {  in seq_stats()
|     |                | 508  if (stats->read_holdtime.nr)  in seq_stats()
|     |                | 520  if (stats->read_holdtime.nr) {  in seq_stats()
|     |                | 529  if (stats->read_waittime.nr + stats->write_waittime.nr == 0)  in seq_stats()
|     |                | [all …]
|
| /kernel/trace/ |
| A D | trace_syscalls.c | 109  if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)  in syscall_nr_to_meta()
|     |                  | 112  return syscalls_metadata[nr];  in syscall_nr_to_meta()
|     |                  | 138  syscall = trace->nr;  in print_syscall_enter()
|     |                  | 184  syscall = trace->nr;  in print_syscall_exit()
|     |                  | 332  entry->nr = syscall_nr;  in ftrace_syscall_enter()
|     |                  | 376  entry->nr = syscall_nr;  in ftrace_syscall_exit()
|     |                  | 520  return (unsigned long)sys_call_table[nr];  in arch_syscall_addr()
|     |                  | 583  param.syscall_nr = rec->nr;  in perf_call_bpf_enter()
|     |                  | 632  rec->nr = syscall_nr;  in perf_syscall_enter()
|     |                  | 694  param.syscall_nr = rec->nr;  in perf_call_bpf_exit()
|     |                  | [all …]
|
| A D | trace_btf.c | 43  const struct btf_param *btf_get_func_param(const struct btf_type *func_proto, s32 *nr)  in btf_get_func_param() argument
|     |             | 48  *nr = btf_type_vlen(func_proto);  in btf_get_func_param()
|     |             | 49  if (*nr > 0)  in btf_get_func_param()
|
| A D | trace_btf.h | 7 s32 *nr);
|
| A D | trace_probe.c | 471  s32 nr;  in query_btf_context() local
|     |               | 487  nr = 0;  in query_btf_context()
|     |               | 488  param = btf_get_func_param(type, &nr);  in query_btf_context()
|     |               | 492  nr--;  in query_btf_context()
|     |               | 497  if (nr > 0) {  in query_btf_context()
|     |               | 498  ctx->nr_params = nr;  in query_btf_context()
|
| A D | trace_output.c | 702  s32 tid, nr = 0;  in print_function_args() local
|     |                | 718  param = btf_get_func_param(t, &nr);  in print_function_args()
|     |                | 722  for (a = 0, p = 0; p < nr; a++, p++) {  in print_function_args()
|
| /kernel/unwind/ |
| A D | user.c | 113  trace->nr = 0;  in unwind_user()
|     |        | 122  trace->entries[trace->nr++] = state.ip;  in unwind_user()
|     |        | 123  if (trace->nr >= max_entries)  in unwind_user()
|
| A D | deferred.c | 135  trace->nr = cache->nr_entries;  in unwind_user_faultable()
|     |            | 139  trace->nr = 0;  in unwind_user_faultable()
|     |            | 142  cache->nr_entries = trace->nr;  in unwind_user_faultable()
|     |            | 168  trace.nr = 0;  in process_unwind_deferred()
|
| /kernel/events/ |
| A D | callchain.c | 193  last_ip = &entry->ip[entry->nr - 1];  in fixup_uretprobe_trampoline_entries()
|     |             | 233  ctx.nr = entry->nr = init_nr;  in get_perf_callchain()
|     |             | 258  start_entry_idx = entry->nr;  in get_perf_callchain()
|
| A D | hw_breakpoint.c | 391  int nr;  in max_bp_pinned_slots() local
|     |                 | 393  nr = info->cpu_pinned;  in max_bp_pinned_slots()
|     |                 | 395  nr += max_task_bp_pinned(cpu, type);  in max_bp_pinned_slots()
|     |                 | 397  nr += task_bp_pinned(cpu, bp, type);  in max_bp_pinned_slots()
|     |                 | 399  pinned_slots = max(nr, pinned_slots);  in max_bp_pinned_slots()
|
| A D | core.c | 1424   u32 nr;  in perf_event_pid_type() local
|     |        | 1434   nr = -1;  in perf_event_pid_type()
|     |        | 1435   return nr;  in perf_event_pid_type()
|     |        | 1930   int nr = 1;  in __perf_event_read_size() local
|     |        | 3873   heap->nr++;  in __heap_add()
|     |        | 3912   .nr = 0,  in visit_groups_merge()
|     |        | 3925   .nr = 0,  in visit_groups_merge()
|     |        | 7969   u64 nr = 0;  in perf_output_sample() local
|     |        | 10414  u64 nr, offset;  in perf_swevent_set_period() local
|     |        | 10430  return nr;  in perf_swevent_set_period()
|     |        | [all …]
|
| /kernel/irq/ |
| A D | irqdesc.c | 159  unsigned int irq_set_nr_irqs(unsigned int nr)  in irq_set_nr_irqs() argument
|     |           | 161  nr_irqs = nr;  in irq_set_nr_irqs()
|     |           | 163  return nr;  in irq_set_nr_irqs()
|     |           | 541  static bool irq_expand_nr_irqs(unsigned int nr)  in irq_expand_nr_irqs() argument
|     |           | 543  if (nr > MAX_SPARSE_IRQS)  in irq_expand_nr_irqs()
|     |           | 545  nr_irqs = nr;  in irq_expand_nr_irqs()
|     |           | 645  static inline bool irq_expand_nr_irqs(unsigned int nr)  in irq_expand_nr_irqs() argument
|
| /kernel/rcu/ |
| A D | rcuscale.c | 689  int nr;  in compute_real() local
|     |            | 692  nr = n;  in compute_real()
|     |            | 694  nr = num_online_cpus() + 1 + n;  in compute_real()
|     |            | 695  if (nr <= 0)  in compute_real()
|     |            | 696  nr = 1;  in compute_real()
|     |            | 698  return nr;  in compute_real()
|
| /kernel/sched/ |
| A D | wait.c | 158  void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)  in __wake_up_locked() argument
|     |        | 160  __wake_up_common(wq_head, mode, nr, 0, NULL);  in __wake_up_locked()
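__wake_up_locked() forwards nr to __wake_up_common() as the maximum number of exclusive waiters to wake. The sketch below shows the same semantic through the public wake_up_nr() / wait_event_interruptible_exclusive() pair; the worker/producer split and the work_available counter are hypothetical.

```c
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(worker_wq);
static atomic_t work_available = ATOMIC_INIT(0);

static int worker_fn(void *unused)
{
        for (;;) {
                /* Exclusive waiter: counts once against nr when woken. */
                if (wait_event_interruptible_exclusive(worker_wq,
                                        atomic_read(&work_available) > 0))
                        break;          /* interrupted by a signal */
                atomic_dec(&work_available);
                /* ... handle one work item ... */
        }
        return 0;
}

static void submit_work(int nr_items)
{
        atomic_add(nr_items, &work_available);
        /* Wake at most nr_items exclusive waiters instead of the whole queue. */
        wake_up_nr(&worker_wq, nr_items);
}
```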
|
| A D | ext.c | 1942  WRITE_ONCE(dsq->nr, dsq->nr + delta);  in dsq_mod_nr()
|     |       | 3115  if (rq->scx.local_dsq.nr)  in balance_one()
|     |       | 3146  if (rq->scx.local_dsq.nr)  in balance_one()
|     |       | 4383  if (dsq->nr) {  in destroy_dsq()
|     |       | 4385  dsq->id, dsq->nr);  in destroy_dsq()
|     |       | 6965  ret = READ_ONCE(this_rq()->scx.local_dsq.nr);  in scx_bpf_dsq_nr_queued()
|     |       | 6971  ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);  in scx_bpf_dsq_nr_queued()
|     |       | 6977  ret = READ_ONCE(dsq->nr);  in scx_bpf_dsq_nr_queued()
|
| /kernel/futex/ |
| A D | syscalls.c | 341  int, nr,  in SYSCALL_DEFINE4() argument
|     |            | 354  return futex_wake(uaddr, FLAGS_STRICT | flags, nr, mask);  in SYSCALL_DEFINE4()
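The SYSCALL_DEFINE4() hit passes nr, the maximum number of waiters to wake, into futex_wake(). The same nr semantics are visible from userspace through the classic multiplexed futex(2) call; a sketch, with the waiter count chosen by the caller.

```c
#define _GNU_SOURCE
#include <stdint.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Wake up to nr threads blocked in FUTEX_WAIT on *uaddr.
 * Returns the number of waiters actually woken, or -1 on error. */
static long wake_waiters(uint32_t *uaddr, int nr)
{
        return syscall(SYS_futex, uaddr, FUTEX_WAKE, nr, NULL, NULL, 0);
}
```

Passing INT_MAX as nr is the conventional way to wake every waiter on the word.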
|
| /kernel/time/ |
| A D | posix-timers.c | 91  static struct timer_hash_bucket *hash_bucket(struct signal_struct *sig, unsigned int nr)  in hash_bucket() argument
|     |                | 93  return &timer_buckets[jhash2((u32 *)&sig, sizeof(sig) / sizeof(u32), nr) & timer_hashmask];  in hash_bucket()
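hash_bucket() folds a signal_struct pointer and a per-process timer index into a bucket with jhash2(). A stand-alone sketch of the same pattern with hypothetical names and table size; jhash2() hashes an array of u32 words and takes the index as the initial seed.

```c
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/types.h>

#define MY_HASH_BITS    9
#define MY_HASH_SIZE    (1u << MY_HASH_BITS)
#define MY_HASH_MASK    (MY_HASH_SIZE - 1)

static struct hlist_head my_buckets[MY_HASH_SIZE];

/* Hash an (owner pointer, small id) pair into a power-of-two bucket array,
 * mirroring hash_bucket(): the pointer supplies the u32 words, nr the seed. */
static struct hlist_head *my_bucket(void *owner, unsigned int nr)
{
        u32 h = jhash2((u32 *)&owner, sizeof(owner) / sizeof(u32), nr);

        return &my_buckets[h & MY_HASH_MASK];
}
```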
|