/kernel/

rseq.c

     41  struct rseq __user *rseq = t->rseq;  in rseq_validate_ro_fields()
     65  t->pid, t->comm,  in rseq_validate_ro_fields()
    166  u32 mm_cid = task_mm_cid(t);  in rseq_update_cpu_node_id()
    171  if (rseq_validate_ro_fields(t))  in rseq_update_cpu_node_id()
    188  trace_rseq_update(t);  in rseq_update_cpu_node_id()
    206  if (rseq_validate_ro_fields(t))  in rseq_reset_rseq_cpu_node_id()
    348  event_mask = t->rseq_event_mask;  in rseq_need_restart()
    349  t->rseq_event_mask = 0;  in rseq_need_restart()
    400  return clear_rseq_cs(t->rseq);  in rseq_ip_fixup()
    404  ret = clear_rseq_cs(t->rseq);  in rseq_ip_fixup()
    [all …]
|
kcov.c

    216  t = current;  in __sanitizer_cov_trace_pc()
    245  t = current;  in write_comp_data()
    382  kcov_stop(t);  in kcov_task_reset()
    454  kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);  in kcov_task_exit()
    477  if (WARN_ON(kcov->t != t)) {  in kcov_task_exit()
    605  t = current;  in kcov_ioctl_locked()
    606  if (kcov->t != NULL || t->kcov != NULL)  in kcov_ioctl_locked()
    615  kcov->t = t;  in kcov_ioctl_locked()
    625  if (WARN_ON(kcov->t != t))  in kcov_ioctl_locked()
    634  if (kcov->t != NULL || t->kcov != NULL)  in kcov_ioctl_locked()
    [all …]
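The kcov.c hits above are the kernel half of the KCOV coverage interface; `kcov->t` records which task owns a coverage buffer, which is why kcov_ioctl_locked() rejects a second owner. For orientation, a minimal userspace sketch of driving it, following the flow documented in Documentation/dev-tools/kcov.rst (error handling trimmed; assumes debugfs is mounted and the kernel was built with CONFIG_KCOV):

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#define KCOV_INIT_TRACE	_IOR('c', 1, unsigned long)
#define KCOV_ENABLE	_IO('c', 100)
#define KCOV_DISABLE	_IO('c', 101)
#define KCOV_TRACE_PC	0
#define COVER_SIZE	(64 << 10)

int main(void)
{
	/* One fd collects coverage for one thread at a time. */
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);

	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);

	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED); /* reset PC counter */
	read(-1, NULL, 0);                                /* the traced syscall */

	unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (unsigned long i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);          /* covered kernel PCs */

	ioctl(fd, KCOV_DISABLE, 0);
	return 0;
}
```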
|
hung_task.c

    164  task->comm, task->pid, t->comm, t->pid);  in debug_show_blocker()
    168  task->comm, task->pid, t->comm, t->pid);  in debug_show_blocker()
    177  sched_show_task(t);  in debug_show_blocker()
    189  unsigned long switch_count = t->nvcsw + t->nivcsw;  in check_hung_task()
    236  t->comm, t->pid, (jiffies - t->last_switch_time) / HZ);  in check_hung_task()
    245  sched_show_task(t);  in check_hung_task()
    246  debug_show_blocker(t);  in check_hung_task()
    270  get_task_struct(t);  in rcu_lock_break()
    275  put_task_struct(t);  in rcu_lock_break()
    482  long t;  in watchdog() local
    [all …]
|
softirq.c

    775  t->next = NULL;  in __tasklet_schedule_common()
    803  t->use_callback ? (void *)t->callback : (void *)t->func);  in tasklet_clear_sched()
    828  trace_tasklet_entry(t, t->callback);  in tasklet_action_common()
    829  t->callback(t);  in tasklet_action_common()
    830  trace_tasklet_exit(t, t->callback);  in tasklet_action_common()
    832  trace_tasklet_entry(t, t->func);  in tasklet_action_common()
    833  t->func(t->data);  in tasklet_action_common()
    834  trace_tasklet_exit(t, t->func);  in tasklet_action_common()
    868  t->state = 0;  in tasklet_setup()
    872  t->data = 0;  in tasklet_setup()
    [all …]
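The tasklet_action_common() hits show the two callback conventions a tasklet can carry: `t->callback(t)` for tasklet_setup() users, and the legacy `t->func(t->data)` pair otherwise. A minimal sketch of the modern callback style; `my_dev` and its fields are hypothetical:

```c
#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct tl;	/* hypothetical driver state */
	int pending_events;
};

/* Bottom half: runs in softirq context after tasklet_schedule(). */
static void my_dev_tasklet(struct tasklet_struct *t)
{
	struct my_dev *dev = from_tasklet(dev, t, tl);

	/* ... process dev->pending_events ... */
}

static void my_dev_init(struct my_dev *dev)
{
	tasklet_setup(&dev->tl, my_dev_tasklet);
}

/* Typically kicked from the IRQ (top half) handler. */
static void my_dev_kick(struct my_dev *dev)
{
	tasklet_schedule(&dev->tl);
}

static void my_dev_teardown(struct my_dev *dev)
{
	tasklet_kill(&dev->tl);	/* waits out any in-flight run */
}
```

from_tasklet() is container_of() under the hood, which is why the embedded tasklet_struct needs no back-pointer to its owner.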
|
signal.c

     113  if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))  in sig_ignored()
     162  PENDING(&t->pending, &t->blocked) ||  in recalc_sigpending_tsk()
     163  PENDING(&t->signal->shared_pending, &t->blocked) ||  in recalc_sigpending_tsk()
     512  flush_signals(t);  in ignore_signals()
     735  kick_process(t);  in signal_wake_up_state()
     858  ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);  in ptrace_trap_notify()
     974  t = p;  in complete_signal()
     987  t = next_thread(t);  in complete_signal()
    1972  t = current;  in posixtimer_get_target()
    1973  return t;  in posixtimer_get_target()
    [all …]
|
kallsyms_selftest.c

    158  u64 t0, t1, t;  in lookup_name() local
    165  t = t1 - t0;  in lookup_name()
    166  if (t < stat->min)  in lookup_name()
    167  stat->min = t;  in lookup_name()
    169  if (t > stat->max)  in lookup_name()
    170  stat->max = t;  in lookup_name()
    173  stat->sum += t;  in lookup_name()
    436  struct task_struct *t;  in kallsyms_test_init() local
    438  t = kthread_run_on_cpu(test_entry, NULL, 0, "kallsyms_test");  in kallsyms_test_init()
    439  if (IS_ERR(t)) {  in kallsyms_test_init()
    [all …]
|
/kernel/bpf/

btf.c

     521  return !t || btf_type_nosize(t);  in btf_type_nosize_or_null()
     619  t = btf_type_by_id(btf, t->type);  in btf_type_skip_modifiers()
     914  if (!t || !btf_type_is_int(t))  in btf_member_is_reg_int()
     949  t = btf_type_by_id(btf, t->type);  in btf_type_skip_qualifiers()
    1893  v->t = t;  in env_stack_push()
    2637  const struct btf_type *t = v->t;  in btf_modifier_resolve() local
    2680  const struct btf_type *t = v->t;  in btf_var_resolve() local
    2726  const struct btf_type *t = v->t;  in btf_ptr_resolve() local
    4904  if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 &&  in btf_float_check_meta()
    8433  if (!t || !btf_type_is_ptr(t))  in BTF_ID_LIST_GLOBAL()
    [all …]
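The two `t = btf_type_by_id(btf, t->type)` hits (lines 619 and 949) are the core of modifier skipping: BTF chains modifiers (const, volatile, typedefs, ...) through `t->type`, so resolving a type means walking that chain until a non-modifier kind appears. A hedged userspace analogue using libbpf's read-only accessors; the helper name is mine, though libbpf keeps an equivalent internal one:

```c
#include <bpf/btf.h>

/* Follow the modifier/typedef chain until a "real" type is reached,
 * mirroring the in-kernel btf_type_skip_modifiers() loop above. */
static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);

	while (t && (btf_is_mod(t) || btf_is_typedef(t))) {
		id = t->type;		/* modifiers point at the wrapped type */
		t = btf__type_by_id(btf, id);
	}
	if (res_id)
		*res_id = id;
	return t;
}
```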
|
bpf_struct_ops.c

     312  const struct btf_type *t;  in is_module_member() local
     315  if (!t)  in is_module_member()
     318  if (!__btf_type_is_struct(t) && !btf_type_is_fwd(t))  in is_module_member()
     338  const struct btf_type *t;  in bpf_struct_ops_desc_init() local
     387  st_ops_desc->type = t;  in bpf_struct_ops_desc_init()
     392  for_each_member(i, t, member) {  in bpf_struct_ops_desc_init()
     551  for_each_member(i, t, member) {  in check_zero_holes()
     564  if (t->size > prev_mend &&  in check_zero_holes()
    1021  for_each_member(i, t, member)  in count_func_ptrs()
    1032  const struct btf_type *t, *vt;  in bpf_struct_ops_map_alloc() local
    [all …]
|
/kernel/rcu/

tasks.h

    1018  if (t != current && rcu_tasks_is_holdout(t)) {  in rcu_tasks_pertask()
    1020  t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);  in rcu_tasks_pertask()
    1100  t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||  in check_holdout_task()
    1103  !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) {  in check_holdout_task()
    1118  t, ".I"[is_idle_task(t)],  in check_holdout_task()
    1120  t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,  in check_holdout_task()
    1690  WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));  in trc_inspect_reader()
    1780  if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))  in rcu_tasks_trace_pertask_prep()
    1884  if (task_curr(t) && cpu_online(task_cpu(t)))  in trc_check_slow_task()
    1906  t->pid,  in show_stalled_task_trace()
    [all …]
|
tree_plugin.h

     340  t->rcu_blocked_node = rnp;  in rcu_note_context_switch()
     350  t->pid,  in rcu_note_context_switch()
     356  rcu_preempt_deferred_qs(t);  in rcu_note_context_switch()
     532  rnp = t->rcu_blocked_node;  in rcu_preempt_deferred_qs_irqrestore()
     544  rnp->gp_seq, t->pid);  in rcu_preempt_deferred_qs_irqrestore()
     684  if (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks))  in rcu_unlock_needs_exp_handling()
     787  struct task_struct *t;  in rcu_preempt_check_blocked_tasks() local
     799  rnp->gp_seq, t->pid);  in rcu_preempt_check_blocked_tasks()
     820  set_tsk_need_resched(t);  in rcu_flavor_sched_clock_irq()
    1160  struct task_struct *t;  in rcu_boost() local
    [all …]
|
tree_stall.h

    277  struct task_struct *t;  in rcu_print_detail_task_stall_rnp() local
    292  sched_show_task(t);  in rcu_print_detail_task_stall_rnp()
    312  if (task_curr(t))  in check_slow_task()
    330  struct task_struct *t;  in rcu_print_task_stall() local
    343  get_task_struct(t);  in rcu_print_task_stall()
    344  ts[i++] = t;  in rcu_print_task_stall()
    350  t = ts[--i];  in rcu_print_task_stall()
    352  pr_cont(" P%d", t->pid);  in rcu_print_task_stall()
    355  t->pid, rscr.nesting,  in rcu_print_task_stall()
    361  put_task_struct(t);  in rcu_print_task_stall()
    [all …]
|
tree_exp.h

    542  int t;  in synchronize_rcu_expedited_wait_once() local
    545  t = swait_event_timeout_exclusive(rcu_state.expedited_wq,  in synchronize_rcu_expedited_wait_once()
    549  if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))  in synchronize_rcu_expedited_wait_once()
    751  struct task_struct *t = current;  in rcu_exp_handler() local
    792  t->rcu_read_unlock_special.b.exp_hint = true;  in rcu_exp_handler()
    811  struct task_struct *t;  in rcu_print_task_exp_stall() local
    818  t = list_entry(rnp->exp_tasks->prev,  in rcu_print_task_exp_stall()
    821  pr_cont(" P%d", t->pid);  in rcu_print_task_exp_stall()
    836  struct task_struct *t;  in rcu_exp_print_detail_task_stall_rnp() local
    845  t = list_entry(rnp->exp_tasks->prev,  in rcu_exp_print_detail_task_stall_rnp()
    [all …]
|
/kernel/time/

timeconst.bc

      6  auto t;
      8  t = b;
     10  a = t;
     77  print "#define HZ_TO_MSEC_NUM\t\t", 1000/cd, "\n"
     78  print "#define HZ_TO_MSEC_DEN\t\t", hz/cd, "\n"
     79  print "#define MSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
     80  print "#define MSEC_TO_HZ_DEN\t\t", 1000/cd, "\n"
    100  print "#define HZ_TO_USEC_DEN\t\t", hz/cd, "\n"
    101  print "#define USEC_TO_HZ_NUM\t\t", hz/cd, "\n"
    106  print "#define HZ_TO_NSEC_DEN\t\t", hz/cd, "\n"
    [all …]
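Lines 6–10 are the body of the script's gcd helper (Euclid's algorithm); `cd = gcd(1000, hz)` reduces each conversion to a coprime NUM/DEN pair so the kernel can convert jiffies without dividing by HZ at runtime. A hedged C illustration of how the emitted constants are consumed, assuming HZ = 250 so that gcd(1000, 250) = 250:

```c
/* With HZ = 250 the script would emit:
 *   #define HZ_TO_MSEC_NUM  4    (1000 / cd)
 *   #define HZ_TO_MSEC_DEN  1    (hz / cd)
 * so jiffies -> milliseconds reduces to a cheap multiply. */
#define HZ_TO_MSEC_NUM	4
#define HZ_TO_MSEC_DEN	1
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static inline unsigned int jiffies_to_msecs_sketch(unsigned long j)
{
	return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
}
```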
|
sleep_timeout.c

     23  static void process_timeout(struct timer_list *t)  in process_timeout() argument
     25  struct process_timer *timeout = timer_container_of(timeout, t, timer);  in process_timeout()
    192  struct hrtimer_sleeper t;  in schedule_hrtimeout_range_clock() local
    211  hrtimer_setup_sleeper_on_stack(&t, clock_id, mode);  in schedule_hrtimeout_range_clock()
    212  hrtimer_set_expires_range_ns(&t.timer, *expires, delta);  in schedule_hrtimeout_range_clock()
    213  hrtimer_sleeper_start_expires(&t, mode);  in schedule_hrtimeout_range_clock()
    215  if (likely(t.task))  in schedule_hrtimeout_range_clock()
    218  hrtimer_cancel(&t.timer);  in schedule_hrtimeout_range_clock()
    219  destroy_hrtimer_on_stack(&t.timer);  in schedule_hrtimeout_range_clock()
    223  return !t.task ? 0 : -EINTR;  in schedule_hrtimeout_range_clock()
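schedule_hrtimeout_range_clock() is the engine behind the schedule_hrtimeout*() family; note the return convention on line 223: 0 when the sleeper's timer fired (and cleared `t.task`), -EINTR when the task was woken early. A minimal caller sketch using the public wrapper; `hires_nap` is a hypothetical name:

```c
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/* Sleep for roughly 100 us with hrtimer precision. */
static int hires_nap(void)
{
	ktime_t exp = ktime_add_us(ktime_get(), 100);

	/* The caller sets the task state before arming the sleeper. */
	set_current_state(TASK_INTERRUPTIBLE);

	/* 0: timer expired; -EINTR: woken early (signal, wake_up_process). */
	return schedule_hrtimeout(&exp, HRTIMER_MODE_ABS);
}
```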
|
posix-stubs.c

     98  struct timespec64 t;  in SYSCALL_DEFINE4() local
    110  if (get_timespec64(&t, rqtp))  in SYSCALL_DEFINE4()
    112  if (!timespec64_valid(&t))  in SYSCALL_DEFINE4()
    119  texp = timespec64_to_ktime(t);  in SYSCALL_DEFINE4()
    181  struct timespec64 t;  in SYSCALL_DEFINE4() local
    193  if (get_old_timespec32(&t, rqtp))  in SYSCALL_DEFINE4()
    195  if (!timespec64_valid(&t))  in SYSCALL_DEFINE4()
    202  texp = timespec64_to_ktime(t);  in SYSCALL_DEFINE4()
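Both hit clusters are the CONFIG_POSIX_TIMERS=n stub of clock_nanosleep(): copy in the timespec (native 64-bit or compat 32-bit), validate it, convert to ktime_t. From userspace the call looks the same either way; a small sketch (clock_nanosleep() returns the error number directly rather than setting errno):

```c
#include <time.h>

/* Relative 10 ms sleep on the monotonic clock. */
static int nap_10ms(void)
{
	struct timespec req = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };

	/* 0 on success, or e.g. EINTR if a signal cut the sleep short. */
	return clock_nanosleep(CLOCK_MONOTONIC, 0, &req, NULL);
}
```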
|
timekeeping_debug.c

    49  void tk_debug_account_sleep_time(const struct timespec64 *t)  in tk_debug_account_sleep_time() argument
    52  int bin = min(fls(t->tv_sec), NUM_BINS-1);  in tk_debug_account_sleep_time()
    56  (s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC);  in tk_debug_account_sleep_time()
|
/kernel/sched/

stats.h

    245  if (!t->sched_info.last_queued)  in sched_info_dequeue()
    249  t->sched_info.last_queued = 0;  in sched_info_dequeue()
    250  t->sched_info.run_delay += delta;  in sched_info_dequeue()
    253  if (delta && (!t->sched_info.min_run_delay || delta < t->sched_info.min_run_delay))  in sched_info_dequeue()
    267  if (!t->sched_info.last_queued)  in sched_info_arrive()
    272  t->sched_info.last_queued = 0;  in sched_info_arrive()
    275  t->sched_info.pcount++;  in sched_info_arrive()
    278  if (delta && (!t->sched_info.min_run_delay || delta < t->sched_info.min_run_delay))  in sched_info_arrive()
    291  if (!t->sched_info.last_queued)  in sched_info_enqueue()
    309  if (task_is_running(t))  in sched_info_depart()
    [all …]
|
psi.c

     509  if (now < t->last_event_time + t->win.size)  in update_triggers()
     827  for (t = 0, m = clear; m; m &= ~(1 << t), t++) {  in psi_group_change()
     841  for (t = 0; set; set &= ~(1 << t), t++)  in psi_group_change()
    1343  t = kmalloc(sizeof(*t), GFP_KERNEL);  in psi_trigger_create()
    1344  if (!t)  in psi_trigger_create()
    1394  return t;  in psi_trigger_create()
    1406  if (!t)  in psi_trigger_destroy()
    1415  if (t->of)  in psi_trigger_destroy()
    1479  kfree(t);  in psi_trigger_destroy()
    1492  if (!t)  in psi_trigger_poll()
    [all …]
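psi_trigger_create()/psi_trigger_poll() implement the userspace-visible PSI trigger API: write a threshold/window pair to a pressure file, then poll() it for POLLPRI events. A minimal monitor sketch following Documentation/accounting/psi.rst, watching for 150 ms of "some" memory stall within any 1 s window:

```c
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char trig[] = "some 150000 1000000"; /* stall us / window us */
	struct pollfd fds;

	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fds.fd < 0 || write(fds.fd, trig, strlen(trig) + 1) < 0)
		return 1;	/* no PSI support, or bad trigger spec */

	fds.events = POLLPRI;
	while (poll(&fds, 1, -1) > 0) {
		if (fds.revents & POLLERR)
			break;	/* the monitored object went away */
		if (fds.revents & POLLPRI)
			printf("memory pressure threshold crossed\n");
	}
	return 0;
}
```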
|
completion.c

    221  long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);  in wait_for_completion_interruptible() local
    223  if (t == -ERESTARTSYS)  in wait_for_completion_interruptible()
    224  return t;  in wait_for_completion_interruptible()
    259  long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);  in wait_for_completion_killable() local
    261  if (t == -ERESTARTSYS)  in wait_for_completion_killable()
    262  return t;  in wait_for_completion_killable()
    269  long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, state);  in wait_for_completion_state() local
    271  if (t == -ERESTARTSYS)  in wait_for_completion_state()
    272  return t;  in wait_for_completion_state()
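All three waiters above share one shape: wait_for_common() with MAX_SCHEDULE_TIMEOUT, propagating -ERESTARTSYS when a signal broke the sleep. A minimal producer/consumer sketch using the interruptible variant; the names are hypothetical:

```c
#include <linux/completion.h>

static DECLARE_COMPLETION(work_done);

/* Consumer: block until the producer signals, or a signal arrives. */
static int wait_for_work(void)
{
	int ret = wait_for_completion_interruptible(&work_done);

	if (ret == -ERESTARTSYS)
		return ret;	/* interrupted; the syscall may be restarted */
	return 0;
}

/* Producer: wake exactly one waiter (complete_all() would wake them all). */
static void work_finished(void)
{
	complete(&work_done);
}
```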
|
cputime.c

    301  rq = task_rq_lock(t, &rf);  in read_sum_exec_runtime()
    302  ns = t->se.sum_exec_runtime;  in read_sum_exec_runtime()
    303  task_rq_unlock(rq, t, &rf);  in read_sum_exec_runtime()
    317  struct task_struct *t;  in thread_group_cputime() local
    342  for_each_thread(tsk, t) {  in thread_group_cputime()
    825  return t->gtime;  in task_gtime()
    830  gtime = t->gtime;  in task_gtime()
    852  *utime = t->utime;  in task_cputime()
    853  *stime = t->stime;  in task_cputime()
    861  *utime = t->utime;  in task_cputime()
    [all …]
|
/kernel/trace/

fgraph.c

     163  #define RET_STACK(t, offset) ((struct ftrace_ret_stack *)(&(t)->ret_stack[offset]))  argument
    1111  t->curr_ret_stack = 0;  in graph_init_task()
    1112  t->curr_ret_depth = -1;  in graph_init_task()
    1124  t->curr_ret_stack = 0;  in ftrace_graph_init_idle_task()
    1125  t->curr_ret_depth = -1;  in ftrace_graph_init_idle_task()
    1130  if (t->ret_stack)  in ftrace_graph_init_idle_task()
    1154  t->ret_stack = NULL;  in ftrace_graph_init_task()
    1155  t->curr_ret_stack = 0;  in ftrace_graph_init_task()
    1156  t->curr_ret_depth = -1;  in ftrace_graph_init_task()
    1175  t->ret_stack = NULL;  in ftrace_graph_exit_task()
    [all …]
|
blktrace.c

      95  t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);  in trace_note()
      96  if (t) {  in trace_note()
     102  t->pid = pid;  in trace_note()
     106  memcpy((void *)t + sizeof(*t), &cgid, cgid_len);  in trace_note()
     107  memcpy((void *) t + sizeof(*t) + cgid_len, data, len);  in trace_note()
     277  if (t) {  in __blk_add_trace()
     301  memcpy((void *)t + sizeof(*t), &cgid, cgid_len);  in __blk_add_trace()
    1218  MAJOR(t->device), MINOR(t->device), iter->cpu,  in blk_log_action_classic()
    1238  MAJOR(t->device), MINOR(t->device),  in blk_log_action()
    1256  MAJOR(t->device), MINOR(t->device),  in blk_log_action()
    [all …]
|
trace_btf.c

    15  const struct btf_type *t;  in btf_find_func_proto() local
    23  t = btf_type_by_id(*btf_p, id);  in btf_find_func_proto()
    24  if (!t || !btf_type_is_func(t))  in btf_find_func_proto()
    28  t = btf_type_by_id(*btf_p, t->type);  in btf_find_func_proto()
    29  if (!t || !btf_type_is_func_proto(t))  in btf_find_func_proto()
    32  return t;  in btf_find_func_proto()
|
trace_mmiotrace.c

    171  unsigned long long t = ns2usecs(iter->ts);  in mmio_print_rw() local
    172  unsigned long usec_rem = do_div(t, USEC_PER_SEC);  in mmio_print_rw()
    173  unsigned secs = (unsigned long)t;  in mmio_print_rw()
    216  unsigned long long t = ns2usecs(iter->ts);  in mmio_print_map() local
    217  unsigned long usec_rem = do_div(t, USEC_PER_SEC);  in mmio_print_map()
    218  unsigned secs = (unsigned long)t;  in mmio_print_map()
    250  unsigned long long t = ns2usecs(iter->ts);  in mmio_print_mark() local
    251  unsigned long usec_rem = do_div(t, USEC_PER_SEC);  in mmio_print_mark()
    252  unsigned secs = (unsigned long)t;  in mmio_print_mark()
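All three mmio_print_* helpers repeat the same timestamp-splitting idiom: do_div() divides a u64 in place and returns the remainder, which keeps the "secs.usecs" output cheap on 32-bit targets that lack native 64-bit division. A hedged sketch of the same pattern; the helper name is mine, and the first division stands in for the trace-internal ns2usecs():

```c
#include <asm/div64.h>
#include <linux/time64.h>
#include <linux/types.h>

/* Split a nanosecond timestamp into whole seconds and the
 * sub-second microsecond remainder. */
static void split_timestamp(u64 ns, unsigned long *secs, unsigned long *usecs)
{
	u64 t = ns;

	do_div(t, NSEC_PER_USEC);		/* t is now microseconds */
	*usecs = do_div(t, USEC_PER_SEC);	/* remainder: sub-second part */
	*secs = (unsigned long)t;		/* quotient: whole seconds */
}
```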
|
/kernel/futex/

syscalls.c

    147  futex_init_timeout(u32 cmd, u32 op, struct timespec64 *ts, ktime_t *t)  in futex_init_timeout() argument
    152  *t = timespec64_to_ktime(*ts);  in futex_init_timeout()
    154  *t = ktime_add_safe(ktime_get(), *t);  in futex_init_timeout()
    156  *t = timens_ktime_to_host(CLOCK_MONOTONIC, *t);  in futex_init_timeout()
    165  ktime_t t, *tp = NULL;  in SYSCALL_DEFINE6() local
    173  ret = futex_init_timeout(cmd, op, &ts, &t);  in SYSCALL_DEFINE6()
    176  tp = &t;  in SYSCALL_DEFINE6()
    497  ktime_t t, *tp = NULL;  in SYSCALL_DEFINE6() local
    503  ret = futex_init_timeout(cmd, op, &ts, &t);  in SYSCALL_DEFINE6()
    506  tp = &t;  in SYSCALL_DEFINE6()
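futex_init_timeout() encodes the timeout convention: for FUTEX_WAIT the timespec is a relative interval, turned into an absolute CLOCK_MONOTONIC deadline by the ktime_add_safe(ktime_get(), ...) on line 154, while the other timed ops pass absolute deadlines through. A hedged userspace sketch of the relative case on a 64-bit system (there is no glibc wrapper, so it goes through syscall(2); 32-bit ABIs may need the time64 variant):

```c
#define _GNU_SOURCE
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* Block while *uaddr == expected, for at most `timeout` (relative,
 * measured against CLOCK_MONOTONIC, matching the FUTEX_WAIT branch). */
static long futex_wait(uint32_t *uaddr, uint32_t expected,
		       const struct timespec *timeout)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAIT, expected, timeout,
		       NULL, 0);
}
```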
|