| /kernel/locking/ |
| lockdep.c |
    1966  curr->comm, task_pid_nr(curr));  in print_circular_bug_header()
    2570  curr->comm, task_pid_nr(curr),  in print_bad_irq_dependency()
    3026  curr->comm, task_pid_nr(curr));  in print_deadlock_bug()
    3371  (prev) = (curr), (curr) = chain_block_next(curr))
    4025  curr->comm, task_pid_nr(curr),  in print_usage_bug()
    4087  curr->comm, task_pid_nr(curr));  in print_irq_inversion_bug()
    4786  if (curr->hardirq_threaded || curr->irq_config)  in task_wait_context()
    5289  curr->comm, task_pid_nr(curr));  in print_unlock_imbalance_bug()
    6021  curr->comm, task_pid_nr(curr));  in print_lock_contention_bug()
    6688  curr->comm, task_pid_nr(curr), mem_from, mem_to-1);  in print_freed_lock_bug()
    [all …]
|
| osq_lock.c |
    58   int curr = encode_cpu(smp_processor_id());  in osq_wait_next() local
    61   if (atomic_read(&lock->tail) == curr &&  in osq_wait_next()
    62   atomic_cmpxchg_acquire(&lock->tail, curr, old_cpu) == curr) {  in osq_wait_next()
    97   int curr = encode_cpu(smp_processor_id());  in osq_lock() local
    102  node->cpu = curr;  in osq_lock()
    110  old = atomic_xchg(&lock->tail, curr);  in osq_lock()
    213  int curr = encode_cpu(smp_processor_id());  in osq_unlock() local
    218  if (atomic_try_cmpxchg_release(&lock->tail, &curr, OSQ_UNLOCKED_VAL))  in osq_unlock()
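The osq hits above show the core of the optimistic spin queue: each CPU publishes itself as the new `tail` with an xchg, links itself behind the previous tail, and hands the lock to its successor on unlock; `osq_unlock()` only needs the cmpxchg back to `OSQ_UNLOCKED_VAL` when nobody queued behind it. Below is a minimal userspace sketch of that tail-handoff idea using C11 atomics; the per-CPU node array, `encode()` helper, and all other names are illustrative, and the kernel's ability to unqueue a spinner that must reschedule (the `osq_wait_next()` path) is omitted.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define UNLOCKED 0                          /* mirrors OSQ_UNLOCKED_VAL */
#define MAX_CPUS 64

struct spin_node {
    _Atomic(struct spin_node *) next;       /* successor in the queue */
    atomic_bool locked;                     /* set by predecessor on handoff */
};

struct osq_sketch {
    atomic_int tail;                        /* 0 = unlocked, else cpu + 1 */
};

static struct spin_node nodes[MAX_CPUS];

static int encode(int cpu) { return cpu + 1; }   /* like encode_cpu() */

void osq_lock_sketch(struct osq_sketch *lock, int cpu)
{
    struct spin_node *node = &nodes[cpu];
    int curr = encode(cpu);

    atomic_store(&node->next, NULL);
    atomic_store(&node->locked, false);

    /* Publish ourselves as the new tail; the old value names our predecessor. */
    int old = atomic_exchange(&lock->tail, curr);
    if (old == UNLOCKED)
        return;                             /* queue was empty: lock acquired */

    struct spin_node *prev = &nodes[old - 1];
    atomic_store(&prev->next, node);        /* link in behind the predecessor */

    while (!atomic_load(&node->locked))
        ;                                   /* spin until the predecessor hands off */
}

void osq_unlock_sketch(struct osq_sketch *lock, int cpu)
{
    struct spin_node *node = &nodes[cpu];
    int curr = encode(cpu);
    struct spin_node *next;

    /* Fast path: we are still the tail, so nobody is queued behind us. */
    if (atomic_compare_exchange_strong(&lock->tail, &curr, UNLOCKED))
        return;

    /* Otherwise wait for the successor to finish linking, then hand off. */
    while (!(next = atomic_load(&node->next)))
        ;
    atomic_store(&next->locked, true);
}
```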
|
| mutex.c |
    89   unsigned long owner, curr = (unsigned long)current;  in __mutex_trylock_common() local
    98   if (task != curr)  in __mutex_trylock_common()
    110  task = curr;  in __mutex_trylock_common()
    114  if (task == curr)  in __mutex_trylock_common()
    152  unsigned long curr = (unsigned long)current;  in __mutex_trylock_fast() local
    157  if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))  in __mutex_trylock_fast()
    165  unsigned long curr = (unsigned long)current;  in __mutex_unlock_fast() local
    167  return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);  in __mutex_unlock_fast()
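The `__mutex_trylock_fast()`/`__mutex_unlock_fast()` hits show the lock-free fast path: the `owner` word is 0 when the mutex is free, and lock/unlock are a single try-cmpxchg installing or clearing the current task pointer. A small userspace sketch of the same pattern with C11 atomics follows; the real `lock->owner` also packs handoff and waiter flags into its low bits, which the slow path handles and this sketch ignores. All names here are illustrative.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct fast_mutex {
    _Atomic uintptr_t owner;                /* 0 == unlocked, else owner's id */
};

/* Acquire semantics on success so the critical section cannot be reordered
 * before the lock is actually taken. */
static bool trylock_fast(struct fast_mutex *lock, uintptr_t self)
{
    uintptr_t zero = 0;
    return atomic_compare_exchange_strong_explicit(&lock->owner, &zero, self,
                                                   memory_order_acquire,
                                                   memory_order_relaxed);
}

/* Release pairs with the acquire above; if the word no longer equals 'self'
 * (e.g. waiter bits were set), the caller must take a slow path instead. */
static bool unlock_fast(struct fast_mutex *lock, uintptr_t self)
{
    return atomic_compare_exchange_strong_explicit(&lock->owner, &self, 0,
                                                   memory_order_release,
                                                   memory_order_relaxed);
}
```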
|
| /kernel/sched/ |
| swait.c |
    24   struct swait_queue *curr;  in swake_up_locked() local
    29   curr = list_first_entry(&q->task_list, typeof(*curr), task_list);  in swake_up_locked()
    30   try_to_wake_up(curr->task, TASK_NORMAL, wake_flags);  in swake_up_locked()
    31   list_del_init(&curr->task_list);  in swake_up_locked()
    64   struct swait_queue *curr;  in swake_up_all() local
    70   curr = list_first_entry(&tmp, typeof(*curr), task_list);  in swake_up_all()
    72   wake_up_state(curr->task, TASK_NORMAL);  in swake_up_all()
    73   list_del_init(&curr->task_list);  in swake_up_all()
|
| cputime.c |
    447   *ut = curr->utime;  in cputime_adjust()
    448   *st = curr->stime;  in cputime_adjust()
    554   rtime = curr->sum_exec_runtime;  in cputime_adjust()
    567   stime = curr->stime;  in cputime_adjust()
    568   utime = curr->utime;  in cputime_adjust()
    988   struct task_struct *curr;  in kcpustat_field() local
    991   curr = rcu_dereference(rq->curr);  in kcpustat_field()
    992   if (WARN_ON_ONCE(!curr)) {  in kcpustat_field()
    1075  struct task_struct *curr;  in kcpustat_cpu_fetch() local
    1078  curr = rcu_dereference(rq->curr);  in kcpustat_cpu_fetch()
    [all …]
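The `cputime_adjust()` hits read the tick-sampled `utime`/`stime` and the precise `sum_exec_runtime`; the function's job is to rescale the sampled split so that it adds up to the precise total. Below is a sketch of that proportional-split idea, assuming the usual `rtime * stime / (stime + utime)` scaling; the kernel's exact rounding and monotonicity handling is more involved, and all names here are illustrative.

```c
#include <stdint.h>

/* Split the precise runtime 'rtime' in the ratio suggested by the sampled
 * stime/utime. Uses a 128-bit intermediate (compiler extension) to avoid
 * overflow in the multiplication. */
static void cputime_adjust_sketch(uint64_t rtime, uint64_t stime, uint64_t utime,
                                  uint64_t *st, uint64_t *ut)
{
    if (stime + utime == 0) {               /* nothing sampled yet */
        *st = 0;
        *ut = rtime;
        return;
    }
    *st = (uint64_t)((unsigned __int128)rtime * stime / (stime + utime));
    *ut = rtime - *st;
}
```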
|
| rt.c |
    964   return rt_rq->highest_prio.curr;  in rt_se_prio()
    1085  rt_rq->highest_prio.curr = prio;  in inc_rt_prio()
    1106  rt_rq->highest_prio.curr =  in dec_rt_prio()
    1493  requeue_task_rt(rq, rq->curr, 0);  in yield_task_rt()
    1501  struct task_struct *curr, *donor;  in select_task_rq_rt() local
    1512  curr = READ_ONCE(rq->curr); /* unlocked access */  in select_task_rq_rt()
    1541  test = curr &&  in select_task_rq_rt()
    1976  cpu = find_lowest_rq(rq->curr);  in push_rt_task()
    1999  if (WARN_ON(next_task == rq->curr))  in push_rt_task()
    2272  this_rq->rt.highest_prio.curr)  in pull_rt_task()
    [all …]
|
| wait.c |
    95   wait_queue_entry_t *curr, *next;  in __wake_up_common() local
    99   curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);  in __wake_up_common()
    101  if (&curr->entry == &wq_head->head)  in __wake_up_common()
    104  list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {  in __wake_up_common()
    105  unsigned flags = curr->flags;  in __wake_up_common()
    108  ret = curr->func(curr, mode, wake_flags, key);  in __wake_up_common()
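The `__wake_up_common()` hits show the generic wake-up loop: every waiter carries a callback (`curr->func`) that decides whether its task is actually woken, and exclusive waiters stop the scan once `nr_exclusive` of them have been woken. A simplified sketch of that contract, with the wait list reduced to an array and all names illustrative:

```c
#include <stddef.h>

#define WQ_FLAG_EXCLUSIVE 0x01              /* same meaning as the kernel flag */

struct waiter {
    unsigned int flags;
    /* returns <0 to stop the scan, >0 if the waiter was actually woken */
    int (*func)(struct waiter *w, unsigned int mode, int wake_flags, void *key);
};

void wake_up_common_sketch(struct waiter **list, size_t n, unsigned int mode,
                           int wake_flags, int nr_exclusive, void *key)
{
    for (size_t i = 0; i < n; i++) {
        struct waiter *curr = list[i];
        unsigned int flags = curr->flags;
        int ret = curr->func(curr, mode, wake_flags, key);

        if (ret < 0)
            break;                          /* callback asked us to stop */
        if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
            break;                          /* woke enough exclusive waiters */
    }
}
```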
|
| fair.c |
    656   struct sched_entity *curr = cfs_rq->curr;  in avg_vruntime() local
    660   if (curr && curr->on_rq) {  in avg_vruntime()
    724   struct sched_entity *curr = cfs_rq->curr;  in vruntime_eligible() local
    728   if (curr && curr->on_rq) {  in vruntime_eligible()
    787   if (curr && curr->on_rq)  in cfs_rq_min_slice()
    956   return curr && curr->on_rq ? curr : se;  in __pick_eevdf()
    958   if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))  in __pick_eevdf()
    3773  bool curr = cfs_rq->curr == se;  in reweight_entity() local
    5204  if (curr && curr->on_rq)  in place_entity()
    5244  bool curr = cfs_rq->curr == se;  in enqueue_entity() local
    [all …]
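The fair.c hits share one theme: `cfs_rq->curr` is not kept in the runqueue tree while it is running, so `avg_vruntime()` and `vruntime_eligible()` fold it back in by hand whenever `curr->on_rq` is set. The sketch below illustrates that bookkeeping, assuming the EEVDF bookkeeping of a weighted key sum taken relative to `min_vruntime`; the struct fields and names are illustrative, not the kernel's.

```c
#include <stdbool.h>
#include <stdint.h>

struct entity_sketch {
    int64_t  vruntime;
    uint64_t weight;
    bool     on_rq;
};

struct cfs_rq_sketch {
    int64_t  min_vruntime;        /* zero point for the keys below */
    int64_t  avg_key;             /* sum of weight * (vruntime - min_vruntime) over the tree */
    uint64_t avg_load;            /* sum of weights over the tree */
    struct entity_sketch *curr;   /* running entity, kept out of the tree */
};

/* Eligible means "not ahead of the weighted average vruntime", i.e.
 * sum(w_i * (v_i - v)) >= 0, evaluated without a division. */
static bool eligible_sketch(struct cfs_rq_sketch *rq, struct entity_sketch *se)
{
    int64_t  key  = rq->avg_key;
    uint64_t load = rq->avg_load;
    struct entity_sketch *curr = rq->curr;

    if (curr && curr->on_rq) {    /* fold the running entity back in */
        key  += (int64_t)curr->weight * (curr->vruntime - rq->min_vruntime);
        load += curr->weight;
    }

    return key >= (se->vruntime - rq->min_vruntime) * (int64_t)load;
}
```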
|
| deadline.c |
    1202  if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &dl_se->rq->curr->dl))  in dl_server_timer()
    1595  if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))  in dl_server_start()
    1815  if (dl_rq->earliest_dl.curr == 0 ||  in inc_dl_deadline()
    1817  if (dl_rq->earliest_dl.curr == 0)  in inc_dl_deadline()
    1833  dl_rq->earliest_dl.curr = 0;  in dec_dl_deadline()
    2163  rq->curr->dl.dl_yielded = 1;  in yield_task_dl()
    2188  struct task_struct *curr, *donor;  in select_task_rq_dl() local
    2198  curr = READ_ONCE(rq->curr); /* unlocked access */  in select_task_rq_dl()
    2211  (curr->nr_cpus_allowed < 2 ||  in select_task_rq_dl()
    2788  WARN_ON(p == src_rq->curr);  in pull_dl_task()
    [all …]
|
| core.c |
    1098   struct task_struct *curr = rq->curr;  in __resched_curr() local
    5504   struct sched_entity *curr = p->se.cfs_rq->curr;  in prefetch_curr_exec_start() local
    5506   struct sched_entity *curr = task_rq(p)->cfs.curr;  in prefetch_curr_exec_start()
    5508   prefetch(curr);  in prefetch_curr_exec_start()
    5710   struct task_struct *curr = rq->curr;  in sched_tick_remote() local
    5729   curr->sched_class->task_tick(rq, curr, 0);  in sched_tick_remote()
    6836   prev = rq->curr;  in __schedule()
    7409   WARN_ON(p != rq->curr);  in rt_mutex_setprio()
    10758  curr = rcu_dereference(rq->curr);  in sched_mm_cid_remote_clear_old()
    10759  if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {  in sched_mm_cid_remote_clear_old()
    [all …]
|
| membarrier.c |
    292  p = rcu_dereference(cpu_rq(cpu)->curr);  in membarrier_global_expedited()
    370  p = rcu_dereference(cpu_rq(cpu_id)->curr);  in membarrier_private_expedited()
    383  p = rcu_dereference(cpu_rq(cpu)->curr);  in membarrier_private_expedited()
    482  p = rcu_dereference(rq->curr);  in sync_runqueues_membarrier_state()
|
| stats.h |
    114  void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev);
    116  static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,  in psi_account_irqtime() argument
    230  static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,  in psi_account_irqtime() argument
|
| ext.c |
    1914  struct task_struct *curr = rq->curr;  in update_curr_scx() local
    1922  curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);  in update_curr_scx()
    1923  if (!curr->scx.slice)  in update_curr_scx()
    1924  touch_core_sched(rq, curr);  in update_curr_scx()
    2054  rq->curr->scx.slice = 0;  in dispatch_enqueue()
    3722  curr->scx.slice = 0;  in task_tick_scx()
    3723  touch_core_sched(rq, curr);  in task_tick_scx()
    3728  if (!curr->scx.slice)  in task_tick_scx()
    5328  rq->curr->comm, rq->curr->pid,  in scx_dump_state()
    5329  rq->curr->sched_class);  in scx_dump_state()
    [all …]
|
| syscalls.c |
    206   if (rq->curr != rq->idle)  in idle_cpu()
    251   if (sched_core_enabled(rq) && rq->curr == rq->idle)  in sched_core_idle_cpu()
    1423  struct task_struct *curr = current;  in yield_to() local
    1443  if (!curr->sched_class->yield_to_task)  in yield_to()
    1446  if (curr->sched_class != p->sched_class)  in yield_to()
    1452  yielded = curr->sched_class->yield_to_task(rq, p);  in yield_to()
|
| pelt.c |
    310  cfs_rq->curr == se)) {  in __update_load_avg_se()
    326  cfs_rq->curr != NULL)) {  in __update_load_avg_cfs_rq()
|
| /kernel/futex/ |
| core.c |
    1074  if (owner != task_pid_vnr(curr))  in handle_futex_death()
    1194  curr, pi, HANDLE_DEATH_LIST))  in exit_robust_list()
    1339  WARN_ON(curr != current);  in exit_pi_state_list()
    1346  raw_spin_lock_irq(&curr->pi_lock);  in exit_pi_state_list()
    1367  raw_spin_lock_irq(&curr->pi_lock);  in exit_pi_state_list()
    1370  raw_spin_unlock_irq(&curr->pi_lock);  in exit_pi_state_list()
    1374  raw_spin_lock(&curr->pi_lock);  in exit_pi_state_list()
    1387  WARN_ON(pi_state->owner != curr);  in exit_pi_state_list()
    1392  raw_spin_unlock(&curr->pi_lock);  in exit_pi_state_list()
    1400  raw_spin_lock_irq(&curr->pi_lock);  in exit_pi_state_list()
    [all …]
|
| /kernel/ |
| scs.c |
    133  unsigned long *p, prev, curr = highest, used = 0;  in scs_check_usage() local
    144  while (used > curr) {  in scs_check_usage()
    145  prev = cmpxchg_relaxed(&highest, curr, used);  in scs_check_usage()
    147  if (prev == curr) {  in scs_check_usage()
    153  curr = prev;  in scs_check_usage()
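`scs_check_usage()` is a lock-free high-water mark: keep retrying the cmpxchg until either our larger value has been published or someone else has already published one at least as large. The same loop in userspace C11 atomics; the function name and the reporting are illustrative.

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong highest;                /* plays the role of 'highest' above */

static void record_usage(unsigned long used)
{
    unsigned long curr = atomic_load_explicit(&highest, memory_order_relaxed);

    while (used > curr) {
        /* On failure the CAS reloads 'curr' with the competing value, which
         * ends the loop if that value is already >= used. */
        if (atomic_compare_exchange_weak_explicit(&highest, &curr, used,
                                                  memory_order_relaxed,
                                                  memory_order_relaxed)) {
            printf("new maximum stack usage: %lu\n", used);
            break;
        }
    }
}
```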
|
| /kernel/time/ |
| timer_list.c |
    65  struct timerqueue_node *curr;  in print_active_timers() local
    75  curr = timerqueue_getnext(&base->active);  in print_active_timers()
    80  while (curr && i < next) {  in print_active_timers()
    81  curr = timerqueue_iterate_next(curr);  in print_active_timers()
    85  if (curr) {  in print_active_timers()
    87  timer = container_of(curr, struct hrtimer, node);  in print_active_timers()
|
| /kernel/power/ |
| process.c |
    182  struct task_struct *curr = current;  in thaw_processes() local
    200  WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));  in thaw_processes()
    205  WARN_ON(!(curr->flags & PF_SUSPEND_TASK));  in thaw_processes()
    206  curr->flags &= ~PF_SUSPEND_TASK;  in thaw_processes()
|
| /kernel/gcov/ |
| fs.c |
    677  char *curr;  in add_node() local
    687  for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) {  in add_node()
    688  if (curr == next)  in add_node()
    691  if (strcmp(curr, ".") == 0)  in add_node()
    693  if (strcmp(curr, "..") == 0) {  in add_node()
    699  node = get_child_by_name(parent, curr);  in add_node()
    701  node = new_node(parent, NULL, curr);  in add_node()
    708  node = new_node(parent, info, curr);  in add_node()
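`add_node()` walks the gcov file path one component at a time: `strchr()` finds the next '/', empty components and "." are skipped, ".." moves to the parent, and every other component looks up or creates a child node, with the loop leaving `curr` on the final leaf name. A standalone sketch of that walk follows; the tree operations are replaced by prints and all helper names are illustrative.

```c
#include <stdio.h>
#include <string.h>

void walk_path_components(const char *filename)
{
    const char *curr, *next;

    for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) {
        size_t len = (size_t)(next - curr);

        if (len == 0)
            continue;                               /* "//" or leading '/' */
        if (len == 1 && curr[0] == '.')
            continue;                               /* "." component */
        if (len == 2 && curr[0] == '.' && curr[1] == '.') {
            printf("up to parent\n");               /* ".." component */
            continue;
        }
        printf("descend into \"%.*s\"\n", (int)len, curr);
    }
    printf("leaf node \"%s\"\n", curr);             /* what is left is the file name */
}
```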
|
| /kernel/trace/ |
| fprobe.c |
    346  int size, curr;  in fprobe_return() local
    357  curr = 0;  in fprobe_return()
    358  while (size_words > curr) {  in fprobe_return()
    359  read_fprobe_header(&fgraph_data[curr], &fp, &size);  in fprobe_return()
    362  curr += FPROBE_HEADER_SIZE_IN_LONG;  in fprobe_return()
    364  if (WARN_ON_ONCE(curr + size > size_words))  in fprobe_return()
    367  size ? fgraph_data + curr : NULL);  in fprobe_return()
    369  curr += size;  in fprobe_return()
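`fprobe_return()` walks a packed array of longs: each record starts with a fixed-size header (`FPROBE_HEADER_SIZE_IN_LONG` words) announcing how many payload words follow, so the cursor advances by header plus payload with a bounds check in between. A small sketch of that record walk; the header packing and every name here are assumptions for illustration only.

```c
#include <stdio.h>

#define HEADER_WORDS 1                      /* stands in for FPROBE_HEADER_SIZE_IN_LONG */

/* Hypothetical packing: high bits identify the probe, low bits give the
 * payload size in words. */
static void read_header(const unsigned long *p, unsigned long *id, int *size)
{
    *id   = p[0] >> 16;
    *size = (int)(p[0] & 0xffff);
}

void walk_records(const unsigned long *data, int size_words)
{
    int curr = 0;

    while (size_words > curr) {
        unsigned long id;
        int size;

        read_header(&data[curr], &id, &size);
        curr += HEADER_WORDS;               /* step over the header */

        if (curr + size > size_words)       /* malformed record: stop */
            break;

        printf("record %lu with %d payload words at offset %d\n", id, size, curr);
        curr += size;                       /* skip payload; next header follows */
    }
}
```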
|
| trace_sched_wakeup.c |
    420  struct task_struct *curr,  in tracing_sched_wakeup_trace() argument
    432  entry->prev_pid = curr->pid;  in tracing_sched_wakeup_trace()
    433  entry->prev_prio = curr->prio;  in tracing_sched_wakeup_trace()
    434  entry->prev_state = task_state_index(curr);  in tracing_sched_wakeup_trace()
|
| trace_functions_graph.c |
    588  struct ftrace_graph_ent_entry *curr)  in get_return_for_leaf() argument
    600  curr = &data->ent.ent;  in get_return_for_leaf()
    630  if (unlikely(curr->ent.type == TRACE_GRAPH_RETADDR_ENT))  in get_return_for_leaf()
    631  data->ent.rent = *(struct fgraph_retaddr_ent_entry *)curr;  in get_return_for_leaf()
    633  data->ent.ent = *curr;  in get_return_for_leaf()
    649  if (curr->ent.pid != next->ent.pid ||  in get_return_for_leaf()
    650  curr->graph_ent.func != next->ret.func)  in get_return_for_leaf()
|
| /kernel/bpf/ |
| cgroup_iter.c |
    103  struct cgroup_subsys_state *curr = (struct cgroup_subsys_state *)v;  in cgroup_iter_seq_next() local
    111  return css_next_descendant_pre(curr, p->start_css);  in cgroup_iter_seq_next()
    113  return css_next_descendant_post(curr, p->start_css);  in cgroup_iter_seq_next()
    115  return curr->parent;  in cgroup_iter_seq_next()
|
| /kernel/events/ |
| uprobes.c |
    1196  struct map_info *curr = NULL;  in build_map_info() local
    1227  info->next = curr;  in build_map_info()
    1228  curr = info;  in build_map_info()
    1238  prev = curr;  in build_map_info()
    1239  while (curr) {  in build_map_info()
    1240  mmput(curr->mm);  in build_map_info()
    1241  curr = curr->next;  in build_map_info()
    1247  curr = ERR_PTR(-ENOMEM);  in build_map_info()
    1258  return curr;  in build_map_info()
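`build_map_info()` builds its result by prepending each new entry to a singly linked list (`info->next = curr; curr = info;`) while scanning, and later walks that list to drop the `mm` reference each entry took. A userspace sketch of the prepend-then-walk pattern; the types and the `put_ref()` stand-in for `mmput()` are illustrative.

```c
#include <stdlib.h>

struct map_info_sketch {
    struct map_info_sketch *next;
    void *mm;                               /* reference held by this entry */
};

static void put_ref(void *mm) { (void)mm; } /* stands in for mmput() */

/* Prepend: the newest entry becomes the new list head. */
static struct map_info_sketch *push_entry(struct map_info_sketch *curr, void *mm)
{
    struct map_info_sketch *info = malloc(sizeof(*info));

    if (!info)
        return curr;                        /* the real code records -ENOMEM instead */
    info->mm = mm;
    info->next = curr;
    return info;
}

/* Drop every reference the list holds, keeping the nodes themselves. */
static void drop_refs(struct map_info_sketch *curr)
{
    while (curr) {
        put_ref(curr->mm);
        curr = curr->next;
    }
}
```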
|