/kernel/locking/
osq_lock.c
      16  struct optimistic_spin_node *next, *prev;  (member)
      96  struct optimistic_spin_node *prev, *next;  (local in osq_lock())
     114  prev = decode_cpu(old);  (in osq_lock())
     115  node->prev = prev;  (in osq_lock())
     129  WRITE_ONCE(prev->next, node);  (in osq_lock())
     147  vcpu_is_preempted(node_cpu(node->prev))))  (in osq_lock())
     164  if (data_race(prev->next) == node &&  (in osq_lock())
     182  prev = READ_ONCE(node->prev);  (in osq_lock())
     192  next = osq_wait_next(lock, node, prev->cpu);  (in osq_lock())
     204  WRITE_ONCE(next->prev, prev);  (in osq_lock())
    [all …]
|
mcs_spinlock.h
      59  struct mcs_spinlock *prev;  (local in mcs_spin_lock())
      71  prev = xchg(lock, node);  (in mcs_spin_lock())
      72  if (likely(prev == NULL)) {  (in mcs_spin_lock())
      83  WRITE_ONCE(prev->next, node);  (in mcs_spin_lock())
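
The osq_lock.c and mcs_spinlock.h hits above are two flavours of the same MCS queueing idea: a waiter atomically exchanges itself in as the queue tail, and if there was a previous tail it publishes itself through prev->next and then spins only on its own node. Below is a minimal userspace sketch of that pattern, using C11 atomics in place of the kernel's xchg()/WRITE_ONCE(); the types and function names are illustrative, not the kernel API.

```c
/*
 * Minimal userspace sketch of the MCS enqueue/handoff pattern shown in the
 * hits above.  C11 atomics stand in for the kernel's xchg()/WRITE_ONCE();
 * all names here are illustrative, not the kernel API.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
        struct mcs_node *_Atomic next;
        atomic_bool locked;             /* set to true by our predecessor on handoff */
};

static void mcs_lock(struct mcs_node *_Atomic *tail, struct mcs_node *node)
{
        struct mcs_node *prev;

        atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
        atomic_store_explicit(&node->locked, false, memory_order_relaxed);

        /* Swap ourselves in as the new tail; the old tail is our predecessor. */
        prev = atomic_exchange(tail, node);
        if (prev == NULL)
                return;                 /* queue was empty: lock acquired at once */

        /* Publish ourselves to the predecessor, then spin on our own flag. */
        atomic_store_explicit(&prev->next, node, memory_order_release);
        while (!atomic_load_explicit(&node->locked, memory_order_acquire))
                ;                       /* the kernel would cpu_relax() here */
}

static void mcs_unlock(struct mcs_node *_Atomic *tail, struct mcs_node *node)
{
        struct mcs_node *next = atomic_load_explicit(&node->next, memory_order_acquire);

        if (next == NULL) {
                /* No successor visible: try to swing the tail back to empty. */
                struct mcs_node *expected = node;

                if (atomic_compare_exchange_strong(tail, &expected, NULL))
                        return;
                /* A successor is enqueueing; wait until it links itself in. */
                while (!(next = atomic_load_explicit(&node->next, memory_order_acquire)))
                        ;
        }
        atomic_store_explicit(&next->locked, true, memory_order_release);
}
```

Because every waiter spins on a flag in its own node, the only contended atomic on the lock path is the tail exchange; the osq_lock() hits build on the same queue but also keep a prev back-pointer so a preempted waiter can unlink itself from the middle of the queue.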
|
qspinlock.c
      89  struct mcs_spinlock *prev) { }  (argument of __pv_wait_node())
     132  struct mcs_spinlock *prev, *next, *node;  (local in queued_spin_lock_slowpath())
     285  prev = decode_tail(old, qnodes);  (in queued_spin_lock_slowpath())
     288  WRITE_ONCE(prev->next, node);  (in queued_spin_lock_slowpath())
     290  pv_wait_node(node, prev);  (in queued_spin_lock_slowpath())
|
lockdep.c
    2552  struct held_lock *prev,  (argument of print_bad_irq_dependency())
    2578  print_lock(prev);  (in print_bad_irq_dependency())
    2580  print_lock_name(prev, hlock_class(prev));  (in print_bad_irq_dependency())
    2859  prev, next,  (in check_irq_usage())
    3029  print_lock(prev);  (in print_deadlock_bug())
    3060  struct held_lock *prev;  (local in check_deadlock())
    3068  nest = prev;  (in check_deadlock())
    3080  class = hlock_class(prev);  (in check_deadlock())
    3138  hlock_class(prev),  (in check_prev_add())
    3415  int prev, curr;  (local in add_chain_block())
    [all …]
|
qspinlock_paravirt.h
     264  pv_wait_early(struct pv_node *prev, int loop)  (argument of pv_wait_early())
     269  return READ_ONCE(prev->state) != VCPU_RUNNING;  (in pv_wait_early())
     290  static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)  (argument of pv_wait_node())
     293  struct pv_node *pp = (struct pv_node *)prev;  (in pv_wait_node())
|
/kernel/bpf/
kmem_cache_iter.c
      38  struct kmem_cache *prev = kit->pos;  (local in bpf_iter_kmem_cache_next())
      42  if (!prev)  (in bpf_iter_kmem_cache_next())
      52  if (prev == KMEM_CACHE_POS_START)  (in bpf_iter_kmem_cache_next())
      54  else if (list_last_entry(&slab_caches, struct kmem_cache, list) == prev)  (in bpf_iter_kmem_cache_next())
      57  next = list_next_entry(prev, list);  (in bpf_iter_kmem_cache_next())
      64  if (prev && prev != KMEM_CACHE_POS_START) {  (in bpf_iter_kmem_cache_next())
      65  if (prev->refcount > 1)  (in bpf_iter_kmem_cache_next())
      66  prev->refcount--;  (in bpf_iter_kmem_cache_next())
      67  else if (prev->refcount == 1)  (in bpf_iter_kmem_cache_next())
      74  kmem_cache_destroy(prev);  (in bpf_iter_kmem_cache_next())
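
The bpf_iter_kmem_cache_next() hits show a pinned-cursor iterator: the element returned last time is kept alive by an extra reference held through kit->pos, and that reference is dropped (possibly destroying the object) once the iterator advances past it. Below is a self-contained sketch of the same pattern on an ordinary singly linked list; the types and helpers are invented stand-ins, not the kernel's kmem_cache API, and a real version would hold the list lock while advancing.

```c
/*
 * Sketch of the pinned-cursor pattern in the hits above: the iterator keeps
 * a reference on the element it handed out last and releases it when it
 * advances.  Types and helpers are invented stand-ins, not the kernel's
 * kmem_cache API; the real code does this under the slab mutex.
 */
#include <stdio.h>
#include <stdlib.h>

struct item {
        struct item *next;
        int refcount;           /* 1 means only the list itself holds it */
        int id;
};

struct iter {
        struct item *head;      /* start of the list */
        struct item *pos;       /* element returned by the previous call */
};

static void item_put(struct item *it)
{
        /* Drop one reference; free the item once nobody holds it. */
        if (--it->refcount == 0)
                free(it);
}

static struct item *iter_next(struct iter *kit)
{
        struct item *prev = kit->pos;
        struct item *next;

        /* First call starts at the head; later calls continue after prev. */
        next = prev ? prev->next : kit->head;
        if (next)
                next->refcount++;       /* pin what we are about to return */
        kit->pos = next;

        /* Unpin the previously returned element now that we have moved on. */
        if (prev)
                item_put(prev);
        return next;
}

int main(void)
{
        struct item *items[3], *cur;
        struct iter it = { 0 };

        for (int i = 2; i >= 0; i--) {
                cur = calloc(1, sizeof(*cur));
                cur->id = i + 1;
                cur->refcount = 1;      /* the list's own reference */
                cur->next = it.head;
                it.head = cur;
                items[i] = cur;
        }

        while ((cur = iter_next(&it)))
                printf("visited %d\n", cur->id);

        for (int i = 0; i < 3; i++)
                item_put(items[i]);     /* drop the list's references */
        return 0;
}
```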
|
rqspinlock.c
     220  u64 prev = ts->cur;  (local in check_timeout())
     235  if (prev + NSEC_PER_MSEC < time) {  (in check_timeout())
     350  struct mcs_spinlock *prev, *next, *node;  (local in resilient_queued_spin_lock_slowpath())
     536  prev = decode_tail(old, rqnodes);  (in resilient_queued_spin_lock_slowpath())
     539  WRITE_ONCE(prev->next, node);  (in resilient_queued_spin_lock_slowpath())
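
The first two check_timeout() hits are a simple throttle: remember in ts->cur when the expensive timeout/deadlock check last ran, and only repeat it once at least a millisecond has passed. A standalone sketch of that once-per-millisecond gate follows, using clock_gettime() and invented names rather than the rqspinlock internals.

```c
/*
 * Sketch of the once-per-millisecond throttle seen in the check_timeout()
 * hits: remember when the expensive check last ran and skip it until
 * NSEC_PER_MSEC has passed.  Invented names; userspace clock_gettime()
 * stands in for the kernel's timekeeping.
 */
#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_MSEC   1000000ULL

struct timeout_state {
        uint64_t cur;           /* timestamp of the last expensive check */
};

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Returns 1 if the expensive check actually ran this time. */
static int maybe_check(struct timeout_state *ts)
{
        uint64_t prev = ts->cur;
        uint64_t time = now_ns();

        if (prev + NSEC_PER_MSEC < time) {
                ts->cur = time;
                /* ... run the expensive deadlock/timeout detection here ... */
                return 1;
        }
        return 0;
}

int main(void)
{
        struct timeout_state ts = { .cur = 0 };
        int ran = 0;

        for (int i = 0; i < 1000000; i++)
                ran += maybe_check(&ts);
        printf("expensive check ran %d times\n", ran);
        return 0;
}
```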
|
/kernel/sched/
stats.h
     111  void psi_task_switch(struct task_struct *prev, struct task_struct *next,
     114  void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev);
     117  struct task_struct *prev) {}  (argument of psi_account_irqtime())
     213  static inline void psi_sched_switch(struct task_struct *prev,  (argument of psi_sched_switch())
     220  psi_task_switch(prev, next, sleep);  (in psi_sched_switch())
     227  static inline void psi_sched_switch(struct task_struct *prev,  (argument of psi_sched_switch())
     231  struct task_struct *prev) {}  (argument of psi_account_irqtime())
     319  sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)  (argument of sched_info_switch())
     326  if (prev != rq->idle)  (in sched_info_switch())
     327  sched_info_depart(rq, prev);  (in sched_info_switch())
|
cputime.c
     564  if (prev->stime + prev->utime >= rtime)  (in cputime_adjust())
     603  if (stime < prev->stime)  (in cputime_adjust())
     604  stime = prev->stime;  (in cputime_adjust())
     611  if (utime < prev->utime) {  (in cputime_adjust())
     612  utime = prev->utime;  (in cputime_adjust())
     616  prev->stime = stime;  (in cputime_adjust())
     617  prev->utime = utime;  (in cputime_adjust())
     619  *ut = prev->utime;  (in cputime_adjust())
     620  *st = prev->stime;  (in cputime_adjust())
     779  struct vtime *vtime = &prev->vtime;  (in vtime_task_switch_generic())
    [all …]
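
The cputime_adjust() hits all serve one rule: the utime/stime pair reported to userspace must never go backwards, so each new sample is clamped against the values remembered in prev before being stored and returned. A simplified sketch of that monotonic clamp is below; the real function additionally rescales the raw samples against the task's total runtime with a 128-bit mul/div helper, and the name here is a stand-in, not the kernel function.

```c
/*
 * Sketch of the monotonicity rule visible in the cputime_adjust() hits:
 * never report less utime or stime than was reported before, and keep
 * utime + stime equal to the total runtime seen so far.  Simplified types;
 * not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

struct prev_cputime {
        u64 utime;              /* last values handed out to userspace */
        u64 stime;
};

static void cputime_adjust_sketch(struct prev_cputime *prev, u64 rtime,
                                  u64 stime, u64 utime, u64 *ut, u64 *st)
{
        /* Already reported at least this much total time: stand still. */
        if (prev->stime + prev->utime >= rtime)
                goto out;

        /*
         * Split rtime in the sampled stime:utime proportion.  The kernel
         * uses a 128-bit mul/div helper here to avoid overflow.
         */
        if (stime + utime == 0)
                stime = 0;
        else
                stime = stime * rtime / (stime + utime);

        /* Neither component may go backwards compared to prev. */
        if (stime < prev->stime)
                stime = prev->stime;
        utime = rtime - stime;
        if (utime < prev->utime) {
                utime = prev->utime;
                stime = rtime - utime;
        }

        prev->stime = stime;
        prev->utime = utime;
out:
        *ut = prev->utime;
        *st = prev->stime;
}

int main(void)
{
        struct prev_cputime prev = { .utime = 40, .stime = 60 };
        u64 ut, st;

        /* New sample says stime dropped to 50; the clamp keeps it at 60. */
        cputime_adjust_sketch(&prev, 120, 50, 70, &ut, &st);
        printf("utime=%llu stime=%llu\n",
               (unsigned long long)ut, (unsigned long long)st);
        return 0;
}
```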
|
core.c
    5156  rseq_preempt(prev);  (in prepare_task_switch())
    5221  finish_task(prev);  (in finish_task_switch())
    5255  prev->sched_class->task_dead(prev);  (in finish_task_switch())
    5258  put_task_stack(prev);  (in finish_task_switch())
    5357  switch_to(prev, next, prev);  (in context_switch())
    5895  prev->comm, prev->pid, preempt_count());  (in __schedule_bug())
    5925  if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {  (in schedule_debug())
    5927  prev->comm, prev->pid, prev->non_block_count);  (in schedule_debug())
    6836  prev = rq->curr;  (in __schedule())
    6886  next = prev;  (in __schedule())
    [all …]
|
autogroup.c
     162  struct autogroup *prev;  (local in autogroup_move_group())
     169  prev = p->signal->autogroup;  (in autogroup_move_group())
     170  if (prev == ag) {  (in autogroup_move_group())
     191  autogroup_kref_put(prev);  (in autogroup_move_group())
|
psi.c
     926  void psi_task_switch(struct task_struct *prev, struct task_struct *next,  (argument of psi_task_switch())
     930  int cpu = task_cpu(prev);  (in psi_task_switch())
     954  if (prev->pid) {  (in psi_task_switch())
     966  if (prev->in_memstall)  (in psi_task_switch())
     968  if (prev->in_iowait)  (in psi_task_switch())
     977  if (unlikely((prev->flags & PF_WQ_WORKER) &&  (in psi_task_switch())
     978  wq_worker_last_func(prev) == psi_avgs_work))  (in psi_task_switch())
     982  psi_flags_change(prev, clear, set);  (in psi_task_switch())
     984  for_each_group(group, task_psi_group(prev)) {  (in psi_task_switch())
     996  if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {  (in psi_task_switch())
    [all …]
|
sched.h
    2444  WARN_ON_ONCE(rq->donor != prev);  (in put_prev_task())
    2445  prev->sched_class->put_prev_task(rq, prev, NULL);  (in put_prev_task())
    2458  prev->dl_server = NULL;  (in __put_prev_set_next_dl_server())
    2464  struct task_struct *prev,  (argument of put_prev_set_next_task())
    2467  WARN_ON_ONCE(rq->donor != prev);  (in put_prev_set_next_task())
    2471  if (next == prev)  (in put_prev_set_next_task())
    2474  prev->sched_class->put_prev_task(rq, prev, next);  (in put_prev_set_next_task())
    3735  struct task_struct *prev,  (argument of switch_mm_cid())
    3776  if (prev->mm_cid_active) {  (in switch_mm_cid())
    3778  mm_cid_put_lazy(prev);  (in switch_mm_cid())
    [all …]
|
stop_task.c
      19  balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)  (argument of balance_stop())
      61  static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct task_struct *next)  (argument of put_prev_task_stop())
|
idle.c
     442  balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)  (argument of balance_idle())
     455  static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next)  (argument of put_prev_task_idle())
     457  dl_server_update_idle_time(rq, prev);  (in put_prev_task_idle())
|
/kernel/trace/rv/monitors/sssw/
sssw.c
      32  struct task_struct *prev,  (argument of handle_sched_switch())
      37  da_handle_event_sssw(prev, sched_switch_preempt_sssw);  (in handle_sched_switch())
      39  da_handle_event_sssw(prev, sched_switch_yield_sssw);  (in handle_sched_switch())
      42  da_handle_event_sssw(prev, sched_switch_blocking_sssw);  (in handle_sched_switch())
      44  da_handle_event_sssw(prev, sched_switch_suspend_sssw);  (in handle_sched_switch())
|
/kernel/
scs.c
     133  unsigned long *p, prev, curr = highest, used = 0;  (local in scs_check_usage())
     145  prev = cmpxchg_relaxed(&highest, curr, used);  (in scs_check_usage())
     147  if (prev == curr) {  (in scs_check_usage())
     153  curr = prev;  (in scs_check_usage())
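
The scs_check_usage() hits are a lock-free high-water-mark update: read the current maximum, try to cmpxchg the new value in while it is still larger, and on failure adopt the returned prev value as the new expectation and re-check. A minimal sketch of that loop follows, with C11 atomics standing in for the kernel's cmpxchg_relaxed(); the names are illustrative.

```c
/*
 * Sketch of the cmpxchg watermark loop shown above: record a new high-water
 * mark without a lock, retrying only while our value still beats whatever
 * other CPUs have published.  C11 atomics stand in for cmpxchg_relaxed().
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long highest;   /* largest usage seen so far */

static int record_usage(unsigned long used)
{
        unsigned long curr = atomic_load_explicit(&highest, memory_order_relaxed);

        while (used > curr) {
                /*
                 * Try to install our value; on failure curr is refreshed with
                 * the value that beat us and the loop condition re-checks.
                 */
                if (atomic_compare_exchange_weak_explicit(&highest, &curr, used,
                                                          memory_order_relaxed,
                                                          memory_order_relaxed))
                        return 1;       /* we set a new record */
        }
        return 0;                       /* an existing record stands */
}

int main(void)
{
        record_usage(512);
        record_usage(384);              /* smaller: leaves the record alone */
        if (record_usage(1024))
                printf("new highest: %lu bytes\n",
                       atomic_load_explicit(&highest, memory_order_relaxed));
        return 0;
}
```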
|
smp.c
     483  struct llist_node *entry, *prev;  (local in __flush_smp_call_function_queue())
     532  prev = NULL;  (in __flush_smp_call_function_queue())
     539  if (prev) {  (in __flush_smp_call_function_queue())
     540  prev->next = &csd_next->node.llist;  (in __flush_smp_call_function_queue())
     550  prev = &csd->node.llist;  (in __flush_smp_call_function_queue())
     560  prev = NULL;  (in __flush_smp_call_function_queue())
     565  if (prev) {  (in __flush_smp_call_function_queue())
     566  prev->next = &csd_next->node.llist;  (in __flush_smp_call_function_queue())
     584  prev = &csd->node.llist;  (in __flush_smp_call_function_queue())
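
The __flush_smp_call_function_queue() hits show the usual way to delete nodes from a singly linked list while walking it: carry a prev pointer, and when a node must be taken out, bypass it by rewriting prev->next (or the head when there is no prev yet). A generic sketch of that splice-out walk is below; plain pointers suffice because, as in the kernel code, the walker already owns the snapshot of the list it is editing. The types are generic, not the kernel's csd/llist types.

```c
/*
 * Sketch of the prev-pointer splice-out seen above: walk a singly linked
 * list and unlink the nodes that match, fixing up either the head or the
 * previous node's next pointer.  Generic types, not the kernel's csd list.
 */
#include <stdio.h>
#include <stddef.h>

struct node {
        struct node *next;
        int val;
};

/* Remove every node whose value is odd; returns the new head. */
static struct node *remove_odd(struct node *head)
{
        struct node *prev = NULL;
        struct node *cur = head;

        while (cur) {
                struct node *next = cur->next;

                if (cur->val & 1) {
                        /* Unlink cur: route prev (or the head) around it. */
                        if (prev)
                                prev->next = next;
                        else
                                head = next;
                        /* cur is now ours; the kernel hands the csd to its handler here. */
                } else {
                        prev = cur;     /* kept: it becomes the new prev */
                }
                cur = next;
        }
        return head;
}

int main(void)
{
        struct node n3 = { NULL, 3 }, n2 = { &n3, 2 }, n1 = { &n2, 1 };
        struct node *head = remove_odd(&n1);

        for (struct node *p = head; p; p = p->next)
                printf("%d ", p->val);          /* prints: 2 */
        printf("\n");
        return 0;
}
```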
|
/kernel/gcov/
base.c
     108  struct gcov_info *prev = NULL;  (local in gcov_module_notifier())
     117  gcov_info_unlink(prev, info);  (in gcov_module_notifier())
     121  prev = info;  (in gcov_module_notifier())
|
/kernel/dma/
pool.c
     224  static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)  (argument of dma_guess_pool())
     226  if (prev == NULL) {  (in dma_guess_pool())
     233  if (prev == atomic_pool_kernel)  (in dma_guess_pool())
     235  if (prev == atomic_pool_dma32)  (in dma_guess_pool())
|
/kernel/trace/
trace_sched_wakeup.c
     393  struct task_struct *prev,  (argument of tracing_sched_switch_trace())
     406  entry->prev_pid = prev->pid;  (in tracing_sched_switch_trace())
     407  entry->prev_prio = prev->prio;  (in tracing_sched_switch_trace())
     408  entry->prev_state = task_state_index(prev);  (in tracing_sched_switch_trace())
     445  struct task_struct *prev, struct task_struct *next,  (argument of probe_wakeup_sched_switch())
     455  tracing_record_cmdline(prev);  (in probe_wakeup_sched_switch())
     491  tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);  (in probe_wakeup_sched_switch())
|
trace_sched_switch.c
      26  struct task_struct *prev, struct task_struct *next,  (argument of probe_sched_switch())
      36  tracing_record_taskinfo_sched_switch(prev, next, flags);  (in probe_sched_switch())
     403  void tracing_record_taskinfo_sched_switch(struct task_struct *prev,  (argument of tracing_record_taskinfo_sched_switch())
     415  done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);  (in tracing_record_taskinfo_sched_switch())
     417  done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);  (in tracing_record_taskinfo_sched_switch())
|
/kernel/trace/rv/monitors/snroc/
snroc.c
      28  struct task_struct *prev,  (argument of handle_sched_switch())
      32  da_handle_start_event_snroc(prev, sched_switch_out_snroc);  (in handle_sched_switch())
|
/kernel/time/
tick-sched.c
     480  int prev;  (local in tick_nohz_dep_set_all())
     482  prev = atomic_fetch_or(BIT(bit), dep);  (in tick_nohz_dep_set_all())
     483  if (!prev)  (in tick_nohz_dep_set_all())
     507  int prev;  (local in tick_nohz_dep_set_cpu())
     512  prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);  (in tick_nohz_dep_set_cpu())
     513  if (!prev) {  (in tick_nohz_dep_set_cpu())
     560  int prev;  (local in tick_nohz_dep_set_signal())
     563  prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask);  (in tick_nohz_dep_set_signal())
     564  if (!prev) {  (in tick_nohz_dep_set_signal())
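
All three tick_nohz_dep_set_*() hits follow the same idiom: OR the dependency bit into a shared mask with atomic_fetch_or(), and only perform the expensive follow-up (kicking the target CPUs) when the returned old value shows the mask was previously empty, i.e. this caller created the first dependency. A small C11 sketch of that set-and-act-once pattern, with invented names standing in for the kernel's helpers.

```c
/*
 * Sketch of the set-bit-and-act-once idiom shown above: OR a bit into a
 * shared mask and trigger the follow-up only if the mask was empty before,
 * so concurrent callers do not all kick the CPU.  C11 atomics stand in for
 * the kernel's atomic_fetch_or().
 */
#include <stdatomic.h>
#include <stdio.h>

#define BIT(n)  (1u << (n))

static _Atomic unsigned int tick_dep_mask;

static void kick_target(void)
{
        /* Placeholder for the expensive part (IPI / timer reprogramming). */
        printf("kicking target cpu\n");
}

static void dep_set(int bit)
{
        unsigned int prev;

        prev = atomic_fetch_or_explicit(&tick_dep_mask, BIT(bit),
                                        memory_order_relaxed);
        if (!prev)
                kick_target();  /* only the caller that set the first bit kicks */
}

int main(void)
{
        dep_set(2);             /* mask was 0: kicks */
        dep_set(5);             /* mask already nonzero: no second kick */
        return 0;
}
```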
|
/kernel/livepatch/
transition.c
     232  struct klp_func *prev;  (local in klp_check_stack_func())
     234  prev = list_next_entry(func, stack_node);  (in klp_check_stack_func())
     235  func_addr = (unsigned long)prev->new_func;  (in klp_check_stack_func())
     236  func_size = prev->new_size;  (in klp_check_stack_func())
|