| /kernel/locking/ |
| osq_lock.c |
      81  if (node->next) {                         in osq_wait_next()
      84  next = xchg(&node->next, NULL);           in osq_wait_next()
      85  if (next)                                 in osq_wait_next()
      86  return next;                              in osq_wait_next()
     101  node->next = NULL;                        in osq_lock()
     129  WRITE_ONCE(prev->next, node);             in osq_lock()
     193  if (!next)                                in osq_lock()
     205  WRITE_ONCE(prev->next, next);             in osq_lock()
     225  next = xchg(&node->next, NULL);           in osq_unlock()
     226  if (next) {                               in osq_unlock()
    [all …]
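
The osq hits above show the lock's hand-off trick: a successor is claimed with xchg(&node->next, NULL), so when an unlocker and an unqueueing waiter race for the same successor, exactly one of them sees a non-NULL result. A minimal userspace sketch of that idiom, assuming C11 atomics and made-up names (struct qnode, claim_next; the kernel uses per-CPU struct optimistic_spin_node):

/* Atomically detach and return our successor (or NULL). Because the
 * pointer is taken with an atomic exchange, a racing path that also
 * tries to claim it sees NULL and backs off -- only one side wins. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct qnode {
    _Atomic(struct qnode *) next;
};

static struct qnode *claim_next(struct qnode *node)
{
    return atomic_exchange(&node->next, NULL);
}

int main(void)
{
    struct qnode a, b;

    atomic_store(&a.next, NULL);
    atomic_store(&b.next, NULL);
    atomic_store(&a.next, &b);          /* b queued behind a */

    printf("first claim:  %s\n", claim_next(&a) == &b ? "got successor" : "none");
    printf("second claim: %s\n", claim_next(&a) ? "got successor" : "none");
    return 0;
}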
|
| qspinlock.c |
     132  struct mcs_spinlock *prev, *next, *node;           in queued_spin_lock_slowpath() local
     252  node->next = NULL;                                 in queued_spin_lock_slowpath()
     278  next = NULL;                                       in queued_spin_lock_slowpath()
     288  WRITE_ONCE(prev->next, node);                      in queued_spin_lock_slowpath()
     299  next = READ_ONCE(node->next);                      in queued_spin_lock_slowpath()
     300  if (next)                                          in queued_spin_lock_slowpath()
     301  prefetchw(next);                                   in queued_spin_lock_slowpath()
     367  if (!next)                                         in queued_spin_lock_slowpath()
     368  next = smp_cond_load_relaxed(&node->next, (VAL));  in queued_spin_lock_slowpath()
     370  arch_mcs_spin_unlock_contended(&next->locked);     in queued_spin_lock_slowpath()
    [all …]
|
| mcs_spinlock.h |
      63  node->next = NULL;                                   in mcs_spin_lock()
      83  WRITE_ONCE(prev->next, node);                        in mcs_spin_lock()
      96  struct mcs_spinlock *next = READ_ONCE(node->next);   in mcs_spin_unlock() local
      98  if (likely(!next)) {                                 in mcs_spin_unlock()
     105  while (!(next = READ_ONCE(node->next)))              in mcs_spin_unlock()
     110  arch_mcs_spin_unlock_contended(&next->locked);       in mcs_spin_unlock()
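
Taken together, the mcs_spinlock.h hits (and the qspinlock slowpath above, which embeds the same queue) trace the full MCS protocol: enqueue by exchanging the tail pointer, link prev->next, spin on a private flag; unlock hands ownership to next, waiting out the window where a new waiter has swapped the tail but not yet linked itself. A compact userspace sketch, assuming C11 atomics stand in for WRITE_ONCE/READ_ONCE and the arch_mcs_* hooks:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
    _Atomic(struct mcs_node *) next;
    atomic_bool locked;                 /* becomes true when we own the lock */
};

static _Atomic(struct mcs_node *) tail; /* the lock: last queued waiter */

static void mcs_lock(struct mcs_node *node)
{
    atomic_store(&node->next, NULL);
    atomic_store(&node->locked, false);

    /* Swap ourselves in as the new tail. */
    struct mcs_node *prev = atomic_exchange(&tail, node);
    if (!prev)
        return;                         /* queue was empty: lock acquired */

    /* Link in behind prev, then spin on our own flag. */
    atomic_store(&prev->next, node);
    while (!atomic_load(&node->locked))
        ;                               /* each waiter spins on its own node */
}

static void mcs_unlock(struct mcs_node *node)
{
    struct mcs_node *next = atomic_load(&node->next);

    if (!next) {
        /* No visible successor: try to reset the tail to empty. */
        struct mcs_node *expected = node;
        if (atomic_compare_exchange_strong(&tail, &expected, NULL))
            return;
        /* A new waiter swapped the tail but hasn't linked yet: wait. */
        while (!(next = atomic_load(&node->next)))
            ;
    }
    atomic_store(&next->locked, true);  /* hand the lock off */
}

int main(void)
{
    struct mcs_node node;

    mcs_lock(&node);                    /* uncontended: prev == NULL, no spin */
    mcs_unlock(&node);
    return 0;
}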
|
| lockdep.c |
    2553  struct held_lock *next,                    in print_bad_irq_dependency() argument
    2575  print_lock(next);                          in print_bad_irq_dependency()
    2582  print_lock_name(next, hlock_class(next));  in print_bad_irq_dependency()
    2812  bfs_init_root(&that, next);                in check_irq_usage()
    2859  prev, next,                                in check_irq_usage()
    3011  struct held_lock *next)                    in print_deadlock_bug() argument
    3027  print_lock(next);                          in print_deadlock_bug()
    3142  hlock_class(next),                         in check_prev_add()
    3385  next &= ~CHAIN_BLK_FLAG;                   in chain_block_next()
    3386  next <<= 16;                               in chain_block_next()
    [all …]
|
| /kernel/trace/rv/monitors/sleep/ |
| sleep.h |
     166  __set_bit(S0, next);   in ltl_possible_next_states()
     168  __set_bit(S1, next);   in ltl_possible_next_states()
     170  __set_bit(S4, next);   in ltl_possible_next_states()
     172  __set_bit(S5, next);   in ltl_possible_next_states()
     176  __set_bit(S1, next);   in ltl_possible_next_states()
     178  __set_bit(S2, next);   in ltl_possible_next_states()
     180  __set_bit(S3, next);   in ltl_possible_next_states()
     182  __set_bit(S4, next);   in ltl_possible_next_states()
     184  __set_bit(S6, next);   in ltl_possible_next_states()
     186  __set_bit(S7, next);   in ltl_possible_next_states()
    [all …]
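
These hits are the generated next-state function of an RV monitor: for the current automaton state and the observed atoms, it sets a bit for every state the monitor may legally move to. A sketch of the shape with an invented two-state automaton (the kernel's sleep monitor has states S0..S7 generated from an LTL formula, and uses the real __set_bit()):

#include <stdbool.h>
#include <stdio.h>

enum state { S0, S1, MAX_STATE };

static void set_bit(int nr, unsigned long *mask)
{
    *mask |= 1UL << nr;
}

/* From @state, given whether the monitored atom holds, record which
 * states the monitor may move to next. */
static void possible_next_states(enum state state, bool atom,
                                 unsigned long *next)
{
    switch (state) {
    case S0:
        set_bit(S0, next);
        if (atom)
            set_bit(S1, next);      /* atom true: may advance */
        break;
    case S1:
        if (atom)
            set_bit(S1, next);      /* stay while the atom holds */
        else
            set_bit(S0, next);      /* atom dropped: fall back */
        break;
    default:
        break;
    }
}

int main(void)
{
    unsigned long next = 0;

    possible_next_states(S0, true, &next);
    printf("next-state mask from S0: %#lx\n", next);  /* S0 and S1 set */
    return 0;
}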
|
| /kernel/bpf/ |
| kmem_cache_iter.c |
      39  struct kmem_cache *next;                                         in bpf_iter_kmem_cache_next() local
      53  next = list_first_entry(&slab_caches, struct kmem_cache, list);  in bpf_iter_kmem_cache_next()
      55  next = NULL;                                                     in bpf_iter_kmem_cache_next()
      57  next = list_next_entry(prev, list);                              in bpf_iter_kmem_cache_next()
      60  if (next && next->refcount > 0)                                  in bpf_iter_kmem_cache_next()
      61  next->refcount++;                                                in bpf_iter_kmem_cache_next()
      76  kit->pos = next;                                                 in bpf_iter_kmem_cache_next()
      77  return next;                                                     in bpf_iter_kmem_cache_next()
     199  .next = kmem_cache_iter_seq_next,
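
The iterator step above returns the first list entry when there is no previous position, NULL past the end, and the successor otherwise, pinning whatever it returns with a reference so it stays valid after the lock is dropped. A simplified userspace sketch with hypothetical names (the kernel walks slab_caches under slab_mutex and uses the list_entry helpers):

#include <stddef.h>
#include <stdio.h>

struct cache {
    const char *name;
    int refcount;
    struct cache *next;                 /* stand-in for the list_head linkage */
};

static struct cache *iter_next(struct cache *head, struct cache *prev)
{
    struct cache *next;

    if (!prev)
        next = head;                    /* first call: start of the list */
    else
        next = prev->next;              /* advance one element */

    /* Pin live caches only, mirroring the refcount > 0 check above. */
    if (next && next->refcount > 0)
        next->refcount++;
    return next;
}

int main(void)
{
    struct cache b = { "cache-b", 1, NULL };
    struct cache a = { "cache-a", 1, &b };

    for (struct cache *c = iter_next(&a, NULL); c; c = iter_next(&a, c))
        printf("%s (refcount now %d)\n", c->name, c->refcount);
    return 0;
}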
|
| rqspinlock.c |
     350  struct mcs_spinlock *prev, *next, *node;         in resilient_queued_spin_lock_slowpath() local
     502  node->next = NULL;                               in resilient_queued_spin_lock_slowpath()
     527  next = NULL;                                     in resilient_queued_spin_lock_slowpath()
     539  WRITE_ONCE(prev->next, node);                    in resilient_queued_spin_lock_slowpath()
     553  next = READ_ONCE(node->next);                    in resilient_queued_spin_lock_slowpath()
     554  if (next)                                        in resilient_queued_spin_lock_slowpath()
     555  prefetchw(next);                                 in resilient_queued_spin_lock_slowpath()
     605  next = smp_cond_load_relaxed(&node->next, VAL);  in resilient_queued_spin_lock_slowpath()
     606  WRITE_ONCE(next->locked, RES_TIMEOUT_VAL);       in resilient_queued_spin_lock_slowpath()
     643  if (!next)                                       in resilient_queued_spin_lock_slowpath()
    [all …]
|
| /kernel/trace/ |
| pid_list.c |
      22  pid_list->lower_list = chunk->next;   in get_lower_chunk()
      25  chunk->next = NULL;                   in get_lower_chunk()
      46  pid_list->upper_list = chunk->next;   in get_upper_chunk()
      49  chunk->next = NULL;                   in get_upper_chunk()
      65  chunk->next = pid_list->lower_list;   in put_lower_chunk()
      75  chunk->next = pid_list->upper_list;   in put_upper_chunk()
     270  unsigned int *next)                   in trace_pid_list_next() argument
     357  upper_next = &chunk->next;            in pid_list_refill_irq()
     368  lower_next = &chunk->next;            in pid_list_refill_irq()
     429  chunk->next = pid_list->upper_list;   in trace_pid_list_alloc()
    [all …]
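
get_lower_chunk()/put_lower_chunk() and their upper twins are a plain singly linked freelist: allocation pops the head, freeing pushes it back. A sketch of the pattern (the kernel does both under the pid_list spinlock, omitted here; names are illustrative):

#include <stddef.h>

struct chunk {
    struct chunk *next;
    unsigned long data[8];
};

static struct chunk *free_list;

static struct chunk *get_chunk(void)
{
    struct chunk *chunk = free_list;

    if (!chunk)
        return NULL;                    /* freelist empty; refill elsewhere */
    free_list = chunk->next;            /* pop the head */
    chunk->next = NULL;                 /* detach before handing it out */
    return chunk;
}

static void put_chunk(struct chunk *chunk)
{
    chunk->next = free_list;            /* push back onto the head */
    free_list = chunk;
}

int main(void)
{
    struct chunk c = { NULL, { 0 } };

    put_chunk(&c);                      /* freelist: [c] */
    return get_chunk() == &c ? 0 : 1;
}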
|
| rethook.c |
      28  node = node->next;                     in rethook_flush_task()
     214  node = node->next;                     in __rethook_find_ret_addr()
     222  node = node->next;                     in __rethook_find_ret_addr()
     317  first = first->next;                   in rethook_trampoline_handler()
     325  current->rethooks.first = node->next;  in rethook_trampoline_handler()
     326  node->next = NULL;                     in rethook_trampoline_handler()
     330  first = first->next;                   in rethook_trampoline_handler()
|
| ftrace.c |
     325  *p = (*p)->next;                in remove_ftrace_ops()
     469  pg = pg->next;                  in function_stat_next()
     600  pg = pg->next;                  in ftrace_profile_reset()
     641  if (!pg->next)                  in ftrace_profile_pages_init()
     643  pg = pg->next;                  in ftrace_profile_pages_init()
     653  pg = pg->next;                  in ftrace_profile_pages_init()
    3936  next = func_probes->next;       in t_probe_next()
    3958  next = iter->probe->list.next;  in t_probe_next()
    5720  if (!next) {                    in ftrace_process_regex()
    6287  if (next) {                     in ftrace_set_regex()
    [all …]
|
| trace_sched_switch.c |
      26  struct task_struct *prev, struct task_struct *next,                   in probe_sched_switch() argument
      36  tracing_record_taskinfo_sched_switch(prev, next, flags);              in probe_sched_switch()
     404  struct task_struct *next, int flags)                                  in tracing_record_taskinfo_sched_switch() argument
     416  done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);  in tracing_record_taskinfo_sched_switch()
     418  done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);        in tracing_record_taskinfo_sched_switch()
     495  .next = saved_tgids_next,
     574  .next = saved_cmdlines_next,
|
| /kernel/printk/ |
| printk_ringbuffer.c |
     330  LPOS_DATALESS((blk)->next))
     624  lpos_begin = blk_lpos->next;                                              in data_make_reusable()
    1057  blk_lpos->next = FAILED_LPOS;                                             in data_alloc()
    1096  blk_lpos->next = next_lpos;                                               in data_alloc()
    1124  if (head_lpos != blk_lpos->next)                                          in data_realloc()
    1177  blk_lpos->next = next_lpos;                                               in data_realloc()
    1256  WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {  in get_data()
    1794  const char *next = text;                                                  in count_lines() local
    1797  next = memchr(next, '\n', next_size);                                     in count_lines()
    1798  if (!next)                                                                in count_lines()
    [all …]
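

The count_lines() hits at the end show the memchr() idiom: repeatedly search the remaining bytes for '\n', stepping just past each hit. A self-contained sketch of that loop (the arithmetic on the remaining size is the part worth copying carefully):

#include <stdio.h>
#include <string.h>

static unsigned int count_lines(const char *text, size_t size)
{
    const char *next = text;
    size_t next_size = size;
    unsigned int lines = 1;             /* a non-empty buffer is at least one line */

    while (next_size) {
        next = memchr(next, '\n', next_size);
        if (!next)
            break;
        lines++;
        next++;                         /* step past the newline */
        next_size = size - (size_t)(next - text);
    }
    return lines;
}

int main(void)
{
    const char buf[] = "one\ntwo\nthree";

    printf("%u\n", count_lines(buf, sizeof(buf) - 1));  /* prints 3 */
    return 0;
}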
|
| /kernel/ |
| task_work.c |
      72  work->next = head;                                 in task_work_add()
     130  pprev = &work->next;                               in task_work_cancel_match()
     132  } else if (try_cmpxchg(pprev, &work, work->next))  in task_work_cancel_match()
     197  struct callback_head *work, *head, *next;          in task_work_run() local
     226  next = work->next;                                 in task_work_run()
     228  work = next;                                       in task_work_run()
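
task_work_add() publishes a callback lock-free: point the new entry at the current head, then compare-and-swap the head, retrying if another CPU won the race. A userspace sketch with C11 atomics (the kernel pairs this with a cmpxchg-based cancel walk and per-task heads, not shown):

#include <stdatomic.h>
#include <stddef.h>

struct callback_head {
    struct callback_head *next;
    void (*func)(struct callback_head *);
};

static _Atomic(struct callback_head *) head;

static void work_add(struct callback_head *work)
{
    struct callback_head *old = atomic_load(&head);

    do {
        work->next = old;               /* link before publishing */
    } while (!atomic_compare_exchange_weak(&head, &old, work));
}

/* Detach the whole list at once; entries come back in LIFO order. */
static struct callback_head *work_take_all(void)
{
    return atomic_exchange(&head, NULL);
}

int main(void)
{
    struct callback_head a = { NULL, NULL }, b = { NULL, NULL };

    work_add(&a);
    work_add(&b);
    return work_take_all() == &b ? 0 : 1;   /* LIFO: b pops first */
}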
|
| jump_label.c |
     623  struct static_key_mod *next;         member
     647  key->next = mod;                     in static_key_set_mod()
     752  jlm2->next = NULL;                   in jump_label_add_module()
     758  jlm->next = static_key_mod(key);     in jump_label_add_module()
     796  prev = &key->next;                   in jump_label_del_module()
     800  prev = &jlm->next;                   in jump_label_del_module()
     801  jlm = jlm->next;                     in jump_label_del_module()
     808  if (prev == &key->next)              in jump_label_del_module()
     809  static_key_set_mod(key, jlm->next);  in jump_label_del_module()
     811  *prev = jlm->next;                   in jump_label_del_module()
    [all …]
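
jump_label_del_module() walks the per-key module list with a pointer to the previous ->next field rather than to the previous node, so unlinking the head and unlinking an interior entry are the same single store. A sketch of that pointer-to-pointer idiom with illustrative names:

#include <stddef.h>

struct mod_entry {
    int id;
    struct mod_entry *next;
};

static void unlink_entry(struct mod_entry **headp, int id)
{
    struct mod_entry **prev = headp;
    struct mod_entry *cur = *headp;

    while (cur && cur->id != id) {
        prev = &cur->next;              /* remember where we'd have to write */
        cur = cur->next;
    }
    if (cur)
        *prev = cur->next;              /* removes head or interior alike */
}

int main(void)
{
    struct mod_entry c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    struct mod_entry *head = &a;

    unlink_entry(&head, 2);             /* interior unlink */
    unlink_entry(&head, 1);             /* head unlink, same code path */
    return head == &c ? 0 : 1;
}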
|
| resource.c |
     147  .next = r_next,
     692  goto next;                                             in __find_resource_space()
     887  for (next = first; ; next = next->sibling) {           in __insert_resource()
     889  if (next->start < new->start || next->end > new->end)  in __insert_resource()
     890  return next;                                           in __insert_resource()
     891  if (!next->sibling)                                    in __insert_resource()
     901  next->sibling = NULL;                                  in __insert_resource()
     902  for (next = first; next; next = next->sibling)         in __insert_resource()
     903  next->parent = new;                                    in __insert_resource()
     910  next = next->sibling;                                  in __insert_resource()
    [all …]
|
| static_call_inline.c |
     157  .next = static_call_key_next(key),                              in __static_call_update()
     162  for (site_mod = &first; site_mod; site_mod = site_mod->next) {  in __static_call_update()
     269  site_mod->next = NULL;                                          in __static_call_init()
     281  site_mod->next = static_call_key_next(key);                     in __static_call_init()
     431  prev = &site_mod->next, site_mod = site_mod->next)              in static_call_del_module()
     437  *prev = site_mod->next;                                         in static_call_del_module()
|
| /kernel/rcu/ |
| tiny.c |
     103  struct rcu_head *next, *list;       in rcu_process_callbacks() local
     123  next = list->next;                  in rcu_process_callbacks()
     124  prefetch(next);                     in rcu_process_callbacks()
     127  list = next;                        in rcu_process_callbacks()
     174  head->next = NULL;                  in call_rcu()
     178  rcu_ctrlblk.curtail = &head->next;  in call_rcu()
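
Tiny RCU's drain loop has to read list->next before invoking the callback, because the callback typically frees the rcu_head it is handed (note the prefetch(next) hit, which warms the successor's cache line in the meantime). A sketch:

#include <stddef.h>

struct rcu_head {
    struct rcu_head *next;
    void (*func)(struct rcu_head *);
};

static void process_callbacks(struct rcu_head *list)
{
    while (list) {
        struct rcu_head *next = list->next;  /* grab before func() frees it */

        list->next = NULL;
        list->func(list);
        list = next;
    }
}

static int invoked;

static void cb(struct rcu_head *rhp)
{
    (void)rhp;                          /* a real callback would kfree() here */
    invoked++;
}

int main(void)
{
    struct rcu_head b = { NULL, cb };
    struct rcu_head a = { &b, cb };

    process_callbacks(&a);
    return invoked == 2 ? 0 : 1;
}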
|
| rcu_segcblist.c |
      31  rclp->tail = &rhp->next;                              in rcu_cblist_enqueue()
      56  rhp->next = NULL;                                     in rcu_cblist_flush_enqueue()
      58  srclp->tail = &rhp->next;                             in rcu_cblist_flush_enqueue()
      75  rclp->head = rhp->next;                               in rcu_cblist_dequeue()
     334  rhp->next = NULL;                                     in rcu_segcblist_enqueue()
     336  WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], &rhp->next);  in rcu_segcblist_enqueue()
     358  rhp->next = NULL;                                     in rcu_segcblist_entrain()
     365  WRITE_ONCE(rsclp->tails[i], &rhp->next);              in rcu_segcblist_entrain()
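
rcu_cblist keeps tail as a pointer to the last ->next field, initially pointing at head itself, so enqueue is two stores with no traversal and no empty-list special case (rcu_segcblist generalizes this to one tail per segment). A sketch of the structure:

#include <stddef.h>

struct rcu_head {
    struct rcu_head *next;
};

struct cblist {
    struct rcu_head *head;
    struct rcu_head **tail;             /* address of the last ->next slot */
};

static void cblist_init(struct cblist *rclp)
{
    rclp->head = NULL;
    rclp->tail = &rclp->head;           /* empty list: tail points at head */
}

static void cblist_enqueue(struct cblist *rclp, struct rcu_head *rhp)
{
    rhp->next = NULL;
    *rclp->tail = rhp;                  /* works for empty and non-empty alike */
    rclp->tail = &rhp->next;
}

int main(void)
{
    struct cblist list;
    struct rcu_head a, b;

    cblist_init(&list);
    cblist_enqueue(&list, &a);          /* first enqueue: no special case */
    cblist_enqueue(&list, &b);
    return list.head == &a && a.next == &b ? 0 : 1;
}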
|
| /kernel/time/ |
| tick-common.c |
     111  ktime_t next = dev->next_event;                     in tick_handle_periodic() local
     130  next = ktime_add_ns(next, TICK_NSEC);               in tick_handle_periodic()
     132  if (!clockevents_program_event(dev, next, false))   in tick_handle_periodic()
     164  ktime_t next;                                       in tick_setup_periodic() local
     168  next = tick_next_period;                            in tick_setup_periodic()
     174  if (!clockevents_program_event(dev, next, false))   in tick_setup_periodic()
     176  next = ktime_add_ns(next, TICK_NSEC);               in tick_setup_periodic()
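
tick_handle_periodic() advances next by one TICK_NSEC at a time until clockevents_program_event() accepts an expiry in the future, which quietly skips ticks that were missed (say, after a long SMI) instead of firing them back to back. A sketch of that catch-up rule with plain nanosecond counters standing in for ktime_t and the clockevents call:

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 4000000ULL            /* 250 Hz, an assumption for the example */

static uint64_t program_next_tick(uint64_t next, uint64_t now)
{
    /* Step one period at a time until the event lands in the future. */
    while (next <= now)
        next += TICK_NSEC;
    return next;
}

int main(void)
{
    /* We are 2.5 ticks late: the next event lands on the 3rd boundary. */
    uint64_t next = program_next_tick(0, 2 * TICK_NSEC + TICK_NSEC / 2);

    printf("next tick at %llu ns\n", (unsigned long long)next);
    return 0;
}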
|
| /kernel/sched/ |
| stats.h |
     111  void psi_task_switch(struct task_struct *prev, struct task_struct *next,
     214  struct task_struct *next,                                                in psi_sched_switch() argument
     220  psi_task_switch(prev, next, sleep);                                      in psi_sched_switch()
     228  struct task_struct *next,                                                in psi_sched_switch() argument
     319  sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)  in sched_info_switch() argument
     329  if (next != rq->idle)                                                    in sched_info_switch()
     330  sched_info_arrive(rq, next);                                             in sched_info_switch()
     336  # define sched_info_switch(rq, t, next) do { } while (0)   argument
|
| core.c |
    5007  next = head->next;                                     in do_balance_callbacks()
    5009  head = next;                                           in do_balance_callbacks()
    5341  switch_mm_irqs_off(prev->active_mm, next->mm, next);   in context_switch()
    6300  return next;                                           in pick_next_task()
    6326  goto next;                                             in try_steal_cookie()
    6329  goto next;                                             in try_steal_cookie()
    6332  goto next;                                             in try_steal_cookie()
    6340  goto next;                                             in try_steal_cookie()
    6348  next:                                                  in try_steal_cookie()
    6905  next = find_proxy_task(rq, next, &rf);                 in __schedule()
    [all …]
|
| sched.h |
     645  struct balance_callback *next;        member
     673  struct sched_entity *next;            member
     804  int next; /* next highest */          member
     847  u64 next;                             member
     945  struct perf_domain *next;             member
    2450  next->sched_class->set_next_task(rq, next, false);  in set_next_task()
    2471  if (next == prev)                     in put_prev_set_next_task()
    2475  next->sched_class->set_next_task(rq, next, true);   in put_prev_set_next_task()
    3736  struct task_struct *next)             in switch_mm_cid() argument
    3781  if (next->mm_cid_active)              in switch_mm_cid()
    [all …]
|
| /kernel/trace/rv/ |
| rv_trace.h |
     135  TP_PROTO(struct task_struct *task, char *states, char *atoms, char *next),
     137  TP_ARGS(task, states, atoms, next),
     144  __string(next, next)
     152  __assign_str(next);
     156  __get_str(states), __get_str(atoms), __get_str(next))
|
| /kernel/gcov/ |
| gcc_4_7.c |
      94  struct gcov_info *next;        member
     136  return info->next;             in gcov_info_next()
     145  info->next = gcov_info_head;   in gcov_info_link()
     157  prev->next = info->next;       in gcov_info_unlink()
     159  gcov_info_head = info->next;   in gcov_info_unlink()
     291  dup->next = NULL;              in gcov_info_dup()
|
| /kernel/irq/ |
| ipi.c |
      56  unsigned int next;                       in irq_reserve_ipi() local
      69  next = cpumask_next_zero(offset, dest);  in irq_reserve_ipi()
      70  if (next < nr_cpu_ids)                   in irq_reserve_ipi()
      71  next = cpumask_next(next, dest);         in irq_reserve_ipi()
      72  if (next < nr_cpu_ids) {                 in irq_reserve_ipi()
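
The ipi.c hits implement a contiguity check: find the first clear bit after the range start, then make sure no set bit follows it, otherwise the destination mask is not one consecutive CPU range. A sketch over a plain unsigned long instead of a struct cpumask, with a scan helper standing in for cpumask_next_zero()/cpumask_next():

#include <stdbool.h>
#include <stdio.h>

#define NR_BITS (sizeof(unsigned long) * 8)

static unsigned int next_bit_from(unsigned long mask, unsigned int start,
                                  bool value)
{
    for (unsigned int i = start; i < NR_BITS; i++)
        if (((mask >> i) & 1UL) == (unsigned long)value)
            return i;
    return NR_BITS;                     /* like nr_cpu_ids: "none found" */
}

static bool is_consecutive(unsigned long mask, unsigned int offset)
{
    /* first hole after the range start ... */
    unsigned int next = next_bit_from(mask, offset, false);

    /* ... must not be followed by another set bit */
    if (next < NR_BITS)
        next = next_bit_from(mask, next, true);
    return next >= NR_BITS;
}

int main(void)
{
    printf("%d\n", is_consecutive(0x0eUL, 1));  /* 1: CPUs 1-3, contiguous */
    printf("%d\n", is_consecutive(0x2eUL, 1));  /* 0: gap before CPU 5 */
    return 0;
}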
|