/kernel/bpf/

percpu_freelist.c
  17   raw_res_spin_lock_init(&head->lock);  in pcpu_freelist_init()
  18   head->first = NULL;  in pcpu_freelist_init()
  31   node->next = head->first;  in pcpu_freelist_push_node()
  32   WRITE_ONCE(head->first, node);  in pcpu_freelist_push_node()
  38   if (raw_res_spin_lock(&head->lock))  in ___pcpu_freelist_push()
  41   raw_res_spin_unlock(&head->lock);  in ___pcpu_freelist_push()
  48   struct pcpu_freelist_head *head;  in __pcpu_freelist_push()  (local)
  81   struct pcpu_freelist_head *head;  in pcpu_freelist_populate()  (local)
  103  struct pcpu_freelist_head *head;  in ___pcpu_freelist_pop()  (local)
  108  if (!READ_ONCE(head->first))  in ___pcpu_freelist_pop()
  [all …]
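
The lines above sketch a per-bucket freelist: each pcpu_freelist_head is a singly linked list guarded by its own lock, and a push links the new node in front of head->first. Below is a minimal userspace sketch of that shape, with a pthread mutex standing in for the raw_res_spin_lock shown above; all names are illustrative, not the kernel API.

```c
#include <pthread.h>
#include <stdio.h>

#define NR_BUCKETS 4    /* stands in for "one freelist head per CPU" */

struct freelist_node {
    struct freelist_node *next;
};

struct freelist_head {
    pthread_mutex_t lock;
    struct freelist_node *first;
};

static struct freelist_head buckets[NR_BUCKETS];

/* Push: link the node in front of the current first element. */
static void freelist_push(struct freelist_head *head, struct freelist_node *node)
{
    pthread_mutex_lock(&head->lock);
    node->next = head->first;
    head->first = node;
    pthread_mutex_unlock(&head->lock);
}

/* Pop: detach and return the first element, or NULL if the list is empty. */
static struct freelist_node *freelist_pop(struct freelist_head *head)
{
    struct freelist_node *node;

    pthread_mutex_lock(&head->lock);
    node = head->first;
    if (node)
        head->first = node->next;
    pthread_mutex_unlock(&head->lock);
    return node;
}

int main(void)
{
    struct freelist_node n;

    for (int i = 0; i < NR_BUCKETS; i++)
        pthread_mutex_init(&buckets[i].lock, NULL);

    freelist_push(&buckets[0], &n);
    printf("popped %p\n", (void *)freelist_pop(&buckets[0]));
    return 0;
}
```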
|
hashtab.c
  797   head = &b->head;  in htab_lru_map_delete_node()
  1101  head = &b->head;  in htab_map_update_elem()
  1209  head = &b->head;  in htab_lru_map_update_elem()
  1278  head = &b->head;  in htab_map_update_elem_in_place()
  1341  head = &b->head;  in __htab_lru_percpu_map_update_elem()
  1418  head = &b->head;  in htab_map_delete_elem()
  1454  head = &b->head;  in htab_lru_map_delete_elem()
  1598  head = &b->head;  in __htab_map_lookup_and_delete_elem()
  1752  head = &b->head;  in __htab_map_lookup_and_delete_batch()
  2021  head = &b->head;  in bpf_hash_map_seq_find_next()
  [all …]
|
queue_stack_maps.c
  20   u32 head, tail;  (member)
  33   return qs->head == qs->tail;  in queue_stack_map_is_empty()
  38   u32 head = qs->head + 1;  in queue_stack_map_is_full()  (local)
  40   if (unlikely(head >= qs->size))  in queue_stack_map_is_full()
  41   head = 0;  in queue_stack_map_is_full()
  43   return head == qs->tail;  in queue_stack_map_is_full()
  142  index = qs->head - 1;  in __stack_map_get()
  150  qs->head = index;  in __stack_map_get()
  212  dst = &qs->elements[qs->head * qs->map.value_size];  in queue_stack_map_push_elem()
  215  if (unlikely(++qs->head >= qs->size))  in queue_stack_map_push_elem()
  [all …]
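
queue_stack_maps.c treats head and tail as indices into a fixed-size ring: the map is empty when head == tail, and full when advancing head by one slot (with wrap-around) would land on tail. A self-contained sketch of exactly that arithmetic, under assumed names:

```c
#include <stdbool.h>
#include <stdio.h>

struct ring {
    unsigned int head, tail, size;   /* size counts slots; one stays free */
};

static bool ring_is_empty(const struct ring *r)
{
    return r->head == r->tail;
}

static bool ring_is_full(const struct ring *r)
{
    unsigned int head = r->head + 1;

    if (head >= r->size)
        head = 0;                    /* wrap around past the last slot */
    return head == r->tail;          /* next push would collide with tail */
}

int main(void)
{
    struct ring r = { .head = 0, .tail = 0, .size = 4 };

    printf("empty=%d full=%d\n", ring_is_empty(&r), ring_is_full(&r));
    r.head = 3;                      /* three pushes later: only the spare slot left */
    printf("empty=%d full=%d\n", ring_is_empty(&r), ring_is_full(&r));
    return 0;
}
```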
|
devmap.c
  219   struct hlist_head *head;  in dev_map_free()  (local)
  222   head = dev_map_index_hash(dtab, i);  in dev_map_free()
  281   hlist_for_each_entry_rcu(dev, head, index_hlist,  in __dev_map_hash_lookup_elem()
  295   struct hlist_head *head;  in dev_map_hash_get_next_key()  (local)
  320   head = dev_map_index_hash(dtab, i);  in dev_map_hash_get_next_key()
  612   struct hlist_head *head;  in dev_map_enqueue_multi()  (local)
  646   head = dev_map_index_hash(dtab, i);  in dev_map_enqueue_multi()
  729   struct hlist_head *head;  in dev_map_redirect_multi()  (local)
  765   head = dev_map_index_hash(dtab, i);  in dev_map_redirect_multi()
  1084  struct hlist_head *head;  in dev_map_hash_remove_netdev()  (local)
  [all …]
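
The devmap references follow the common hashed-bucket pattern: dev_map_index_hash() selects one hlist_head out of a bucket array, and lookups walk only that chain. A simplified, single-threaded userspace sketch of the same idea (no RCU, illustrative names):

```c
#include <stdio.h>

#define NR_BUCKETS 8

struct dev_entry {
    struct dev_entry *next;   /* chain within one bucket */
    unsigned int idx;         /* the key */
};

static struct dev_entry *buckets[NR_BUCKETS];

/* Pick the bucket head for a given index (trivial modulo "hash"). */
static struct dev_entry **index_hash(unsigned int idx)
{
    return &buckets[idx % NR_BUCKETS];
}

static void hash_insert(struct dev_entry *e)
{
    struct dev_entry **head = index_hash(e->idx);

    e->next = *head;
    *head = e;
}

static struct dev_entry *hash_lookup(unsigned int idx)
{
    for (struct dev_entry *e = *index_hash(idx); e; e = e->next)
        if (e->idx == idx)
            return e;
    return NULL;
}

int main(void)
{
    struct dev_entry a = { .idx = 3 }, b = { .idx = 11 };   /* 3 and 11 share a bucket */

    hash_insert(&a);
    hash_insert(&b);
    printf("found idx 11: %s\n", hash_lookup(11) ? "yes" : "no");
    return 0;
}
```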
|
stream.c
  267  struct llist_node *head, *tail;  in bpf_stream_backlog_fill()  (local)
  274  head = llist_reverse_order(tail);  in bpf_stream_backlog_fill()
  277  stream->backlog_head = head;  in bpf_stream_backlog_fill()
  280  stream->backlog_tail->next = head;  in bpf_stream_backlog_fill()
  456  struct llist_node *list, *head, *tail;  in bpf_stream_stage_commit()  (local)
  469  head = tail = list;  in bpf_stream_stage_commit()
  477  llist_add_batch(head, tail, &stream->log);  in bpf_stream_stage_commit()
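
bpf_stream_backlog_fill() calls llist_reverse_order() because producers push onto a lock-free llist in LIFO order while the consumer wants to drain entries oldest-first. A minimal plain-C sketch of that reversal step (illustrative names, not the kernel llist API):

```c
#include <stdio.h>

struct node {
    struct node *next;
    int val;
};

/* Reverse a singly linked chain in place and return the new head. */
static struct node *reverse_order(struct node *head)
{
    struct node *prev = NULL;

    while (head) {
        struct node *next = head->next;

        head->next = prev;
        prev = head;
        head = next;
    }
    return prev;
}

int main(void)
{
    struct node n1 = { .val = 1 }, n2 = { .val = 2 }, n3 = { .val = 3 };
    struct node *top = NULL;

    /* LIFO pushes: 1, then 2, then 3 (newest ends up at the front). */
    n1.next = top; top = &n1;
    n2.next = top; top = &n2;
    n3.next = top; top = &n3;

    for (struct node *n = reverse_order(top); n; n = n->next)
        printf("%d ", n->val);   /* prints "1 2 3": FIFO order restored */
    printf("\n");
    return 0;
}
```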
|
/kernel/gcov/

clang.c
  58   struct list_head head;  (member)
  68   struct list_head head;  (member)
  89   INIT_LIST_HEAD(&info->head);  in llvm_gcov_init()
  120  INIT_LIST_HEAD(&info->head);  in llvm_gcda_emit_function()
  131  struct gcov_fn_info, head);  in llvm_gcda_emit_arcs()
  177  struct gcov_info, head);  in gcov_info_next()
  200  __list_del_entry(&info->head);  in gcov_info_unlink()
  276  struct gcov_fn_info, head);  in gcov_info_add()
  295  INIT_LIST_HEAD(&fn_dup->head);  in gcov_fn_info_dup()
  323  INIT_LIST_HEAD(&dup->head);  in gcov_info_dup()
  [all …]
|
/kernel/rcu/

rcu_segcblist.c
  20   rclp->head = NULL;  in rcu_cblist_init()
  21   rclp->tail = &rclp->head;  in rcu_cblist_init()
  47   drclp->head = srclp->head;  in rcu_cblist_flush_enqueue()
  48   if (drclp->head)  in rcu_cblist_flush_enqueue()
  57   srclp->head = rhp;  in rcu_cblist_flush_enqueue()
  71   rhp = rclp->head;  in rcu_cblist_dequeue()
  76   if (!rclp->head)  in rcu_cblist_dequeue()
  436  if (!rclp->head)  in rcu_segcblist_insert_done_cbs()
  440  WRITE_ONCE(rsclp->head, rclp->head);  in rcu_segcblist_insert_done_cbs()
  446  rclp->head = NULL;  in rcu_segcblist_insert_done_cbs()
  [all …]
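
rcu_cblist_init() setting tail = &rclp->head is the head/tail-pointer idiom: tail always holds the address of the last ->next pointer (or of head while the list is empty), so enqueue is O(1) and the empty case needs no special handling. A standalone sketch of that idiom with illustrative names:

```c
#include <stdio.h>

struct cb {
    struct cb *next;
    int id;
};

struct cblist {
    struct cb *head;
    struct cb **tail;   /* address of the last ->next pointer */
};

static void cblist_init(struct cblist *l)
{
    l->head = NULL;
    l->tail = &l->head;        /* tail points at head while empty */
}

static void cblist_enqueue(struct cblist *l, struct cb *c)
{
    c->next = NULL;
    *l->tail = c;              /* link after the current last element */
    l->tail = &c->next;
}

static struct cb *cblist_dequeue(struct cblist *l)
{
    struct cb *c = l->head;

    if (!c)
        return NULL;
    l->head = c->next;
    if (!l->head)              /* list went empty: reset tail to &head */
        l->tail = &l->head;
    return c;
}

int main(void)
{
    struct cblist l;
    struct cb a = { .id = 1 }, b = { .id = 2 };

    cblist_init(&l);
    cblist_enqueue(&l, &a);
    cblist_enqueue(&l, &b);
    while (l.head)
        printf("dequeued %d\n", cblist_dequeue(&l)->id);
    return 0;
}
```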
|
tiny.c
  91   trace_rcu_invoke_callback("", head);  in rcu_reclaim_tiny()
  92   f = head->func;  in rcu_reclaim_tiny()
  93   debug_rcu_head_callback(head);  in rcu_reclaim_tiny()
  95   f(head);  in rcu_reclaim_tiny()
  165  if (debug_rcu_head_queue(head)) {  in call_rcu()
  167  pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);  in call_rcu()
  168  mem_dump_obj(head);  in call_rcu()
  173  head->func = func;  in call_rcu()
  174  head->next = NULL;  in call_rcu()
  177  *rcu_ctrlblk.curtail = head;  in call_rcu()
  [all …]
|
update.c
  402  void wakeme_after_rcu(struct rcu_head *head)  in wakeme_after_rcu()  (argument)
  406  rcu = container_of(head, struct rcu_synchronize, head);  in wakeme_after_rcu()
  428  init_rcu_head_on_stack(&rs_array[i].head);  in __wait_rcu_gp()
  444  destroy_rcu_head_on_stack(&rs_array[i].head);  in __wait_rcu_gp()
  458  void init_rcu_head(struct rcu_head *head)  in init_rcu_head()  (argument)
  460  debug_object_init(head, &rcuhead_debug_descr);  in init_rcu_head()
  464  void destroy_rcu_head(struct rcu_head *head)  in destroy_rcu_head()  (argument)
  466  debug_object_free(head, &rcuhead_debug_descr);  in destroy_rcu_head()
  504  debug_object_free(head, &rcuhead_debug_descr);  in destroy_rcu_head_on_stack()
  607  static struct rcu_head head;  in early_boot_test_call_rcu()  (local)
  [all …]
|
rcu.h
  232  static inline int debug_rcu_head_queue(struct rcu_head *head)  in debug_rcu_head_queue()  (argument)
  236  r1 = debug_object_activate(head, &rcuhead_debug_descr);  in debug_rcu_head_queue()
  237  debug_object_active_state(head, &rcuhead_debug_descr,  in debug_rcu_head_queue()
  243  static inline void debug_rcu_head_unqueue(struct rcu_head *head)  in debug_rcu_head_unqueue()  (argument)
  245  debug_object_active_state(head, &rcuhead_debug_descr,  in debug_rcu_head_unqueue()
  248  debug_object_deactivate(head, &rcuhead_debug_descr);  in debug_rcu_head_unqueue()
  251  static inline int debug_rcu_head_queue(struct rcu_head *head)  in debug_rcu_head_queue()  (argument)
  256  static inline void debug_rcu_head_unqueue(struct rcu_head *head)  in debug_rcu_head_unqueue()  (argument)
|
/kernel/

notifier.c
  171  ret = notifier_chain_register(&nh->head, n, true);  in atomic_notifier_chain_register_unique_prio()
  193  ret = notifier_chain_unregister(&nh->head, n);  in atomic_notifier_chain_unregister()
  241  return !rcu_access_pointer(nh->head);  in atomic_notifier_call_chain_is_empty()
  324  return notifier_chain_unregister(&nh->head, n);  in blocking_notifier_chain_unregister()
  327  ret = notifier_chain_unregister(&nh->head, n);  in blocking_notifier_chain_unregister()
  343  if (rcu_access_pointer(nh->head)) {  in blocking_notifier_call_chain_robust()
  378  if (rcu_access_pointer(nh->head)) {  in blocking_notifier_call_chain()
  422  return notifier_chain_unregister(&nh->head, n);  in raw_notifier_chain_unregister()
  513  return notifier_chain_unregister(&nh->head, n);  in srcu_notifier_chain_unregister()
  516  ret = notifier_chain_unregister(&nh->head, n);  in srcu_notifier_chain_unregister()
  [all …]
|
task_work.c
  57   struct callback_head *head;  in task_work_add()  (local)
  68   head = READ_ONCE(task->task_works);  in task_work_add()
  70   if (unlikely(head == &work_exited))  in task_work_add()
  72   work->next = head;  in task_work_add()
  73   } while (!try_cmpxchg(&task->task_works, &head, work));  in task_work_add()
  197  struct callback_head *work, *head, *next;  in task_work_run()  (local)
  206  head = NULL;  in task_work_run()
  209  head = &work_exited;  in task_work_run()
  213  } while (!try_cmpxchg(&task->task_works, &work, head));  in task_work_run()
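
task_work_add() is a lock-free push: read the current head, point the new entry at it, and retry try_cmpxchg() until no other thread raced the update (bailing out early if the work_exited sentinel is seen). A userspace sketch of just the retry loop using C11 atomics; the names are illustrative and the sentinel check is omitted:

```c
#include <stdatomic.h>
#include <stdio.h>

struct callback_head {
    struct callback_head *next;
};

static _Atomic(struct callback_head *) task_works;

static void work_add(struct callback_head *work)
{
    struct callback_head *head = atomic_load(&task_works);

    do {
        /* Chain the new entry in front of the head we observed. */
        work->next = head;
        /* On failure, head is reloaded with the current value and we retry. */
    } while (!atomic_compare_exchange_weak(&task_works, &head, work));
}

int main(void)
{
    struct callback_head w1 = { 0 }, w2 = { 0 };

    work_add(&w1);
    work_add(&w2);
    printf("list head is now %p (last entry added)\n",
           (void *)atomic_load(&task_works));
    return 0;
}
```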
|
softirq.c
  759  struct tasklet_struct *head;  (member)
  770  struct tasklet_head *head;  in __tasklet_schedule_common()  (local)
  774  head = this_cpu_ptr(headp);  in __tasklet_schedule_common()
  776  *head->tail = t;  in __tasklet_schedule_common()
  777  head->tail = &(t->next);  in __tasklet_schedule_common()
  814  list = tl_head->head;  in tasklet_action_common()
  815  tl_head->head = NULL;  in tasklet_action_common()
  816  tl_head->tail = &tl_head->head;  in tasklet_action_common()
  946  &per_cpu(tasklet_vec, cpu).head;  in softirq_init()
  948  &per_cpu(tasklet_hi_vec, cpu).head;  in softirq_init()
  [all …]
|
user-return-notifier.c
  39  struct hlist_head *head;  in fire_user_return_notifiers()  (local)
  41  head = &get_cpu_var(return_notifier_list);  in fire_user_return_notifiers()
  42  hlist_for_each_entry_safe(urn, tmp2, head, link)  in fire_user_return_notifiers()
|
kprobes.c
  380   struct hlist_head *head;  in get_kprobe()  (local)
  885   struct hlist_head *head;  in optimize_all_kprobes()  (local)
  897   head = &kprobe_table[i];  in optimize_all_kprobes()
  909   struct hlist_head *head;  in unoptimize_all_kprobes()  (local)
  921   head = &kprobe_table[i];  in unoptimize_all_kprobes()
  2589  struct hlist_head *head;  in kprobes_module_callback()  (local)
  2653  struct hlist_head *head;  in kprobe_free_init_mem()  (local)
  2781  struct hlist_head *head;  in show_kprobe_addr()  (local)
  2788  head = &kprobe_table[i];  in show_kprobe_addr()
  2857  struct hlist_head *head;  in arm_all_kprobes()  (local)
  [all …]
|
/kernel/events/

ring_buffer.c
  57   unsigned long head;  in perf_output_put_handle()  (local)
  80   head = local_read(&rb->head);  in perf_output_put_handle()
  129  if (unlikely(head != local_read(&rb->head))) {  in perf_output_put_handle()
  201  head = offset;  in __perf_output_begin()
  223  head += size;  in __perf_output_begin()
  225  head -= size;  in __perf_output_begin()
  226  } while (!local_try_cmpxchg(&rb->head, &offset, head));  in __perf_output_begin()
  229  offset = head;  in __perf_output_begin()
  230  head = (u64)(-head);  in __perf_output_begin()
  423  handle->head = aux_head;  in perf_aux_output_begin()
  [all …]
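
__perf_output_begin() reserves buffer space with the same shape of loop: read the current head, advance it by the record size, and retry local_try_cmpxchg() until the range is claimed atomically. The sketch below shows only that reservation loop with C11 atomics; wrap-around, the backward-writing case, and the lost-record path are deliberately left out, and all names are illustrative:

```c
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long rb_head;   /* running write offset */

/* Claim `size` bytes and return the offset where the caller may write. */
static unsigned long output_begin(unsigned long size)
{
    unsigned long offset = atomic_load(&rb_head);
    unsigned long head;

    do {
        head = offset + size;   /* try to claim [offset, offset + size) */
        /* On failure, offset is refreshed with the current head and we retry. */
    } while (!atomic_compare_exchange_weak(&rb_head, &offset, head));

    return offset;
}

int main(void)
{
    printf("record 1 reserved at offset %lu\n", output_begin(16));
    printf("record 2 reserved at offset %lu\n", output_begin(8));
    return 0;
}
```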
|
/kernel/futex/

syscalls.c
  34   if (unlikely(len != sizeof(*head)))  in SYSCALL_DEFINE2()
  37   current->robust_list = head;  in SYSCALL_DEFINE2()
  52   struct robust_list_head __user *head;  in SYSCALL_DEFINE3()  (local)
  71   head = p->robust_list;  in SYSCALL_DEFINE3()
  74   if (put_user(sizeof(*head), len_ptr))  in SYSCALL_DEFINE3()
  76   return put_user(head, head_ptr);  in SYSCALL_DEFINE3()
  446  if (unlikely(len != sizeof(*head)))  in COMPAT_SYSCALL_DEFINE2()
  449  current->compat_robust_list = head;  in COMPAT_SYSCALL_DEFINE2()
  458  struct compat_robust_list_head __user *head;  in COMPAT_SYSCALL_DEFINE3()  (local)
  477  head = p->compat_robust_list;  in COMPAT_SYSCALL_DEFINE3()
  [all …]
|
core.c
  1134  struct robust_list __user * __user *head,  in fetch_robust_entry()  (argument)
  1172  if (get_user(futex_offset, &head->futex_offset))  in exit_robust_list()
  1182  while (entry != &head->list) {  in exit_robust_list()
  1233  if (get_user(*uentry, head))  in compat_fetch_robust_entry()
  1267  if (get_user(futex_offset, &head->futex_offset))  in compat_exit_robust_list()
  1274  &head->list_op_pending, &pip))  in compat_exit_robust_list()
  1347  while (!list_empty(head)) {  in exit_pi_state_list()
  1348  next = head->next;  in exit_pi_state_list()
  1379  if (head->next != next) {  in exit_pi_state_list()
  1542  static void futex_ref_rcu(struct rcu_head *head);
  [all …]
|
/kernel/power/

console.c
  24   struct list_head head;  (member)
  52   list_for_each_entry(tmp, &pm_vt_switch_list, head) {  in pm_vt_switch_required()
  67   list_add(&entry->head, &pm_vt_switch_list);  in pm_vt_switch_required()
  84   list_for_each_entry(tmp, &pm_vt_switch_list, head) {  in pm_vt_switch_unregister()
  86   list_del(&tmp->head);  in pm_vt_switch_unregister()
  120  list_for_each_entry(entry, &pm_vt_switch_list, head) {  in pm_vt_switch()
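
console.c links each entry into pm_vt_switch_list through an embedded list_head member and recovers the containing structure while iterating. The sketch below re-implements just enough of list_add() and list_for_each_entry() in userspace to show the container_of() pattern; it is a stand-in, not the kernel list API:

```c
#include <stddef.h>
#include <stdio.h>

struct list_head {
    struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct vt_switch_entry {
    struct list_head head;   /* link into the global list */
    int required;
};

static struct list_head vt_switch_list = LIST_HEAD_INIT(vt_switch_list);

/* Insert `entry` right after `list` (i.e. at the front of the list). */
static void list_add(struct list_head *entry, struct list_head *list)
{
    entry->next = list->next;
    entry->prev = list;
    list->next->prev = entry;
    list->next = entry;
}

int main(void)
{
    struct vt_switch_entry a = { .required = 1 }, b = { .required = 0 };

    list_add(&a.head, &vt_switch_list);
    list_add(&b.head, &vt_switch_list);

    /* Equivalent of list_for_each_entry(): walk the links, recover the containers. */
    for (struct list_head *p = vt_switch_list.next; p != &vt_switch_list; p = p->next) {
        struct vt_switch_entry *e = container_of(p, struct vt_switch_entry, head);

        printf("required=%d\n", e->required);
    }
    return 0;
}
```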
|
/kernel/sched/

wait.c
  13   INIT_LIST_HEAD(&wq_head->head);  in __init_waitqueue_head()
  54   struct list_head *head = &wq_head->head;  in add_wait_queue_priority_exclusive()  (local)
  60   if (!list_empty(head) &&  in add_wait_queue_priority_exclusive()
  61   (list_first_entry(head, typeof(*wq_entry), entry)->flags & WQ_FLAG_PRIORITY))  in add_wait_queue_priority_exclusive()
  64   list_add(&wq_entry->entry, head);  in add_wait_queue_priority_exclusive()
  99   curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);  in __wake_up_common()
  101  if (&curr->entry == &wq_head->head)  in __wake_up_common()
  104  list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {  in __wake_up_common()
  271  was_empty = list_empty(&wq_head->head);  in prepare_to_wait_exclusive()
|
/kernel/trace/

fprobe.c
  57   struct hlist_head *head;  in find_first_fprobe_node()  (local)
  60   hlist_for_each_entry_rcu(node, head, hlist,  in find_first_fprobe_node()
  74   struct hlist_head *head;  in insert_fprobe_node()  (local)
  84   hlist_add_head_rcu(&node->hlist, head);  in insert_fprobe_node()
  103  struct hlist_head *head;  in is_fprobe_still_exist()  (local)
  106  head = &fprobe_table[hash_ptr(fp, FPROBE_HASH_BITS)];  in is_fprobe_still_exist()
  107  hlist_for_each_entry_rcu(fph, head, hlist,  in is_fprobe_still_exist()
  119  struct hlist_head *head;  in add_fprobe_hash()  (local)
  129  head = &fprobe_table[hash_ptr(fp, FPROBE_HASH_BITS)];  in add_fprobe_hash()
  130  hlist_add_head_rcu(&fp->hlist_array->hlist, head);  in add_fprobe_hash()
  [all …]
|
rethook.c
  35   static void rethook_free_rcu(struct rcu_head *head)  in rethook_free_rcu()  (argument)
  37   struct rethook *rh = container_of(head, struct rethook, rcu);  in rethook_free_rcu()
  79   static int rethook_fini_pool(struct objpool_head *head, void *context)  in rethook_fini_pool()  (argument)
  127  static void free_rethook_node_rcu(struct rcu_head *head)  in free_rethook_node_rcu()  (argument)
  129  struct rethook_node *node = container_of(head, struct rethook_node, rcu);  in free_rethook_node_rcu()
|
trace_events_inject.c
  141  struct list_head *head;  in trace_get_entry_size()  (local)
  144  head = trace_get_fields(call);  in trace_get_entry_size()
  145  list_for_each_entry(field, head, link) {  in trace_get_entry_size()
  157  struct list_head *head;  in trace_alloc_entry()  (local)
  165  head = trace_get_fields(call);  in trace_alloc_entry()
  166  list_for_each_entry(field, head, link) {  in trace_alloc_entry()
|
trace_syscalls.c
  594  struct hlist_head *head;  in perf_syscall_enter()  (local)
  618  head = this_cpu_ptr(sys_data->enter_event->perf_events);  in perf_syscall_enter()
  620  if (!valid_prog_array && hlist_empty(head))  in perf_syscall_enter()
  638  hlist_empty(head)) {  in perf_syscall_enter()
  645  head, NULL);  in perf_syscall_enter()
  704  struct hlist_head *head;  in perf_syscall_exit()  (local)
  727  head = this_cpu_ptr(sys_data->exit_event->perf_events);  in perf_syscall_exit()
  729  if (!valid_prog_array && hlist_empty(head))  in perf_syscall_exit()
  745  hlist_empty(head)) {  in perf_syscall_exit()
  751  1, regs, head, NULL);  in perf_syscall_exit()
|
/kernel/cgroup/

rstat.c
  196  struct cgroup_subsys_state *head,  in css_rstat_push_children()  (argument)
  211  lockdep_assert_held(ss_rstat_lock(head->ss));  in css_rstat_push_children()
  237  child->rstat_flush_next = head;  in css_rstat_push_children()
  238  head = child;  in css_rstat_push_children()
  257  return head;  in css_rstat_push_children()
  282  struct cgroup_subsys_state *head = NULL, *parent, *child;  in css_rstat_updated_list()  (local)
  314  head = root;  in css_rstat_updated_list()
  319  head = css_rstat_push_children(head, child, cpu);  in css_rstat_updated_list()
  321  return head;  in css_rstat_updated_list()
|